Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c42
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c536
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c41
-rw-r--r--drivers/gpu/drm/amd/powerplay/Makefile4
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c364
-rw-r--r--drivers/gpu/drm/amd/powerplay/arcturus_ppt.c330
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h96
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h42
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h23
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c612
-rw-r--r--drivers/gpu/drm/amd/powerplay/renoir_ppt.c385
-rw-r--r--drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c776
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_cmn.c633
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_cmn.h83
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_internal.h24
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c367
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v12_0.c176
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c11
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c10
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c3
-rw-r--r--drivers/gpu/drm/drm_crtc_helper_internal.h7
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c63
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c14
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c101
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dma.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c4
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c5
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c25
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c348
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h40
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c366
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c129
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c38
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c98
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c153
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c277
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c145
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo_regs.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_client_blt.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c22
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c15
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c41
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c29
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c36
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c5
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c4
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt.c2
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c22
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_sseu.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c91
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c49
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c92
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c23
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset_types.h7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c591
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.h10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c306
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c21
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c8
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c8
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c10
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c29
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c28
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.h6
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h5
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c286
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c9
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h32
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c2
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c34
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c59
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c55
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c14
-rw-r--r--drivers/gpu/drm/i915/i915_query.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h18
-rw-r--r--drivers/gpu/drm/i915/i915_request.c23
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c10
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h16
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c24
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h1
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c653
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h16
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c6
-rw-r--r--drivers/gpu/drm/i915/intel_pch.h4
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c108
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c20
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c133
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c3
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c12
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c2
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c3
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c25
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c274
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c52
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h6
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c11
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h1102
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h102
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h125
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx.xml.h403
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h3606
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c109
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h5
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h147
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c193
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c25
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h12
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c74
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h230
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c72
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h8
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h933
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c67
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c297
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h48
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c29
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c62
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h28
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c56
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c84
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h26
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h26
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c198
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_common.xml.h26
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h230
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c21
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c41
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h26
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c18
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h26
-rw-r--r--drivers/gpu/drm/msm/edp/edp.xml.h26
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h26
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h26
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c107
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h5
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c36
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c4
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild7
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c30
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base.h12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c233
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base827c.c74
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base907c.c159
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h16
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c69
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c129
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c44
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.c22
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.h29
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc907d.c69
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcc37d.c61
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs507a.c32
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/cursc37a.c23
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/dac507d.c30
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/dac907d.c20
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c277
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h22
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.h72
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head507d.c408
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head827d.c165
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head907d.c443
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head917d.c64
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc37d.c315
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c199
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/lut.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/oimm507b.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly507e.c136
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly827e.c76
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly907e.c65
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/pior507d.c30
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor507d.c30
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor907d.c28
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sorc37d.c20
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c42
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c95
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h57
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c335
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c179
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/pmu.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/sec2.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl0039.h45
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl006c.h46
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl006e.h30
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl176e.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl206e.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl502d.h337
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl5039.h153
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl507a.h36
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl507c.h165
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h372
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl507e.h93
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl826f.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl827c.h86
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl827d.h106
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl827e.h88
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl837d.h101
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl887d.h68
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl902d.h357
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl9039.h74
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl906f.h74
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl907c.h143
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h429
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl907e.h73
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h104
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cla0b5.h162
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc37a.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc37b.h49
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc37d.h567
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc37e.h394
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h286
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc57e.h142
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/drf.h208
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/client.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/disp.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/mem.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/mmu.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/notify.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h22
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/parent.h25
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/printf.h20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push.h359
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push006c.h73
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push206e.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push507c.h25
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push906f.h48
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/pushc37b.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/user.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/vmm.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c363
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo0039.c109
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo5039.c151
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo74c1.c54
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo85b5.c74
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo9039.c98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo90b5.c67
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_boa0b5.c90
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c90
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h80
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c132
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c89
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c186
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c11
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c18
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c41
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c290
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c288
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c48
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvif/disp.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvif/driver.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/mem.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvif/mmu.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvif/notify.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvif/user.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvif/vmm.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgv100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c)18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c5
-rw-r--r--drivers/gpu/drm/tegra/dc.c51
-rw-r--r--drivers/gpu/drm/tegra/dc.h3
-rw-r--r--drivers/gpu/drm/tegra/dsi.c9
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c1
-rw-r--r--drivers/gpu/drm/tegra/gr2d.h1
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c2
-rw-r--r--drivers/gpu/drm/tegra/hub.c17
-rw-r--r--drivers/gpu/drm/tegra/plane.c3
-rw-r--r--drivers/gpu/drm/tegra/plane.h3
-rw-r--r--drivers/gpu/drm/tegra/sor.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c9
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c4
-rw-r--r--drivers/gpu/host1x/bus.c9
-rw-r--r--drivers/gpu/host1x/debug.c4
-rw-r--r--drivers/gpu/host1x/dev.c11
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c6
-rw-r--r--drivers/gpu/host1x/job.c27
-rw-r--r--drivers/gpu/host1x/mipi.c37
501 files changed, 23440 insertions, 9048 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index c687432da426..29f767e026e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -2036,3 +2036,20 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
return 0;
}
+int amdgpu_atombios_get_data_table(struct amdgpu_device *adev,
+ uint32_t table,
+ uint16_t *size,
+ uint8_t *frev,
+ uint8_t *crev,
+ uint8_t **addr)
+{
+ uint16_t data_start;
+
+ if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
+ size, frev, crev, &data_start))
+ return -EINVAL;
+
+ *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
+
+ return 0;
+}
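The helper parses the ATOM data-table header and, on success, returns a pointer into the VBIOS image. A minimal sketch of a hypothetical call site (the table index and the use of the returned pointer are assumptions for illustration, not part of this change):

        uint16_t size;
        uint8_t frev, crev, *table_addr;

        /* Hypothetical consumer: look up an ATOM data table by index. */
        if (!amdgpu_atombios_get_data_table(adev, table_index,
                                            &size, &frev, &crev, &table_addr)) {
                /* table_addr now points at 'size' bytes of table data inside
                 * the VBIOS image; interpret it according to frev/crev.
                 */
        }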
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index fd8f18074f7a..1321ec09c734 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -216,6 +216,13 @@ int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
u8 voltage_type,
u8 *svd_gpio_id, u8 *svc_gpio_id);
+int amdgpu_atombios_get_data_table(struct amdgpu_device *adev,
+ uint32_t table,
+ uint16_t *size,
+ uint8_t *frev,
+ uint8_t *crev,
+ uint8_t **addr);
+
void amdgpu_atombios_fini(struct amdgpu_device *adev);
int amdgpu_atombios_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a3fa1560de96..193ffdb957b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1073,6 +1073,57 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
}
+/**
+ * amdgpu_debugfs_regs_gfxoff_status - read gfxoff status
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ */
+static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
+ while (size) {
+ uint32_t value;
+
+ r = amdgpu_get_gfx_off_status(adev, &value);
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ return r;
+ }
+
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ return r;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ return result;
+}
+
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -1123,7 +1174,9 @@ static const struct file_operations amdgpu_debugfs_gpr_fops = {
static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
.owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gfxoff_read,
.write = amdgpu_debugfs_gfxoff_write,
+ .llseek = default_llseek
};
static const struct file_operations *debugfs_regs[] = {
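Because the new read handler copies out one 32-bit word at a time with put_user(), both the read size and the file offset must be multiples of four. A minimal userspace sketch; the debugfs path is an assumption (it depends on the DRM card index, a mounted debugfs, and root privileges):

        #include <fcntl.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                uint32_t status;
                int fd = open("/sys/kernel/debug/dri/0/amdgpu_gfxoff", O_RDONLY);

                if (fd < 0)
                        return 1;
                if (read(fd, &status, sizeof(status)) == sizeof(status))
                        printf("gfxoff status: %u\n", status);
                close(fd);
                return 0;
        }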
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 7f3cd7185650..aa27fe65cdfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -425,6 +425,7 @@ struct amdgpu_pm {
u32 default_sclk;
u32 default_mclk;
struct amdgpu_i2c_chan *i2c_bus;
+ bool bus_locked;
/* internal thermal controller on rv6xx+ */
enum amdgpu_int_thermal_type int_thermal_type;
struct device *int_hwmon_dev;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 2eacf1f51bbf..26127c7d2f32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1186,7 +1186,8 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
* unfortunately we can't detect certain
* hypervisors so just do this all the time.
*/
- adev->mp1_state = PP_MP1_STATE_UNLOAD;
+ if (!amdgpu_passthrough(adev))
+ adev->mp1_state = PP_MP1_STATE_UNLOAD;
amdgpu_device_ip_suspend(adev);
adev->mp1_state = PP_MP1_STATE_NONE;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index d612033a23ac..78d37f92c7be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -578,6 +578,20 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
mutex_unlock(&adev->gfx.gfx_off_mutex);
}
+int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
+{
+
+ int r = 0;
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ r = smu_get_status_gfxoff(adev, value);
+
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+ return r;
+}
+
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
{
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 50be8e3a443b..1e7a2b0997c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -378,6 +378,7 @@ void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
+int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 20f39aa04fb9..5f20cadee343 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -796,8 +796,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
while (isspace(*++tmp_str));
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
+ while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@@ -1067,8 +1066,7 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
- while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
+ while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
if (ret)
@@ -1697,8 +1695,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
+ while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
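All three hunks replace the "check tmp[0], then strsep()" pattern with a loop that tests the return value of strsep() directly, which also terminates cleanly when the input ends in a delimiter. A standalone sketch of the pattern, using userspace strsep(3) purely for illustration:

        #define _DEFAULT_SOURCE
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        int main(void)
        {
                char buf[] = "300 600 900 ";    /* note the trailing delimiter */
                char *tmp = buf, *sub_str;
                long parameter[8];
                int n = 0;

                while (n < 8 && (sub_str = strsep(&tmp, " ")) != NULL) {
                        if (!strlen(sub_str))
                                continue;       /* empty token from the trailing space */
                        parameter[n++] = strtol(sub_str, NULL, 0);
                }
                printf("parsed %d values, last = %ld\n", n, parameter[n - 1]);
                return 0;
        }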
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index aa80cf799e42..fe7d39bb975d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -500,7 +500,6 @@ static int psp_asd_load(struct psp_context *psp)
* TODO: add version check to make it common
*/
if (amdgpu_sriov_vf(psp->adev) ||
- (psp->adev->asic_type == CHIP_SIENNA_CICHLID) ||
(psp->adev->asic_type == CHIP_NAVY_FLOUNDER))
return 0;
@@ -1988,7 +1987,7 @@ static int psp_suspend(void *handle)
ret = psp_tmr_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to terminate tmr\n");
+ DRM_ERROR("Falied to terminate tmr\n");
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index 7e8647a05df7..9e7d640920fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -47,7 +47,6 @@ struct amdgpu_ras_eeprom_control {
uint32_t next_addr;
unsigned int num_recs;
struct mutex tbl_mutex;
- bool bus_locked;
uint32_t tbl_byte_sum;
uint16_t i2c_address; // 8-bit represented address
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index fcff5671f6f8..e11c5d69843d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1292,7 +1292,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
* Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
* ttm_tt_destroy().
*/
-static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
+static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1303,14 +1303,13 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
amdgpu_ttm_tt_unpin_userptr(ttm);
if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
- return 0;
+ return;
/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
if (r)
DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
gtt->ttm.ttm.num_pages, gtt->offset);
- return r;
}
static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index a777d585db84..495c3d7bb2b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -60,7 +60,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
- unsigned long bo_size, fw_shared_bo_size;
+ unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned char fw_check;
@@ -176,6 +176,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+ bo_size += AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
@@ -189,6 +190,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
return r;
}
+ adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr +
+ bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
+ adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr +
+ bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
+
if (adev->vcn.indirect_sram) {
r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
@@ -198,17 +204,6 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
return r;
}
}
-
- r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].fw_shared_bo,
- &adev->vcn.inst[i].fw_shared_gpu_addr, &adev->vcn.inst[i].fw_shared_cpu_addr);
- if (r) {
- dev_err(adev->dev, "VCN %d (%d) failed to allocate firmware shared bo\n", i, r);
- return r;
- }
-
- fw_shared_bo_size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
- adev->vcn.inst[i].saved_shm_bo = kvmalloc(fw_shared_bo_size, GFP_KERNEL);
}
return 0;
@@ -224,11 +219,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << j))
continue;
- kvfree(adev->vcn.inst[j].saved_shm_bo);
- amdgpu_bo_free_kernel(&adev->vcn.inst[j].fw_shared_bo,
- &adev->vcn.inst[j].fw_shared_gpu_addr,
- (void **)&adev->vcn.inst[j].fw_shared_cpu_addr);
-
if (adev->vcn.indirect_sram) {
amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
&adev->vcn.inst[j].dpg_sram_gpu_addr,
@@ -274,17 +264,6 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
return -ENOMEM;
memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
-
- if (adev->vcn.inst[i].fw_shared_bo == NULL)
- return 0;
-
- if (!adev->vcn.inst[i].saved_shm_bo)
- return -ENOMEM;
-
- size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
- ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
-
- memcpy_fromio(adev->vcn.inst[i].saved_shm_bo, ptr, size);
}
return 0;
}
@@ -322,17 +301,6 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
}
memset_io(ptr, 0, size);
}
-
- if (adev->vcn.inst[i].fw_shared_bo == NULL)
- return -EINVAL;
-
- size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
- ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
-
- if (adev->vcn.inst[i].saved_shm_bo != NULL)
- memcpy_toio(ptr, adev->vcn.inst[i].saved_shm_bo, size);
- else
- memset_io(ptr, 0, size);
}
return 0;
}
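Rather than allocating a separate firmware-shared BO per instance (with its own suspend backup buffer), the shared region is now carved out of the tail of each instance's main VCN BO. The size and offset arithmetic above amounts to the following layout, noted here for clarity:

        /*
         * bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE
         *           (+ aligned ucode when load_type != AMDGPU_FW_LOAD_PSP)
         *           + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared))
         *
         * fw_shared_{cpu,gpu}_addr = {cpu,gpu}_addr + bo_size
         *             - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared))
         *
         * i.e. the shared memory occupies the last aligned page(s) of the BO,
         * so it is saved and restored together with the rest of the BO and no
         * longer needs fw_shared_bo or saved_shm_bo.
         */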
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index e125e8bfac54..7a9b804bc988 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -199,7 +199,6 @@ struct amdgpu_vcn_inst {
struct amdgpu_irq_src irq;
struct amdgpu_vcn_reg external;
struct amdgpu_bo *dpg_sram_bo;
- struct amdgpu_bo *fw_shared_bo;
struct dpg_pause_state pause_state;
void *dpg_sram_cpu_addr;
uint64_t dpg_sram_gpu_addr;
@@ -207,7 +206,6 @@ struct amdgpu_vcn_inst {
atomic_t dpg_enc_submission_cnt;
void *fw_shared_cpu_addr;
uint64_t fw_shared_gpu_addr;
- void *saved_shm_bo;
};
struct amdgpu_vcn {
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index fdabaf0db3e6..350f1bf063c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -40,6 +40,48 @@
static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
/**
+ * force_update_wptr_for_self_int - Force update the wptr for self interrupt
+ *
+ * @adev: amdgpu_device pointer
+ * @threshold: threshold to trigger the wptr reporting
+ * @timeout: timeout to trigger the wptr reporting
+ * @enabled: Enable/disable timeout flush mechanism
+ *
+ * threshold input range: 0 ~ 15, default 0,
+ * real_threshold = 2^threshold
+ * timeout input range: 0 ~ 20, default 8,
+ * real_timeout = (2^timeout) * 1024 / (socclk_freq)
+ *
+ * Force update wptr for self interrupt ( >= SIENNA_CICHLID).
+ */
+static void
+force_update_wptr_for_self_int(struct amdgpu_device *adev,
+ u32 threshold, u32 timeout, bool enabled)
+{
+ u32 ih_cntl, ih_rb_cntl;
+
+ if (adev->asic_type < CHIP_SIENNA_CICHLID)
+ return;
+
+ ih_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_CNTL2);
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+
+ ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
+ SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
+ ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
+ SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+ RB_USED_INT_THRESHOLD, threshold);
+
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+ RB_USED_INT_THRESHOLD, threshold);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ WREG32_SOC15(OSSSYS, 0, mmIH_CNTL2, ih_cntl);
+}
+
+/**
* navi10_ih_enable_interrupts - Enable the interrupt ring buffer
*
* @adev: amdgpu_device pointer
@@ -371,6 +413,8 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
navi10_ih_enable_interrupts(adev);
+ /* enable wptr force update for self int */
+ force_update_wptr_for_self_int(adev, 0, 8, true);
return 0;
}
@@ -384,6 +428,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
*/
static void navi10_ih_irq_disable(struct amdgpu_device *adev)
{
+ force_update_wptr_for_self_int(adev, 0, 8, false);
navi10_ih_disable_interrupts(adev);
/* Wait and acknowledge irq */
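The kernel-doc above gives the register encoding; plugging in the defaults that navi10_ih_irq_init() now passes (threshold = 0, timeout = 8) shows the effective behaviour. A standalone calculation, assuming a 1 GHz SOCCLK purely for illustration:

        #include <stdio.h>

        int main(void)
        {
                unsigned int threshold = 0, timeout = 8; /* defaults passed at init */
                double socclk_hz = 1.0e9;                /* assumed 1 GHz SOCCLK */

                double entries = 1u << threshold;                      /* real_threshold = 2^threshold */
                double seconds = (1u << timeout) * 1024.0 / socclk_hz; /* real_timeout */

                printf("wptr forced after %.0f ring entry(ies) or ~%.0f us\n",
                       entries, seconds * 1e6);
                return 0;
        }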
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 479991b71295..ea69ae76773e 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -446,6 +446,9 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.funcs = &nbio_v2_3_funcs;
adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
+ if (adev->asic_type == CHIP_SIENNA_CICHLID)
+ adev->gmc.xgmi.supported = true;
+
/* Set IP register base before any HW register access */
r = nv_reg_base_init(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 77f99811cd85..d488d250805d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -56,7 +56,7 @@ MODULE_FIRMWARE("amdgpu/arcturus_sos.bin");
MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_sos.bin");
-MODULE_FIRMWARE("amdgpu/sienna_cichlid_asd.bin");
+MODULE_FIRMWARE("amdgpu/sienna_cichlid_ta.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_sos.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_asd.bin");
@@ -179,6 +179,10 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
}
break;
case CHIP_SIENNA_CICHLID:
+ err = psp_init_ta_microcode(&adev->psp, chip_name);
+ if (err)
+ return err;
+ break;
case CHIP_NAVY_FLOUNDER:
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index 9bffbab35041..d55bf64770c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -32,7 +32,6 @@
#include "amdgpu_amdkfd.h"
#include <linux/i2c.h>
#include <linux/pci.h>
-#include "amdgpu_ras.h"
/* error codes */
#define I2C_OK 0
@@ -537,12 +536,12 @@ Fail:
return false;
}
-/***************************** EEPROM I2C GLUE ****************************/
+/***************************** I2C GLUE ****************************/
-static uint32_t smu_v11_0_i2c_eeprom_read_data(struct i2c_adapter *control,
- uint8_t address,
- uint8_t *data,
- uint32_t numbytes)
+static uint32_t smu_v11_0_i2c_read_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
{
uint32_t ret = 0;
@@ -562,10 +561,10 @@ Fail:
return ret;
}
-static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control,
- uint8_t address,
- uint8_t *data,
- uint32_t numbytes)
+static uint32_t smu_v11_0_i2c_write_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
{
uint32_t ret;
@@ -592,14 +591,13 @@ static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control,
static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
struct amdgpu_device *adev = to_amdgpu_device(i2c);
- struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
if (!smu_v11_0_i2c_bus_lock(i2c)) {
DRM_ERROR("Failed to lock the bus from SMU");
return;
}
- control->bus_locked = true;
+ adev->pm.bus_locked = true;
}
static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
@@ -611,14 +609,13 @@ static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
struct amdgpu_device *adev = to_amdgpu_device(i2c);
- struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
if (!smu_v11_0_i2c_bus_unlock(i2c)) {
DRM_ERROR("Failed to unlock the bus from SMU");
return;
}
- control->bus_locked = false;
+ adev->pm.bus_locked = false;
}
static const struct i2c_lock_operations smu_v11_0_i2c_i2c_lock_ops = {
@@ -627,14 +624,13 @@ static const struct i2c_lock_operations smu_v11_0_i2c_i2c_lock_ops = {
.unlock_bus = unlock_bus,
};
-static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
+static int smu_v11_0_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
int i, ret;
struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
- struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
- if (!control->bus_locked) {
+ if (!adev->pm.bus_locked) {
DRM_ERROR("I2C bus unlocked, stopping transaction!");
return -EIO;
}
@@ -643,13 +639,13 @@ static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
for (i = 0; i < num; i++) {
if (msgs[i].flags & I2C_M_RD)
- ret = smu_v11_0_i2c_eeprom_read_data(i2c_adap,
- (uint8_t)msgs[i].addr,
- msgs[i].buf, msgs[i].len);
+ ret = smu_v11_0_i2c_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ msgs[i].buf, msgs[i].len);
else
- ret = smu_v11_0_i2c_eeprom_write_data(i2c_adap,
- (uint8_t)msgs[i].addr,
- msgs[i].buf, msgs[i].len);
+ ret = smu_v11_0_i2c_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ msgs[i].buf, msgs[i].len);
if (ret != I2C_OK) {
num = -EIO;
@@ -661,18 +657,18 @@ static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
return num;
}
-static u32 smu_v11_0_i2c_eeprom_i2c_func(struct i2c_adapter *adap)
+static u32 smu_v11_0_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-static const struct i2c_algorithm smu_v11_0_i2c_eeprom_i2c_algo = {
- .master_xfer = smu_v11_0_i2c_eeprom_i2c_xfer,
- .functionality = smu_v11_0_i2c_eeprom_i2c_func,
+static const struct i2c_algorithm smu_v11_0_i2c_algo = {
+ .master_xfer = smu_v11_0_i2c_xfer,
+ .functionality = smu_v11_0_i2c_func,
};
-int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
+int smu_v11_0_i2c_control_init(struct i2c_adapter *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
int res;
@@ -680,8 +676,8 @@ int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
control->owner = THIS_MODULE;
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
- control->algo = &smu_v11_0_i2c_eeprom_i2c_algo;
- snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
+ control->algo = &smu_v11_0_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops;
res = i2c_add_adapter(control);
@@ -691,7 +687,7 @@ int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
return res;
}
-void smu_v11_0_i2c_eeprom_control_fini(struct i2c_adapter *control)
+void smu_v11_0_i2c_control_fini(struct i2c_adapter *control)
{
i2c_del_adapter(control);
}
@@ -719,9 +715,9 @@ bool smu_v11_0_i2c_test_bus(struct i2c_adapter *control)
smu_v11_0_i2c_init(control);
/* Write 0xde to address 0x0000 on the EEPROM */
- ret = smu_v11_0_i2c_eeprom_write_data(control, I2C_TARGET_ADDR, data, 6);
+ ret = smu_v11_0_i2c_write_data(control, I2C_TARGET_ADDR, data, 6);
- ret = smu_v11_0_i2c_eeprom_read_data(control, I2C_TARGET_ADDR, data, 6);
+ ret = smu_v11_0_i2c_read_data(control, I2C_TARGET_ADDR, data, 6);
smu_v11_0_i2c_fini(control);
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h
index 973f28d68e70..44467c05f642 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h
@@ -28,7 +28,7 @@
struct i2c_adapter;
-int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control);
-void smu_v11_0_i2c_eeprom_control_fini(struct i2c_adapter *control);
+int smu_v11_0_i2c_control_init(struct i2c_adapter *control);
+void smu_v11_0_i2c_control_fini(struct i2c_adapter *control);
#endif
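With the EEPROM-specific naming dropped, the adapter registered by smu_v11_0_i2c_control_init() is a general-purpose SMU-arbitrated I2C master, and the lock state now lives in adev->pm.bus_locked rather than in the RAS EEPROM control. A hedged sketch of a read through the standard i2c core (the target address is illustrative and the adapter pointer is assumed to come from the registration above):

        #include <linux/errno.h>
        #include <linux/i2c.h>

        /* 'adap' is the adapter initialised by smu_v11_0_i2c_control_init(). */
        static int example_smu_i2c_read(struct i2c_adapter *adap, u8 *data, u16 len)
        {
                struct i2c_msg msg = {
                        .addr  = 0x50,          /* example 7-bit device address */
                        .flags = I2C_M_RD,
                        .len   = len,
                        .buf   = data,
                };

                /* i2c_transfer() reaches smu_v11_0_i2c_xfer() through the algo
                 * set above; lock_bus()/unlock_bus() bracket the transfer and
                 * toggle adev->pm.bus_locked.
                 */
                return i2c_transfer(adap, &msg, 1) == 1 ? 0 : -EIO;
        }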
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d8224156460e..533913bf8410 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1376,9 +1376,9 @@ static int dm_late_init(void *handle)
unsigned int linear_lut[16];
int i;
struct dmcu *dmcu = NULL;
- bool ret;
+ bool ret = true;
- if (!adev->dm.fw_dmcu)
+ if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
return detect_mst_link_for_all_connectors(adev->ddev);
dmcu = adev->dm.dc->res_pool->dmcu;
@@ -1397,7 +1397,14 @@ static int dm_late_init(void *handle)
*/
params.min_abm_backlight = 0x28F;
- ret = dmcu_load_iram(dmcu, params);
+ /* In the case where abm is implemented on dmcub,
+ * dmcu object will be null.
+ * ABM 2.4 and up are implemented on dmcub.
+ */
+ if (dmcu)
+ ret = dmcu_load_iram(dmcu, params);
+ else if (adev->dm.dc->ctx->dmub_srv)
+ ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);
if (!ret)
return -EINVAL;
@@ -1486,23 +1493,12 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
return 0;
}
- mutex_lock(&smu->mutex);
-
- /* pass data to smu controller */
- if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
- ret = smu_write_watermarks_table(smu);
-
- if (ret) {
- mutex_unlock(&smu->mutex);
- DRM_ERROR("Failed to update WMTABLE!\n");
- return ret;
- }
- smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ ret = smu_write_watermarks_table(smu);
+ if (ret) {
+ DRM_ERROR("Failed to update WMTABLE!\n");
+ return ret;
}
- mutex_unlock(&smu->mutex);
-
return 0;
}
@@ -4546,7 +4542,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
- aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
&dsc_caps);
#endif
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
@@ -6235,7 +6231,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
if (connector_type == DRM_MODE_CONNECTOR_eDP &&
- dc_is_dmcu_initialized(adev->dm.dc)) {
+ (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
drm_object_attach_property(&aconnector->base.base,
adev->mode_info.abm_level_property, 0);
}
@@ -8471,7 +8467,7 @@ cleanup:
*out_type = update_type;
return ret;
}
-
+#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
struct drm_connector *connector;
@@ -8494,6 +8490,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
+#endif
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
@@ -8547,6 +8544,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (adev->asic_type >= CHIP_NAVI10) {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
@@ -8556,7 +8554,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
}
-
+#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed &&
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index caf3beaf4b7b..998f729976bf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -33,6 +33,8 @@
#include "amdgpu_dm_debugfs.h"
#include "dm_helpers.h"
#include "dmub/dmub_srv.h"
+#include "resource.h"
+#include "dsc.h"
struct dmub_debugfs_trace_header {
uint32_t entry_count;
@@ -817,24 +819,6 @@ unlock:
return res;
}
-/*
- * Returns the min and max vrr vfreq through the connector's debugfs file.
- * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range
- */
-static int vrr_range_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
- seq_printf(m, "Min: %u\n", (unsigned int)aconnector->min_vfreq);
- seq_printf(m, "Max: %u\n", (unsigned int)aconnector->max_vfreq);
-
- return 0;
-}
-
#ifdef CONFIG_DRM_AMD_DC_HDCP
/*
* Returns the HDCP capability of the Display (1.4 for now).
@@ -995,14 +979,517 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
return read_size - r;
}
+static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 10;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx)
+ return -ENXIO;
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_clock_en);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r)
+ return r; /* r = -EFAULT */
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 100;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx) {
+ kfree(rd_buf);
+ return -ENXIO;
+ }
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_slice_width);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r) {
+ kfree(rd_buf);
+ return r; /* r = -EFAULT */
+ }
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 100;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx) {
+ kfree(rd_buf);
+ return -ENXIO;
+ }
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_slice_height);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r) {
+ kfree(rd_buf);
+ return r; /* r = -EFAULT */
+ }
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_dsc_bytes_per_pixel_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 100;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx) {
+ kfree(rd_buf);
+ return -ENXIO;
+ }
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_bytes_per_pixel);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r) {
+ kfree(rd_buf);
+ return r; /* r = -EFAULT */
+ }
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 100;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx) {
+ kfree(rd_buf);
+ return -ENXIO;
+ }
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_pic_width);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r) {
+ kfree(rd_buf);
+ return r; /* r = -EFAULT */
+ }
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 100;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx) {
+ kfree(rd_buf);
+ return -ENXIO;
+ }
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_pic_height);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r) {
+ kfree(rd_buf);
+ return r; /* r = -EFAULT */
+ }
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 100;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx) {
+ kfree(rd_buf);
+ return -ENXIO;
+ }
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_chunk_size);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r) {
+ kfree(rd_buf);
+ return r; /* r = -EFAULT */
+ }
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 100;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx) {
+ kfree(rd_buf);
+ return -ENXIO;
+ }
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_slice_bpg_offset);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r) {
+ kfree(rd_buf);
+ return r; /* r = -EFAULT */
+ }
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(output_bpc);
-DEFINE_SHOW_ATTRIBUTE(vrr_range);
#ifdef CONFIG_DRM_AMD_DC_HDCP
DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
#endif
+static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_clock_en_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_slice_width_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_slice_width_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_slice_height_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_slice_height_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_bytes_per_pixel_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_bytes_per_pixel_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_pic_width_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_pic_width_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_pic_height_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_pic_height_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_chunk_size_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_chunk_size_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_slice_bpg_offset_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_slice_bpg_offset_read,
+ .llseek = default_llseek
+};
+
static const struct file_operations dp_link_settings_debugfs_fops = {
.owner = THIS_MODULE,
.read = dp_link_settings_read,
@@ -1055,14 +1542,21 @@ static const struct {
{"link_settings", &dp_link_settings_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"test_pattern", &dp_phy_test_pattern_fops},
- {"vrr_range", &vrr_range_fops},
#ifdef CONFIG_DRM_AMD_DC_HDCP
{"hdcp_sink_capability", &hdcp_sink_capability_fops},
#endif
{"sdp_message", &sdp_message_fops},
{"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
{"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
- {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
+ {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops},
+ {"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops},
+ {"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops},
+ {"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops},
+ {"dsc_bytes_per_pixel", &dp_dsc_bytes_per_pixel_debugfs_fops},
+ {"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops},
+ {"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops},
+ {"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops},
+ {"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops}
};
#ifdef CONFIG_DRM_AMD_DC_HDCP
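All eight dsc_* read handlers above share the same shape: walk res_ctx.pipe_ctx[] for the pipe whose stream is driven over this connector's dc_link, snapshot the hardware state through dsc->funcs->dsc_read_state(), format a single field and copy it to userspace (e.g. cat /sys/kernel/debug/dri/0/DP-1/dsc_clock_en). A factored helper along the following lines is only a sketch and is not part of the patch; the helper name and the field-selector callback are assumptions, but it shows the common pattern and keeps the buffer handling on every exit path in one place.

/* Hypothetical helper, not in this patch; the name and the getter callback
 * are illustrative only. */
typedef uint32_t (*dsc_field_get_t)(const struct dcn_dsc_state *s);

static ssize_t dp_dsc_field_read(struct file *f, char __user *buf,
                                 size_t size, loff_t *pos,
                                 dsc_field_get_t get_field)
{
        struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
        struct dcn_dsc_state dsc_state = {0};
        struct pipe_ctx *pipe_ctx;
        char rd_buf[32];
        int i, len;

        /* Find the pipe whose stream goes out over this connector's link. */
        for (i = 0; i < MAX_PIPES; i++) {
                pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe_ctx->stream &&
                    pipe_ctx->stream->link == aconnector->dc_link)
                        break;
        }
        if (i == MAX_PIPES)
                return -ENXIO;

        if (pipe_ctx->stream_res.dsc)
                pipe_ctx->stream_res.dsc->funcs->dsc_read_state(pipe_ctx->stream_res.dsc,
                                                                &dsc_state);

        len = snprintf(rd_buf, sizeof(rd_buf), "%u\n", get_field(&dsc_state));

        /* simple_read_from_buffer() handles the offset/size bookkeeping;
         * the buffer lives on the stack so no kfree() is needed. */
        return simple_read_from_buffer(buf, size, pos, rd_buf, len);
}

Each per-field read hook would then reduce to a one-line getter returning, say, s->dsc_slice_width.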
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 0affd1997fe7..e85b58f0f416 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -806,7 +806,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
stream = dc_state->streams[i];
if (stream->timing.flags.DSC == 1)
- dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream);
+ dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 1f94591ce5fb..5cb7b834e459 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1104,10 +1104,6 @@ static inline enum link_training_result perform_link_training_int(
dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
dpcd_set_training_pattern(link, dpcd_pattern);
- /* delay 5ms after notifying sink of idle pattern before switching output */
- if (link->connector_signal != SIGNAL_TYPE_EDP)
- msleep(5);
-
/* 4. mainlink output idle pattern*/
dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
@@ -3523,8 +3519,8 @@ static bool retrieve_link_cap(struct dc_link *link)
status = core_link_read_dpcd(
link,
DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
- link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
- sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw));
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+ sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
}
if (!dpcd_read_sink_ext_caps(link))
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index d6989d115c5c..10d69ada88e3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -244,6 +244,25 @@ struct dc_stream_status *dc_stream_get_status(
return dc_stream_get_status_from_state(dc->current_state, stream);
}
+#ifndef TRIM_FSFT
+/**
+ * dc_optimize_timing() - let DC optimize the timing for fast transport output
+ */
+bool dc_optimize_timing(
+ struct dc_crtc_timing *timing,
+ unsigned int max_input_rate_in_khz)
+{
+ //the optimization is expected to assign a value to these:
+ //timing->pix_clk_100hz
+ //timing->v_front_porch
+ //timing->v_total
+ //timing->fast_transport_output_rate_100hz;
+ timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz;
+
+ return true;
+}
+#endif
+
/**
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
@@ -655,6 +674,17 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
return true;
}
+enum dc_status dc_stream_add_dsc_to_resource(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ if (dc->res_pool->funcs->add_dsc_to_stream_resource) {
+ return dc->res_pool->funcs->add_dsc_to_stream_resource(dc, state, stream);
+ } else {
+ return DC_NO_DSC_RESOURCE;
+ }
+}
+
void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
{
DC_LOG_DC(
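dc_stream_add_dsc_to_resource() above is the dispatch point that compute_mst_dsc_configs_for_state() now calls instead of reaching into dcn20_add_dsc_to_stream_resource() directly; resource pools that never register the callback (the DCE pools) fall through to DC_NO_DSC_RESOURCE. A minimal caller-side sketch, assuming dc, state and a stream with timing.flags.DSC set are in scope, and that DC_LOG_WARNING is an acceptable way to report the failure:

        if (stream->timing.flags.DSC) {
                enum dc_status status =
                        dc_stream_add_dsc_to_resource(dc, state, stream);

                /* Only DCN pools provide the hook; DCE returns DC_NO_DSC_RESOURCE. */
                if (status != DC_OK)
                        DC_LOG_WARNING("cannot add DSC to stream, status %d\n", status);
        }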
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index e5a1a9eb6217..f50ef4255020 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -42,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.94"
+#define DC_VER "3.2.95"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -96,6 +96,9 @@ struct dc_plane_cap {
uint32_t nv12;
uint32_t fp16;
} max_downscale_factor;
+ // minimum supported width/height
+ uint32_t min_width;
+ uint32_t min_height;
};
// Color management caps (DPP and MPC)
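The dce/dcn resource tables below fill these two new members positionally (64, 64 or 16, 16) at the end of otherwise designated initializers. A designated form, shown here only as a readability sketch of the same values, would make the intent explicit:

        /* Fragment of a struct dc_plane_cap initializer, equivalent to the
         * positional "64, 64" used in the resource files below. */
        .min_width = 64,
        .min_height = 64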
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index af177c087d3b..a8a3b0643505 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -726,7 +726,7 @@ union dpcd_dsc_basic_capabilities {
uint8_t raw[16];
};
-union dpcd_dsc_ext_capabilities {
+union dpcd_dsc_branch_decoder_capabilities {
struct {
uint8_t BRANCH_OVERALL_THROUGHPUT_0;
uint8_t BRANCH_OVERALL_THROUGHPUT_1;
@@ -737,7 +737,7 @@ union dpcd_dsc_ext_capabilities {
struct dpcd_dsc_capabilities {
union dpcd_dsc_basic_capabilities dsc_basic_caps;
- union dpcd_dsc_ext_capabilities dsc_ext_caps;
+ union dpcd_dsc_branch_decoder_capabilities dsc_branch_decoder_caps;
};
/* These parameters are from PSR capabilities reported by Sink DPCD */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index b7a8c71e3e39..1a87bc3da826 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -713,6 +713,9 @@ struct dc_crtc_timing_flags {
uint32_t LTE_340MCSC_SCRAMBLE:1;
uint32_t DSC : 1; /* Use DSC with this timing */
+#ifndef TRIM_FSFT
+ uint32_t FAST_TRANSPORT: 1;
+#endif
};
enum dc_timing_3d_format {
@@ -772,6 +775,10 @@ struct dc_crtc_timing {
enum dc_aspect_ratio aspect_ratio;
enum scanning_type scan_type;
+#ifndef TRIM_FSFT
+ uint32_t fast_transport_output_rate_100hz;
+#endif
+
struct dc_crtc_timing_flags flags;
struct dc_dsc_config dsc_cfg;
};
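The FAST_TRANSPORT flag and fast_transport_output_rate_100hz added here are the fields dc_optimize_timing() (dc_stream.c above) fills in and that the freesync module packs into the VRR infopacket further down. A hedged sketch of how a DM-side caller might wire them together; the call site and the max_input_rate_in_khz value are assumptions, not taken from this patch:

#ifndef TRIM_FSFT
        /* Sketch: mark the stream for fast transport and let DC pick the
         * output rate; fall back to the unmodified timing on failure. */
        stream->timing.flags.FAST_TRANSPORT = 1;
        if (!dc_optimize_timing(&stream->timing, max_input_rate_in_khz))
                stream->timing.flags.FAST_TRANSPORT = 0;
#endif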
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index f2ed9bc5a319..e4e85a159462 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -363,6 +363,10 @@ bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst);
+enum dc_status dc_stream_add_dsc_to_resource(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream);
+
bool dc_stream_warmup_writeback(struct dc *dc,
int num_dwb,
struct dc_writeback_info *wb_info);
@@ -419,6 +423,12 @@ struct dc_stream_status *dc_stream_get_status_from_state(
struct dc_stream_status *dc_stream_get_status(
struct dc_stream_state *dc_stream);
+#ifndef TRIM_FSFT
+bool dc_optimize_timing(
+ struct dc_crtc_timing *timing,
+ unsigned int max_input_rate_in_khz);
+#endif
+
/*******************************************************************************
* Cursor interfaces - To manages the cursor within a stream
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 9597fc79d7fa..1d5385072a39 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -423,7 +423,9 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = 250,
.nv12 = 1,
.fp16 = 1
- }
+ },
+ 64,
+ 64
};
static const struct dc_plane_cap underlay_plane_cap = {
@@ -447,7 +449,9 @@ static const struct dc_plane_cap underlay_plane_cap = {
.argb8888 = 1,
.nv12 = 250,
.fp16 = 1
- }
+ },
+ 64,
+ 64
};
#define CTX ctx
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 51b3fe502670..5d83e8174005 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -424,7 +424,9 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = 250,
.nv12 = 1,
.fp16 = 1
- }
+ },
+ 64,
+ 64
};
#define CTX ctx
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 3c6ecfe141bb..ba50214d6c32 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -157,6 +157,11 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds
REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &s->dsc_clock_en);
REG_GET(DSCC_PPS_CONFIG3, SLICE_WIDTH, &s->dsc_slice_width);
REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bytes_per_pixel);
+ REG_GET(DSCC_PPS_CONFIG3, SLICE_HEIGHT, &s->dsc_slice_height);
+ REG_GET(DSCC_PPS_CONFIG1, CHUNK_SIZE, &s->dsc_chunk_size);
+ REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);
+ REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height);
+ REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset);
}
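With these five extra REG_GET()s, dsc2_read_state() captures every PPS field the new debugfs entries expose. A quick sanity dump from driver context, assuming a pipe_ctx in scope and the DC_LOG_DSC logging macro:

        struct dcn_dsc_state s = {0};

        if (pipe_ctx->stream_res.dsc) {
                pipe_ctx->stream_res.dsc->funcs->dsc_read_state(pipe_ctx->stream_res.dsc, &s);
                DC_LOG_DSC("en %u pic %ux%u slice %ux%u bpp %u chunk %u bpg_offset %u",
                           s.dsc_clock_en, s.dsc_pic_width, s.dsc_pic_height,
                           s.dsc_slice_width, s.dsc_slice_height,
                           s.dsc_bytes_per_pixel, s.dsc_chunk_size,
                           s.dsc_slice_bpg_offset);
        }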
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index e226647088b9..968a89bbcf24 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1043,7 +1043,9 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = 250,
.nv12 = 250,
.fp16 = 1
- }
+ },
+ 16,
+ 16
};
static const struct resource_caps res_cap_nv14 = {
.num_timing_generator = 5,
@@ -3364,6 +3366,7 @@ static struct resource_funcs dcn20_res_pool_funcs = {
.validate_bandwidth = dcn20_validate_bandwidth,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 99a7ef6ab878..e3984f02b7b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -152,11 +152,11 @@ static void enc2_stream_encoder_update_hdmi_info_packets(
/*Always add mandatory packets first followed by optional ones*/
enc2_update_hdmi_info_packet(enc1, 0, &info_frame->avi);
- enc2_update_hdmi_info_packet(enc1, 5, &info_frame->hfvsif);
+ enc2_update_hdmi_info_packet(enc1, 1, &info_frame->hfvsif);
enc2_update_hdmi_info_packet(enc1, 2, &info_frame->gamut);
- enc2_update_hdmi_info_packet(enc1, 1, &info_frame->vendor);
- enc2_update_hdmi_info_packet(enc1, 3, &info_frame->spd);
- enc2_update_hdmi_info_packet(enc1, 4, &info_frame->hdrsmd);
+ enc2_update_hdmi_info_packet(enc1, 3, &info_frame->vendor);
+ enc2_update_hdmi_info_packet(enc1, 4, &info_frame->spd);
+ enc2_update_hdmi_info_packet(enc1, 5, &info_frame->hdrsmd);
}
static void enc2_stream_encoder_stop_hdmi_info_packets(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 61b337267a72..88d41a385add 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -857,7 +857,9 @@ static const struct dc_plane_cap plane_cap = {
.argb8888 = 250,
.nv12 = 250,
.fp16 = 250
- }
+ },
+ 64,
+ 64
};
static const struct dc_debug_options debug_defaults_drv = {
@@ -1759,6 +1761,7 @@ static struct resource_funcs dcn21_res_pool_funcs = {
.validate_bandwidth = dcn21_validate_bandwidth,
.populate_dml_pipes = dcn21_populate_dml_pipes_from_context,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 0c7f247bb7de..8cdaa6eef5d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -747,7 +747,7 @@ done:
return is_dsc_possible;
}
-bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps)
+bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_branch_decoder_caps, struct dsc_dec_dpcd_caps *dsc_sink_caps)
{
if (!dpcd_dsc_basic_data)
return false;
@@ -818,14 +818,14 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_da
}
/* Extended caps */
- if (dpcd_dsc_ext_data == NULL) { // Extended DPCD DSC data can be null, e.g. because it doesn't apply to SST
+ if (dpcd_dsc_branch_decoder_caps == NULL) { // branch decoder DPCD DSC data can be NULL for a non-branch device
dsc_sink_caps->branch_overall_throughput_0_mps = 0;
dsc_sink_caps->branch_overall_throughput_1_mps = 0;
dsc_sink_caps->branch_max_line_width = 0;
return true;
}
- dsc_sink_caps->branch_overall_throughput_0_mps = dpcd_dsc_ext_data[DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
+ dsc_sink_caps->branch_overall_throughput_0_mps = dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
if (dsc_sink_caps->branch_overall_throughput_0_mps == 0)
dsc_sink_caps->branch_overall_throughput_0_mps = 0;
else if (dsc_sink_caps->branch_overall_throughput_0_mps == 1)
@@ -835,7 +835,7 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_da
dsc_sink_caps->branch_overall_throughput_0_mps += 600;
}
- dsc_sink_caps->branch_overall_throughput_1_mps = dpcd_dsc_ext_data[DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
+ dsc_sink_caps->branch_overall_throughput_1_mps = dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
if (dsc_sink_caps->branch_overall_throughput_1_mps == 0)
dsc_sink_caps->branch_overall_throughput_1_mps = 0;
else if (dsc_sink_caps->branch_overall_throughput_1_mps == 1)
@@ -845,7 +845,7 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_da
dsc_sink_caps->branch_overall_throughput_1_mps += 600;
}
- dsc_sink_caps->branch_max_line_width = dpcd_dsc_ext_data[DP_DSC_BRANCH_MAX_LINE_WIDTH - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0] * 320;
+ dsc_sink_caps->branch_max_line_width = dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_MAX_LINE_WIDTH - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0] * 320;
ASSERT(dsc_sink_caps->branch_max_line_width == 0 || dsc_sink_caps->branch_max_line_width >= 5120);
return true;
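With the rename, the second DPCD buffer passed to dc_dsc_parse_dsc_dpcd() is explicitly the branch decoder block read at DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 (see the retrieve_link_cap() hunk above); passing NULL for it simply zeroes the branch_* fields. A caller-side sketch, where dc and link are assumed in scope and the local dsc_sink_caps variable is illustrative rather than the patch's actual storage:

        struct dsc_dec_dpcd_caps dsc_sink_caps = {0};

        dc_dsc_parse_dsc_dpcd(dc,
                              link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
                              link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
                              &dsc_sink_caps);

        /* For an SST sink the third argument may simply be NULL; the branch_*
         * fields are then zeroed as shown in the hunk above. */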
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 5f985fcbedf1..329395ee7461 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -165,7 +165,9 @@ struct resource_funcs {
struct dc_3dlut **lut,
struct dc_transfer_func **shaper);
#endif
-
+ enum dc_status (*add_dsc_to_stream_resource)(
+ struct dc *dc, struct dc_state *state,
+ struct dc_stream_state *stream);
};
struct audio_support{
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index 7c2a3328b208..5915994f9eb8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -56,6 +56,11 @@ struct dcn_dsc_state {
uint32_t dsc_clock_en;
uint32_t dsc_slice_width;
uint32_t dsc_bytes_per_pixel;
+ uint32_t dsc_slice_height;
+ uint32_t dsc_pic_width;
+ uint32_t dsc_pic_height;
+ uint32_t dsc_slice_bpg_offset;
+ uint32_t dsc_chunk_size;
};
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 513a5f8f817e..e013875b89ed 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -36,10 +36,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0xf675c6448
+#define DMUB_FW_VERSION_GIT_HASH 0xe6d590b09
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 24
+#define DMUB_FW_VERSION_REVISION 25
#define DMUB_FW_VERSION_UCODE ((DMUB_FW_VERSION_MAJOR << 24) | (DMUB_FW_VERSION_MINOR << 16) | DMUB_FW_VERSION_REVISION)
#endif
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index d3a5ba9ee782..7a2500fbf3f2 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -760,9 +760,35 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
infopacket->valid = true;
}
+#ifndef TRIM_FSFT
+static void build_vrr_infopacket_fast_transport_data(
+ bool ftActive,
+ unsigned int ftOutputRate,
+ struct dc_info_packet *infopacket)
+{
+ /* PB9 : bit 7 - fast transport active */
+ unsigned char activeBit = (ftActive) ? 1 << 7 : 0;
+
+ infopacket->sb[1] &= ~activeBit; //clear bit
+ infopacket->sb[1] |= activeBit; //set bit
+
+ /* PB13 : Target Output Pixel Rate [kHz] - bits 7:0 */
+ infopacket->sb[13] = ftOutputRate & 0xFF;
+
+ /* PB14 : Target Output Pixel Rate [kHz] - bits 15:8 */
+ infopacket->sb[14] = (ftOutputRate >> 8) & 0xFF;
+
+ /* PB15 : Target Output Pixel Rate [kHz] - bits 23:16 */
+ infopacket->sb[15] = (ftOutputRate >> 16) & 0xFF;
+
+}
+#endif
static void build_vrr_infopacket_v3(enum signal_type signal,
const struct mod_vrr_params *vrr,
+#ifndef TRIM_FSFT
+ bool ftActive, unsigned int ftOutputRate,
+#endif
enum color_transfer_func app_tf,
struct dc_info_packet *infopacket)
{
@@ -773,6 +799,13 @@ static void build_vrr_infopacket_v3(enum signal_type signal,
build_vrr_infopacket_fs2_data(app_tf, infopacket);
+#ifndef TRIM_FSFT
+ build_vrr_infopacket_fast_transport_data(
+ ftActive,
+ ftOutputRate,
+ infopacket);
+#endif
+
build_vrr_infopacket_checksum(&payload_size, infopacket);
infopacket->valid = true;
@@ -795,7 +828,15 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
switch (packet_type) {
case PACKET_TYPE_FS_V3:
+#ifndef TRIM_FSFT
+ build_vrr_infopacket_v3(
+ stream->signal, vrr,
+ stream->timing.flags.FAST_TRANSPORT,
+ stream->timing.fast_transport_output_rate_100hz,
+ app_tf, infopacket);
+#else
build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket);
+#endif
break;
case PACKET_TYPE_FS_V2:
build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
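For reference, PB13-PB15 carry the fast transport output rate as a little-endian 24-bit value, so the packing done by build_vrr_infopacket_fast_transport_data() above works out as follows for an arbitrary example rate (the value 1234567 is illustrative only):

        unsigned int ftOutputRate = 1234567;              /* 0x12D687 */

        infopacket->sb[13] = ftOutputRate & 0xFF;         /* 0x87 */
        infopacket->sb[14] = (ftOutputRate >> 8) & 0xFF;  /* 0xD6 */
        infopacket->sb[15] = (ftOutputRate >> 16) & 0xFF; /* 0x12 */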
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index d27a02ac5f53..e9c48f99f71b 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -35,7 +35,9 @@ AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(
include $(AMD_POWERPLAY)
-POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o smu_v12_0.o arcturus_ppt.o navi10_ppt.o renoir_ppt.o sienna_cichlid_ppt.o
+POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o \
+ smu_v12_0.o arcturus_ppt.o navi10_ppt.o \
+ renoir_ppt.o sienna_cichlid_ppt.o smu_cmn.o
AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 03125c8a2145..838a369c9ec3 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -20,14 +20,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#define SWSMU_CODE_LAYER_L1
+
#include <linux/firmware.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
-#include "smu_v11_0.h"
-#include "smu_v12_0.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
@@ -45,196 +45,47 @@
#undef pr_info
#undef pr_debug
-#undef __SMU_DUMMY_MAP
-#define __SMU_DUMMY_MAP(type) #type
-static const char* __smu_message_names[] = {
- SMU_MESSAGE_TYPES
-};
-
-const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
-{
- if (type < 0 || type >= SMU_MSG_MAX_COUNT)
- return "unknown smu message";
- return __smu_message_names[type];
-}
-
-#undef __SMU_DUMMY_MAP
-#define __SMU_DUMMY_MAP(fea) #fea
-static const char* __smu_feature_names[] = {
- SMU_FEATURE_MASKS
-};
-
-const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
-{
- if (feature < 0 || feature >= SMU_FEATURE_COUNT)
- return "unknown smu feature";
- return __smu_feature_names[feature];
-}
-
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
size_t size = 0;
- int ret = 0, i = 0;
- uint32_t feature_mask[2] = { 0 };
- int32_t feature_index = 0;
- uint32_t count = 0;
- uint32_t sort_feature[SMU_FEATURE_COUNT];
- uint64_t hw_feature_count = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
- if (ret)
- goto failed;
-
- size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
- feature_mask[1], feature_mask[0]);
-
- for (i = 0; i < SMU_FEATURE_COUNT; i++) {
- feature_index = smu_feature_get_index(smu, i);
- if (feature_index < 0)
- continue;
- sort_feature[feature_index] = i;
- hw_feature_count++;
- }
+ size = smu_get_pp_feature_mask(smu, buf);
- for (i = 0; i < hw_feature_count; i++) {
- size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
- count++,
- smu_get_feature_name(smu, sort_feature[i]),
- i,
- !!smu_feature_is_enabled(smu, sort_feature[i]) ?
- "enabled" : "disabled");
- }
-
-failed:
mutex_unlock(&smu->mutex);
return size;
}
-static int smu_feature_update_enable_state(struct smu_context *smu,
- uint64_t feature_mask,
- bool enabled)
-{
- struct smu_feature *feature = &smu->smu_feature;
- int ret = 0;
-
- if (enabled) {
- ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_EnableSmuFeaturesLow,
- lower_32_bits(feature_mask),
- NULL);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_EnableSmuFeaturesHigh,
- upper_32_bits(feature_mask),
- NULL);
- if (ret)
- return ret;
- } else {
- ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_DisableSmuFeaturesLow,
- lower_32_bits(feature_mask),
- NULL);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_DisableSmuFeaturesHigh,
- upper_32_bits(feature_mask),
- NULL);
- if (ret)
- return ret;
- }
-
- mutex_lock(&feature->mutex);
- if (enabled)
- bitmap_or(feature->enabled, feature->enabled,
- (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
- else
- bitmap_andnot(feature->enabled, feature->enabled,
- (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
- mutex_unlock(&feature->mutex);
-
- return ret;
-}
-
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
int ret = 0;
- uint32_t feature_mask[2] = { 0 };
- uint64_t feature_2_enabled = 0;
- uint64_t feature_2_disabled = 0;
- uint64_t feature_enables = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
- if (ret)
- goto out;
-
- feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
-
- feature_2_enabled = ~feature_enables & new_mask;
- feature_2_disabled = feature_enables & ~new_mask;
+ ret = smu_set_pp_feature_mask(smu, new_mask);
- if (feature_2_enabled) {
- ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
- if (ret)
- goto out;
- }
- if (feature_2_disabled) {
- ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
- if (ret)
- goto out;
- }
-
-out:
mutex_unlock(&smu->mutex);
return ret;
}
-int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
+int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
int ret = 0;
+ struct smu_context *smu = &adev->smu;
- if (!if_version && !smu_version)
- return -EINVAL;
-
- if (smu->smc_fw_if_version && smu->smc_fw_version)
- {
- if (if_version)
- *if_version = smu->smc_fw_if_version;
-
- if (smu_version)
- *smu_version = smu->smc_fw_version;
-
- return 0;
- }
-
- if (if_version) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
- if (ret)
- return ret;
-
- smu->smc_fw_if_version = *if_version;
- }
-
- if (smu_version) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
- if (ret)
- return ret;
-
- smu->smc_fw_version = *smu_version;
- }
+ if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
+ *value = smu_get_gfx_off_status(smu);
+ else
+ ret = -EINVAL;
return ret;
}
@@ -246,9 +97,6 @@ int smu_set_soft_freq_range(struct smu_context *smu,
{
int ret = 0;
- if (!smu_clk_dpm_is_enabled(smu, clk_type))
- return 0;
-
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_soft_freq_limited_range)
@@ -285,33 +133,6 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
return ret;
}
-bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
-{
- enum smu_feature_mask feature_id = 0;
-
- switch (clk_type) {
- case SMU_MCLK:
- case SMU_UCLK:
- feature_id = SMU_FEATURE_DPM_UCLK_BIT;
- break;
- case SMU_GFXCLK:
- case SMU_SCLK:
- feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
- break;
- case SMU_SOCCLK:
- feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
- break;
- default:
- return true;
- }
-
- if(!smu_feature_is_enabled(smu, feature_id)) {
- return false;
- }
-
- return true;
-}
-
/**
* smu_dpm_set_power_gate - power gate/ungate the specific IP block
*
@@ -386,45 +207,6 @@ int smu_get_power_num_states(struct smu_context *smu,
return 0;
}
-int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
- void *table_data, bool drv2smu)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
- struct amdgpu_device *adev = smu->adev;
- struct smu_table *table = &smu_table->driver_table;
- int table_id = smu_table_get_index(smu, table_index);
- uint32_t table_size;
- int ret = 0;
- if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
- return -EINVAL;
-
- table_size = smu_table->tables[table_index].size;
-
- if (drv2smu) {
- memcpy(table->cpu_addr, table_data, table_size);
- /*
- * Flush hdp cache: to guard the content seen by
- * GPU is consitent with CPU.
- */
- amdgpu_asic_flush_hdp(adev, NULL);
- }
-
- ret = smu_send_smc_msg_with_param(smu, drv2smu ?
- SMU_MSG_TransferTableDram2Smu :
- SMU_MSG_TransferTableSmu2Dram,
- table_id | ((argument & 0xFFFF) << 16),
- NULL);
- if (ret)
- return ret;
-
- if (!drv2smu) {
- amdgpu_asic_flush_hdp(adev, NULL);
- memcpy(table_data, table->cpu_addr, table_size);
- }
-
- return ret;
-}
-
bool is_support_sw_smu(struct amdgpu_device *adev)
{
if (adev->asic_type >= CHIP_ARCTURUS)
@@ -525,63 +307,6 @@ static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
return ret;
}
-int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
-{
- struct smu_feature *feature = &smu->smu_feature;
- int feature_id;
- int ret = 0;
-
- if (smu->is_apu)
- return 1;
- feature_id = smu_feature_get_index(smu, mask);
- if (feature_id < 0)
- return 0;
-
- WARN_ON(feature_id > feature->feature_num);
-
- mutex_lock(&feature->mutex);
- ret = test_bit(feature_id, feature->enabled);
- mutex_unlock(&feature->mutex);
-
- return ret;
-}
-
-int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
- bool enable)
-{
- struct smu_feature *feature = &smu->smu_feature;
- int feature_id;
-
- feature_id = smu_feature_get_index(smu, mask);
- if (feature_id < 0)
- return -EINVAL;
-
- WARN_ON(feature_id > feature->feature_num);
-
- return smu_feature_update_enable_state(smu,
- 1ULL << feature_id,
- enable);
-}
-
-int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
-{
- struct smu_feature *feature = &smu->smu_feature;
- int feature_id;
- int ret = 0;
-
- feature_id = smu_feature_get_index(smu, mask);
- if (feature_id < 0)
- return 0;
-
- WARN_ON(feature_id > feature->feature_num);
-
- mutex_lock(&feature->mutex);
- ret = test_bit(feature_id, feature->supported);
- mutex_unlock(&feature->mutex);
-
- return ret;
-}
-
static int smu_set_funcs(struct amdgpu_device *adev)
{
struct smu_context *smu = &adev->smu;
@@ -676,22 +401,6 @@ static int smu_late_init(void *handle)
return 0;
}
-int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
- uint16_t *size, uint8_t *frev, uint8_t *crev,
- uint8_t **addr)
-{
- struct amdgpu_device *adev = smu->adev;
- uint16_t data_start;
-
- if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
- size, frev, crev, &data_start))
- return -EINVAL;
-
- *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
-
- return 0;
-}
-
static int smu_init_fb_allocations(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -1135,7 +844,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
- ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+ ret = smu_i2c_init(smu, &adev->pm.smu_i2c);
if (ret)
return ret;
@@ -1280,7 +989,6 @@ static int smu_hw_init(void *handle)
static int smu_disable_dpms(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- uint64_t features_to_disable;
int ret = 0;
bool use_baco = !smu->is_apu &&
((adev->in_gpu_reset &&
@@ -1316,11 +1024,8 @@ static int smu_disable_dpms(struct smu_context *smu)
* BACO feature has to be kept enabled.
*/
if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
- features_to_disable = U64_MAX &
- ~(1ULL << smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT));
- ret = smu_feature_update_enable_state(smu,
- features_to_disable,
- 0);
+ ret = smu_disable_all_features_with_exception(smu,
+ SMU_FEATURE_BACO_BIT);
if (ret)
dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
} else {
@@ -1341,7 +1046,7 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+ smu_i2c_fini(smu, &adev->pm.smu_i2c);
cancel_work_sync(&smu->throttling_logging_work);
@@ -1884,12 +1589,6 @@ int smu_set_mp1_state(struct smu_context *smu,
return 0;
}
- /* some asics may not support those messages */
- if (smu_msg_get_index(smu, msg) < 0) {
- mutex_unlock(&smu->mutex);
- return 0;
- }
-
ret = smu_send_smc_msg(smu, msg, NULL);
if (ret)
dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
@@ -1944,35 +1643,34 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
int smu_write_watermarks_table(struct smu_context *smu)
{
- void *watermarks_table = smu->smu_table.watermarks_table;
+ int ret = 0;
- if (!watermarks_table)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&smu->mutex);
- return smu_update_table(smu,
- SMU_TABLE_WATERMARKS,
- 0,
- watermarks_table,
- true);
+ ret = smu_set_watermarks_table(smu, NULL);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
}
int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
- void *table = smu->smu_table.watermarks_table;
+ int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- if (!table)
- return -EINVAL;
-
mutex_lock(&smu->mutex);
if (!smu->disable_watermark &&
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
- smu_set_watermarks_table(smu, table, clock_ranges);
+ ret = smu_set_watermarks_table(smu, clock_ranges);
if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
smu->watermarks_bitmap |= WATERMARKS_EXIST;
@@ -1982,7 +1680,7 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
mutex_unlock(&smu->mutex);
- return 0;
+ return ret;
}
int smu_set_ac_dc(struct smu_context *smu)
@@ -2216,8 +1914,14 @@ int smu_od_edit_dpm_table(struct smu_context *smu,
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->od_edit_dpm_table)
+ if (smu->ppt_funcs->od_edit_dpm_table) {
ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
+ if (!ret && (type == PP_OD_COMMIT_DPM_TABLE))
+ ret = smu_handle_task(smu,
+ smu->smu_dpm.dpm_level,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
+ }
mutex_unlock(&smu->mutex);
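The bulk of the removals in amdgpu_smu.c (message and feature name tables, smu_update_table(), smu_get_smc_version(), plus the per-ASIC *_get_smu_*_index() helpers removed from arcturus_ppt.c below) move into the new smu_cmn.c, which the ppt files now reach through smu_cmn_* calls and the shared cmn2asic_mapping tables. smu_cmn.c itself is not shown in this hunk, so the following is only a sketch of the lookup pattern the call sites imply; the enum name and the per-ASIC table pointers on smu_context are assumptions, not the real layout:

/* Sketch only: assumes each ppt file registers its mapping tables on
 * smu_context as clock_map/workload_map/... (member names assumed). */
static int to_asic_index(const struct cmn2asic_mapping *map,
                         uint32_t count, uint32_t index)
{
        if (index >= count || !map[index].valid_mapping)
                return -EINVAL;
        return map[index].map_to;
}

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
                                   enum smu_cmn2asic_mapping_type type, /* enum name assumed */
                                   uint32_t index)
{
        switch (type) {
        case CMN2ASIC_MAPPING_CLK:
                return to_asic_index(smu->clock_map, SMU_CLK_COUNT, index);
        case CMN2ASIC_MAPPING_WORKLOAD:
                return to_asic_index(smu->workload_map,
                                     PP_SMC_POWER_PROFILE_COUNT, index);
        default:
                return -EINVAL;
        }
}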
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 56dc20a617fd..3b9182c8c53f 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -21,12 +21,14 @@
*
*/
+#define SWSMU_CODE_LAYER_L2
+
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
-#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_arcturus.h"
#include "soc15_common.h"
@@ -43,6 +45,7 @@
#include <linux/i2c.h>
#include <linux/pci.h>
#include "amdgpu_ras.h"
+#include "smu_cmn.h"
/*
* DO NOT use these for err/warn/info/debug messages.
@@ -56,8 +59,6 @@
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-#define MSG_MAP(msg, index, valid_in_vf) \
- [SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
#define ARCTURUS_FEA_MAP(smu_feature, arcturus_feature) \
[smu_feature] = {1, (arcturus_feature)}
@@ -78,7 +79,7 @@
/* possible frequency drift (1Mhz) */
#define EPSILON 1
-static struct smu_11_0_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
+static const struct cmn2asic_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
@@ -141,7 +142,7 @@ static struct smu_11_0_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(ReadSerialNumBottom32, PPSMC_MSG_ReadSerialNumBottom32, 1),
};
-static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
+static const struct cmn2asic_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(GFXCLK, PPCLK_GFXCLK),
CLK_MAP(SCLK, PPCLK_GFXCLK),
CLK_MAP(SOCCLK, PPCLK_SOCCLK),
@@ -152,7 +153,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(VCLK, PPCLK_VCLK),
};
-static struct smu_11_0_cmn2aisc_mapping arcturus_feature_mask_map[SMU_FEATURE_COUNT] = {
+static const struct cmn2asic_mapping arcturus_feature_mask_map[SMU_FEATURE_COUNT] = {
FEA_MAP(DPM_PREFETCHER),
FEA_MAP(DPM_GFXCLK),
FEA_MAP(DPM_UCLK),
@@ -181,7 +182,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_feature_mask_map[SMU_FEATURE_CO
FEA_MAP(TEMP_DEPENDENT_VMIN),
};
-static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
+static const struct cmn2asic_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(PPTABLE),
TAB_MAP(AVFS),
TAB_MAP(AVFS_PSM_DEBUG),
@@ -194,12 +195,12 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(ACTIVITY_MONITOR_COEFF),
};
-static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+static const struct cmn2asic_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
PWR_MAP(AC),
PWR_MAP(DC),
};
-static struct smu_11_0_cmn2aisc_mapping arcturus_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+static const struct cmn2asic_mapping arcturus_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
@@ -207,103 +208,10 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_workload_map[PP_SMC_POWER_PROFI
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
-static int arcturus_get_smu_msg_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_11_0_msg_mapping mapping;
-
- if (index >= SMU_MSG_MAX_COUNT)
- return -EINVAL;
-
- mapping = arcturus_message_map[index];
- if (!(mapping.valid_mapping))
- return -EINVAL;
-
- if (amdgpu_sriov_vf(smc->adev) && !mapping.valid_in_vf)
- return -EACCES;
-
- return mapping.map_to;
-}
-
-static int arcturus_get_smu_clk_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_CLK_COUNT)
- return -EINVAL;
-
- mapping = arcturus_clk_map[index];
- if (!(mapping.valid_mapping)) {
- dev_warn(smc->adev->dev, "Unsupported SMU clk: %d\n", index);
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
-static int arcturus_get_smu_feature_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_FEATURE_COUNT)
- return -EINVAL;
-
- mapping = arcturus_feature_mask_map[index];
- if (!(mapping.valid_mapping)) {
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
-static int arcturus_get_smu_table_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_TABLE_COUNT)
- return -EINVAL;
-
- mapping = arcturus_table_map[index];
- if (!(mapping.valid_mapping)) {
- dev_warn(smc->adev->dev, "Unsupported SMU table: %d\n", index);
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
-static int arcturus_get_pwr_src_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_POWER_SOURCE_COUNT)
- return -EINVAL;
-
- mapping = arcturus_pwr_src_map[index];
- if (!(mapping.valid_mapping)) {
- dev_warn(smc->adev->dev, "Unsupported SMU power source: %d\n", index);
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
-static int arcturus_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
- return -EINVAL;
-
- mapping = arcturus_workload_map[profile];
- if (!(mapping.valid_mapping))
- return -EINVAL;
-
- return mapping.map_to;
-}
-
-static int arcturus_tables_init(struct smu_context *smu, struct smu_table *tables)
+static int arcturus_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -352,6 +260,21 @@ static int arcturus_allocate_dpm_context(struct smu_context *smu)
return 0;
}
+static int arcturus_init_smc_tables(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = arcturus_tables_init(smu);
+ if (ret)
+ return ret;
+
+ ret = arcturus_allocate_dpm_context(smu);
+ if (ret)
+ return ret;
+
+ return smu_v11_0_init_smc_tables(smu);
+}
+
static int
arcturus_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
@@ -374,7 +297,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu)
/* socclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.soc_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_SOCCLK,
dpm_table);
@@ -392,7 +315,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu)
/* gfxclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.gfx_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_GFXCLK,
dpm_table);
@@ -410,7 +333,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu)
/* memclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.uclk_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_UCLK,
dpm_table);
@@ -428,7 +351,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu)
/* fclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.fclk_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_FCLK,
dpm_table);
@@ -488,7 +411,7 @@ static int arcturus_append_powerplay_table(struct smu_context *smu)
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
smc_dpm_info);
- ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+ ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
(uint8_t **)&smc_dpm_table);
if (ret)
return ret;
@@ -533,13 +456,13 @@ static int arcturus_run_btc(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
if (ret) {
dev_err(smu->adev->dev, "RunAfllBtc failed!\n");
return ret;
}
- return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
+ return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
}
static int arcturus_populate_umd_state_clk(struct smu_context *smu)
@@ -621,7 +544,7 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu,
if (!smu_table->metrics_time ||
time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
- ret = smu_update_table(smu,
+ ret = smu_cmn_update_table(smu,
SMU_TABLE_SMU_METRICS,
0,
smu_table->metrics_table,
@@ -730,7 +653,9 @@ static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu,
if (!value)
return -EINVAL;
- clk_id = smu_clk_get_index(smu, clk_type);
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
if (clk_id < 0)
return -EINVAL;
@@ -742,31 +667,31 @@ static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu,
* We can use Average_* for dpm disabled case.
* But this is available for gfxclk/uclk/socclk/vclk/dclk.
*/
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
member_type = METRICS_CURR_GFXCLK;
else
member_type = METRICS_AVERAGE_GFXCLK;
break;
case PPCLK_UCLK:
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
member_type = METRICS_CURR_UCLK;
else
member_type = METRICS_AVERAGE_UCLK;
break;
case PPCLK_SOCCLK:
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT))
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT))
member_type = METRICS_CURR_SOCCLK;
else
member_type = METRICS_AVERAGE_SOCCLK;
break;
case PPCLK_VCLK:
- if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
member_type = METRICS_CURR_VCLK;
else
member_type = METRICS_AVERAGE_VCLK;
break;
case PPCLK_DCLK:
- if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
member_type = METRICS_CURR_DCLK;
else
member_type = METRICS_AVERAGE_DCLK;
@@ -912,10 +837,10 @@ static int arcturus_upload_dpm_level(struct smu_context *smu,
uint32_t freq;
int ret = 0;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
(feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
(PPCLK_GFXCLK << 16) | (freq & 0xffff),
NULL);
@@ -926,10 +851,10 @@ static int arcturus_upload_dpm_level(struct smu_context *smu,
}
}
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
(feature_mask & FEATURE_DPM_UCLK_MASK)) {
freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level].value;
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
(PPCLK_UCLK << 16) | (freq & 0xffff),
NULL);
@@ -940,10 +865,10 @@ static int arcturus_upload_dpm_level(struct smu_context *smu,
}
}
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
(feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
(PPCLK_SOCCLK << 16) | (freq & 0xffff),
NULL);
@@ -966,7 +891,7 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
uint32_t smu_version;
int ret = 0;
- ret = smu_get_smc_version(smu, NULL, &smu_version);
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (ret) {
dev_err(smu->adev->dev, "Failed to get smu version!\n");
return ret;
@@ -1283,7 +1208,7 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
if (!buf)
return -EINVAL;
- result = smu_get_smc_version(smu, NULL, &smu_version);
+ result = smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (result)
return result;
@@ -1300,12 +1225,14 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
* Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
* Not all profile modes are supported on arcturus.
*/
- workload_type = smu_workload_get_type(smu, i);
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ i);
if (workload_type < 0)
continue;
if (smu_version >= 0x360d00) {
- result = smu_update_table(smu,
+ result = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
workload_type,
(void *)(&activity_monitor),
@@ -1368,13 +1295,13 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
return -EINVAL;
}
- ret = smu_get_smc_version(smu, NULL, &smu_version);
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (ret)
return ret;
if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
(smu_version >=0x360d00)) {
- ret = smu_update_table(smu,
+ ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor),
@@ -1409,7 +1336,7 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
break;
}
- ret = smu_update_table(smu,
+ ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor),
@@ -1424,13 +1351,15 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
* Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
* Not all profile modes are supported on arcturus.
*/
- workload_type = smu_workload_get_type(smu, profile_mode);
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ profile_mode);
if (workload_type < 0) {
dev_err(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode);
return -EINVAL;
}
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
1 << workload_type,
NULL);
@@ -1450,7 +1379,7 @@ static int arcturus_set_performance_level(struct smu_context *smu,
uint32_t smu_version;
int ret;
- ret = smu_get_smc_version(smu, NULL, &smu_version);
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (ret) {
dev_err(smu->adev->dev, "Failed to get smu version!\n");
return ret;
@@ -1912,7 +1841,7 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)
int ret = 0;
uint32_t feature_mask[2];
unsigned long feature_enabled;
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
((uint64_t)feature_mask[1] << 32));
return !!(feature_enabled & SMC_DPM_FEATURE);
@@ -1925,8 +1854,8 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
int ret = 0;
if (enable) {
- if (!smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
if (ret) {
dev_err(smu->adev->dev, "[EnableVCNDPM] failed!\n");
return ret;
@@ -1934,8 +1863,8 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
}
power_gate->vcn_gated = false;
} else {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
if (ret) {
dev_err(smu->adev->dev, "[DisableVCNDPM] failed!\n");
return ret;
@@ -1947,14 +1876,12 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
return ret;
}
-static void arcturus_fill_eeprom_i2c_req(SwI2cRequest_t *req, bool write,
+static void arcturus_fill_i2c_req(SwI2cRequest_t *req, bool write,
uint8_t address, uint32_t numbytes,
uint8_t *data)
{
int i;
- BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
-
req->I2CcontrollerPort = 0;
req->I2CSpeed = 2;
req->SlaveAddress = address;
@@ -1981,7 +1908,7 @@ static void arcturus_fill_eeprom_i2c_req(SwI2cRequest_t *req, bool write,
}
}
-static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
+static int arcturus_i2c_read_data(struct i2c_adapter *control,
uint8_t address,
uint8_t *data,
uint32_t numbytes)
@@ -1992,12 +1919,18 @@ static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
struct smu_table_context *smu_table = &adev->smu.smu_table;
struct smu_table *table = &smu_table->driver_table;
+ if (numbytes > MAX_SW_I2C_COMMANDS) {
+ dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+ numbytes, MAX_SW_I2C_COMMANDS);
+ return -EINVAL;
+ }
+
memset(&req, 0, sizeof(req));
- arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
+ arcturus_fill_i2c_req(&req, false, address, numbytes, data);
mutex_lock(&adev->smu.mutex);
/* Now read data starting with that address */
- ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+ ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
true);
mutex_unlock(&adev->smu.mutex);
@@ -2008,18 +1941,18 @@ static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
for (i = 0; i < numbytes; i++)
data[i] = res->SwI2cCmds[i].Data;
- dev_dbg(adev->dev, "arcturus_i2c_eeprom_read_data, address = %x, bytes = %d, data :",
+ dev_dbg(adev->dev, "arcturus_i2c_read_data, address = %x, bytes = %d, data :",
(uint16_t)address, numbytes);
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
8, 1, data, numbytes, false);
} else
- dev_err(adev->dev, "arcturus_i2c_eeprom_read_data - error occurred :%x", ret);
+ dev_err(adev->dev, "arcturus_i2c_read_data - error occurred :%x", ret);
return ret;
}
-static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control,
+static int arcturus_i2c_write_data(struct i2c_adapter *control,
uint8_t address,
uint8_t *data,
uint32_t numbytes)
@@ -2028,11 +1961,17 @@ static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control,
SwI2cRequest_t req;
struct amdgpu_device *adev = to_amdgpu_device(control);
+ if (numbytes > MAX_SW_I2C_COMMANDS) {
+ dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+ numbytes, MAX_SW_I2C_COMMANDS);
+ return -EINVAL;
+ }
+
memset(&req, 0, sizeof(req));
- arcturus_fill_eeprom_i2c_req(&req, true, address, numbytes, data);
+ arcturus_fill_i2c_req(&req, true, address, numbytes, data);
mutex_lock(&adev->smu.mutex);
- ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+ ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
mutex_unlock(&adev->smu.mutex);
if (!ret) {
@@ -2055,7 +1994,7 @@ static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control,
return ret;
}
-static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
+static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
@@ -2078,18 +2017,18 @@ static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
data_chunk[1] = (next_eeprom_addr & 0xff);
if (msgs[i].flags & I2C_M_RD) {
- ret = arcturus_i2c_eeprom_read_data(i2c_adap,
- (uint8_t)msgs[i].addr,
- data_chunk, MAX_SW_I2C_COMMANDS);
+ ret = arcturus_i2c_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
memcpy(data_ptr, data_chunk + 2, data_chunk_size);
} else {
memcpy(data_chunk + 2, data_ptr, data_chunk_size);
- ret = arcturus_i2c_eeprom_write_data(i2c_adap,
- (uint8_t)msgs[i].addr,
- data_chunk, MAX_SW_I2C_COMMANDS);
+ ret = arcturus_i2c_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
}
if (ret) {
@@ -2106,17 +2045,17 @@ static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
data_chunk[1] = (next_eeprom_addr & 0xff);
if (msgs[i].flags & I2C_M_RD) {
- ret = arcturus_i2c_eeprom_read_data(i2c_adap,
- (uint8_t)msgs[i].addr,
- data_chunk, (data_size % data_chunk_size) + 2);
+ ret = arcturus_i2c_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
} else {
memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
- ret = arcturus_i2c_eeprom_write_data(i2c_adap,
- (uint8_t)msgs[i].addr,
- data_chunk, (data_size % data_chunk_size) + 2);
+ ret = arcturus_i2c_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
}
if (ret) {
@@ -2130,15 +2069,15 @@ fail:
return num;
}
-static u32 arcturus_i2c_eeprom_i2c_func(struct i2c_adapter *adap)
+static u32 arcturus_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
- .master_xfer = arcturus_i2c_eeprom_i2c_xfer,
- .functionality = arcturus_i2c_eeprom_i2c_func,
+static const struct i2c_algorithm arcturus_i2c_algo = {
+ .master_xfer = arcturus_i2c_xfer,
+ .functionality = arcturus_i2c_func,
};
static bool arcturus_i2c_adapter_is_added(struct i2c_adapter *control)
@@ -2148,7 +2087,7 @@ static bool arcturus_i2c_adapter_is_added(struct i2c_adapter *control)
return control->dev.parent == &adev->pdev->dev;
}
-static int arcturus_i2c_eeprom_control_init(struct smu_context *smu, struct i2c_adapter *control)
+static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
int res;
@@ -2160,8 +2099,8 @@ static int arcturus_i2c_eeprom_control_init(struct smu_context *smu, struct i2c_
control->owner = THIS_MODULE;
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
- control->algo = &arcturus_i2c_eeprom_i2c_algo;
- snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
+ control->algo = &arcturus_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
res = i2c_add_adapter(control);
if (res)
@@ -2170,7 +2109,7 @@ static int arcturus_i2c_eeprom_control_init(struct smu_context *smu, struct i2c_
return res;
}
-static void arcturus_i2c_eeprom_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+static void arcturus_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
{
if (!arcturus_i2c_adapter_is_added(control))
return;
@@ -2184,7 +2123,7 @@ static void arcturus_get_unique_id(struct smu_context *smu)
uint32_t top32 = 0, bottom32 = 0, smu_version;
uint64_t id;
- if (smu_get_smc_version(smu, NULL, &smu_version)) {
+ if (smu_cmn_get_smc_version(smu, NULL, &smu_version)) {
dev_warn(adev->dev, "Failed to get smu version, cannot get unique_id or serial_number\n");
return;
}
@@ -2196,8 +2135,8 @@ static void arcturus_get_unique_id(struct smu_context *smu)
}
/* Get the SN to turn into a Unique ID */
- smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumTop32, &top32);
- smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumBottom32, &bottom32);
+ smu_cmn_send_smc_msg(smu, SMU_MSG_ReadSerialNumTop32, &top32);
+ smu_cmn_send_smc_msg(smu, SMU_MSG_ReadSerialNumBottom32, &bottom32);
id = ((uint64_t)bottom32 << 32) | top32;
adev->unique_id = id;
@@ -2225,7 +2164,7 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
uint32_t smu_version;
int ret;
- ret = smu_get_smc_version(smu, NULL, &smu_version);
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (ret) {
dev_err(smu->adev->dev, "Failed to get smu version!\n");
return ret;
@@ -2237,7 +2176,7 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
return -EINVAL;
}
- return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
}
static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
@@ -2245,7 +2184,7 @@ static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
uint32_t smu_version;
int ret;
- ret = smu_get_smc_version(smu, NULL, &smu_version);
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (ret) {
dev_err(smu->adev->dev, "Failed to get smu version!\n");
return ret;
@@ -2258,12 +2197,12 @@ static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
}
if (en)
- return smu_send_smc_msg_with_param(smu,
+ return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GmiPwrDnControl,
1,
NULL);
- return smu_send_smc_msg_with_param(smu,
+ return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GmiPwrDnControl,
0,
NULL);
@@ -2315,16 +2254,6 @@ static void arcturus_log_thermal_throttling_event(struct smu_context *smu)
}
static const struct pptable_funcs arcturus_ppt_funcs = {
- /* translate smu index into arcturus specific index */
- .get_smu_msg_index = arcturus_get_smu_msg_index,
- .get_smu_clk_index = arcturus_get_smu_clk_index,
- .get_smu_feature_index = arcturus_get_smu_feature_index,
- .get_smu_table_index = arcturus_get_smu_table_index,
- .get_smu_power_index= arcturus_get_pwr_src_index,
- .get_workload_type = arcturus_get_workload_type,
- /* internal structurs allocations */
- .tables_init = arcturus_tables_init,
- .alloc_dpm_context = arcturus_allocate_dpm_context,
/* init dpm */
.get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
/* btc */
@@ -2346,13 +2275,13 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_power_limit = arcturus_get_power_limit,
.is_dpm_running = arcturus_is_dpm_running,
.dpm_set_vcn_enable = arcturus_dpm_set_vcn_enable,
- .i2c_eeprom_init = arcturus_i2c_eeprom_control_init,
- .i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini,
+ .i2c_init = arcturus_i2c_control_init,
+ .i2c_fini = arcturus_i2c_control_fini,
.get_unique_id = arcturus_get_unique_id,
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
.fini_microcode = smu_v11_0_fini_microcode,
- .init_smc_tables = smu_v11_0_init_smc_tables,
+ .init_smc_tables = arcturus_init_smc_tables,
.fini_smc_tables = smu_v11_0_fini_smc_tables,
.init_power = smu_v11_0_init_power,
.fini_power = smu_v11_0_fini_power,
@@ -2361,15 +2290,18 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.setup_pptable = arcturus_setup_pptable,
.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
.check_fw_version = smu_v11_0_check_fw_version,
- .write_pptable = smu_v11_0_write_pptable,
+ .write_pptable = smu_cmn_write_pptable,
.set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
- .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+ .send_smc_msg = smu_cmn_send_smc_msg,
.init_display_count = NULL,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
- .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .get_enabled_mask = smu_cmn_get_enabled_mask,
+ .feature_is_enabled = smu_cmn_feature_is_enabled,
+ .disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
.notify_display_change = NULL,
.set_power_limit = smu_v11_0_set_power_limit,
.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
@@ -2396,9 +2328,17 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.set_df_cstate = arcturus_set_df_cstate,
.allow_xgmi_power_down = arcturus_allow_xgmi_power_down,
.log_thermal_throttling_event = arcturus_log_thermal_throttling_event,
+ .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+ .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &arcturus_ppt_funcs;
+ smu->message_map = arcturus_message_map;
+ smu->clock_map = arcturus_clk_map;
+ smu->feature_map = arcturus_feature_mask_map;
+ smu->table_map = arcturus_table_map;
+ smu->pwr_src_map = arcturus_pwr_src_map;
+ smu->workload_map = arcturus_workload_map;
}
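
The per-ASIC index translators dropped from the arcturus table above are replaced by a single table-driven lookup in smu_cmn.c, which is not part of this excerpt. The sketch below shows the shape such a lookup takes; the body is inferred from the per-ASIC helpers this series deletes (see the removed navi10_get_smu_*_index()/navi10_get_workload_type() blocks in the navi10_ppt.c diff further down) and from the cmn2asic structs added to amdgpu_smu.h below, and the function name is illustrative rather than the exact smu_cmn.c symbol.

/*
 * Sketch only: the real helper lives in smu_cmn.c (not shown in this
 * excerpt).  It indexes the mapping tables attached in *_set_ppt_funcs().
 */
static int sketch_to_asic_specific_index(struct smu_context *smu,
					 enum smu_cmn2asic_mapping_type type,
					 uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT || !smu->message_map)
			return -EINVAL;
		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;
		/* the VF restriction moves out of the per-ASIC callbacks */
		if (amdgpu_sriov_vf(smu->adev) && !msg_mapping.valid_in_vf)
			return -EACCES;
		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index > PP_SMC_POWER_PROFILE_CUSTOM || !smu->workload_map)
			return -EINVAL;
		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;
		return mapping.map_to;

	/* CLK, FEATURE, TABLE and PWR cases follow the same pattern */
	default:
		return -EINVAL;
	}
}

Callers such as arcturus_get_power_profile_mode() above treat a negative return as "no mapping on this ASIC" and simply skip the entry.
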
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 70181ba7ee0c..28312d6dc187 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -259,7 +259,7 @@ struct smu_table_context
void *max_sustainable_clocks;
struct smu_bios_boot_up_values boot_values;
void *driver_pptable;
- struct smu_table *tables;
+ struct smu_table tables[SMU_TABLE_COUNT];
/*
* The driver table is just a staging buffer for
* uploading/downloading content from the SMU.
@@ -366,6 +366,17 @@ struct smu_umd_pstate_table {
struct pstates_clk_freq dclk_pstate;
};
+struct cmn2asic_msg_mapping {
+ int valid_mapping;
+ int map_to;
+ int valid_in_vf;
+};
+
+struct cmn2asic_mapping {
+ int valid_mapping;
+ int map_to;
+};
+
#define WORKLOAD_POLICY_MAX 7
struct smu_context
{
@@ -373,6 +384,12 @@ struct smu_context
struct amdgpu_irq_src irq_source;
const struct pptable_funcs *ppt_funcs;
+ const struct cmn2asic_msg_mapping *message_map;
+ const struct cmn2asic_mapping *clock_map;
+ const struct cmn2asic_mapping *feature_map;
+ const struct cmn2asic_mapping *table_map;
+ const struct cmn2asic_mapping *pwr_src_map;
+ const struct cmn2asic_mapping *workload_map;
struct mutex mutex;
struct mutex sensor_lock;
struct mutex metrics_lock;
@@ -434,13 +451,6 @@ struct smu_context
struct i2c_adapter;
struct pptable_funcs {
- int (*alloc_dpm_context)(struct smu_context *smu);
- int (*get_smu_msg_index)(struct smu_context *smu, uint32_t index);
- int (*get_smu_clk_index)(struct smu_context *smu, uint32_t index);
- int (*get_smu_feature_index)(struct smu_context *smu, uint32_t index);
- int (*get_smu_table_index)(struct smu_context *smu, uint32_t index);
- int (*get_smu_power_index)(struct smu_context *smu, uint32_t index);
- int (*get_workload_type)(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile);
int (*run_btc)(struct smu_context *smu);
int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
@@ -479,10 +489,9 @@ struct pptable_funcs {
int (*notify_smc_display_config)(struct smu_context *smu);
int (*set_cpu_power_state)(struct smu_context *smu);
bool (*is_dpm_running)(struct smu_context *smu);
- int (*tables_init)(struct smu_context *smu, struct smu_table *tables);
int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
- int (*set_watermarks_table)(struct smu_context *smu, void *watermarks,
+ int (*set_watermarks_table)(struct smu_context *smu,
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
@@ -494,8 +503,8 @@ struct pptable_funcs {
int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
int (*allow_xgmi_power_down)(struct smu_context *smu, bool en);
int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
- int (*i2c_eeprom_init)(struct smu_context *smu, struct i2c_adapter *control);
- void (*i2c_eeprom_fini)(struct smu_context *smu, struct i2c_adapter *control);
+ int (*i2c_init)(struct smu_context *smu, struct i2c_adapter *control);
+ void (*i2c_fini)(struct smu_context *smu, struct i2c_adapter *control);
void (*get_unique_id)(struct smu_context *smu);
int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
int (*init_microcode)(struct smu_context *smu);
@@ -519,9 +528,14 @@ struct pptable_funcs {
int (*system_features_control)(struct smu_context *smu, bool en);
int (*send_smc_msg_with_param)(struct smu_context *smu,
enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
+ int (*send_smc_msg)(struct smu_context *smu,
+ enum smu_message_type msg,
+ uint32_t *read_arg);
int (*init_display_count)(struct smu_context *smu, uint32_t count);
int (*set_allowed_mask)(struct smu_context *smu);
int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+ int (*feature_is_enabled)(struct smu_context *smu, enum smu_feature_mask mask);
+ int (*disable_all_features_with_exception)(struct smu_context *smu, enum smu_feature_mask mask);
int (*notify_display_change)(struct smu_context *smu);
int (*set_power_limit)(struct smu_context *smu, uint32_t n);
int (*init_max_sustainable_clocks)(struct smu_context *smu);
@@ -555,6 +569,7 @@ struct pptable_funcs {
int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
int (*gfx_off_control)(struct smu_context *smu, bool enable);
+ uint32_t (*get_gfx_off_status)(struct smu_context *smu);
int (*register_irq_handler)(struct smu_context *smu);
int (*set_azalia_d3_pme)(struct smu_context *smu);
int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);
@@ -571,6 +586,8 @@ struct pptable_funcs {
int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
void (*log_thermal_throttling_event)(struct smu_context *smu);
+ size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);
+ int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);
};
typedef enum {
@@ -603,6 +620,40 @@ typedef enum {
METRICS_CURR_FANSPEED,
} MetricsMember_t;
+enum smu_cmn2asic_mapping_type {
+ CMN2ASIC_MAPPING_MSG,
+ CMN2ASIC_MAPPING_CLK,
+ CMN2ASIC_MAPPING_FEATURE,
+ CMN2ASIC_MAPPING_TABLE,
+ CMN2ASIC_MAPPING_PWR,
+ CMN2ASIC_MAPPING_WORKLOAD,
+};
+
+#define MSG_MAP(msg, index, valid_in_vf) \
+ [SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
+
+#define CLK_MAP(clk, index) \
+ [SMU_##clk] = {1, (index)}
+
+#define FEA_MAP(fea) \
+ [SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}
+
+#define TAB_MAP(tab) \
+ [SMU_TABLE_##tab] = {1, TABLE_##tab}
+
+#define TAB_MAP_VALID(tab) \
+ [SMU_TABLE_##tab] = {1, TABLE_##tab}
+
+#define TAB_MAP_INVALID(tab) \
+ [SMU_TABLE_##tab] = {0, TABLE_##tab}
+
+#define PWR_MAP(tab) \
+ [SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}
+
+#define WORKLOAD_MAP(profile, workload) \
+ [profile] = {1, (workload)}
+
+#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_load_microcode(struct smu_context *smu);
int smu_check_fw_status(struct smu_context *smu);
@@ -678,25 +729,11 @@ bool smu_mode1_reset_is_support(struct smu_context *smu);
int smu_mode1_reset(struct smu_context *smu);
int smu_mode2_reset(struct smu_context *smu);
-extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
- uint16_t *size, uint8_t *frev, uint8_t *crev,
- uint8_t **addr);
-
extern const struct amd_ip_funcs smu_ip_funcs;
extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
-extern int smu_feature_is_enabled(struct smu_context *smu,
- enum smu_feature_mask mask);
-extern int smu_feature_set_enabled(struct smu_context *smu,
- enum smu_feature_mask mask, bool enable);
-extern int smu_feature_is_supported(struct smu_context *smu,
- enum smu_feature_mask mask);
-
-int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
- void *table_data, bool drv2smu);
-
bool is_support_sw_smu(struct amdgpu_device *adev);
int smu_reset(struct smu_context *smu);
int smu_sys_get_pp_table(struct smu_context *smu, void **table);
@@ -722,7 +759,6 @@ extern int smu_handle_task(struct smu_context *smu,
int smu_switch_power_profile(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE type,
bool en);
-int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version);
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -731,9 +767,6 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
int smu_set_ac_dc(struct smu_context *smu);
-bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
-const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
-const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf);
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask);
int smu_force_clk_levels(struct smu_context *smu,
@@ -755,4 +788,7 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
int smu_get_dpm_clock_table(struct smu_context *smu,
struct dpm_clocks *clock_table);
+int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
+
+#endif
#endif
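
Since the mapping tables now live in each ppt file as plain arrays, the relocated macros above expand to designated initializers over the new cmn2asic structs. Taking entries that actually appear in the navi10 tables below as examples:

/* MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1) expands to: */
[SMU_MSG_TestMessage] = {1, PPSMC_MSG_TestMessage, 1},  /* valid_mapping, map_to, valid_in_vf */

/* FEA_MAP(DPM_GFXCLK) expands to: */
[SMU_FEATURE_DPM_GFXCLK_BIT] = {1, FEATURE_DPM_GFXCLK_BIT},

/* WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT) expands to: */
[PP_SMC_POWER_PROFILE_CUSTOM] = {1, WORKLOAD_PPLIB_CUSTOM_BIT},

TAB_MAP_VALID and TAB_MAP_INVALID differ only in the valid_mapping flag, so an invalid table entry is rejected by the common lookup instead of being dereferenced.
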
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index f06158511f93..429f5aa8924a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -31,7 +31,7 @@
#define SMU11_DRIVER_IF_VERSION_NV12 0x33
#define SMU11_DRIVER_IF_VERSION_NV14 0x36
#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x33
-#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x2B
+#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x2
/* MP Apertures */
#define MP0_Public 0x03800000
@@ -52,21 +52,6 @@
#define MAX_DPM_LEVELS 16
#define MAX_PCIE_CONF 2
-#define CLK_MAP(clk, index) \
- [SMU_##clk] = {1, (index)}
-
-#define FEA_MAP(fea) \
- [SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}
-
-#define TAB_MAP(tab) \
- [SMU_TABLE_##tab] = {1, TABLE_##tab}
-
-#define PWR_MAP(tab) \
- [SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}
-
-#define WORKLOAD_MAP(profile, workload) \
- [profile] = {1, (workload)}
-
#define CTF_OFFSET_EDGE 5
#define CTF_OFFSET_HOTSPOT 5
#define CTF_OFFSET_MEM 5
@@ -77,17 +62,6 @@ static const struct smu_temperature_range smu11_thermal_policy[] =
{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
-struct smu_11_0_msg_mapping {
- int valid_mapping;
- int map_to;
- int valid_in_vf;
-};
-
-struct smu_11_0_cmn2aisc_mapping {
- int valid_mapping;
- int map_to;
-};
-
struct smu_11_0_max_sustainable_clocks {
uint32_t display_clock;
uint32_t phy_clock;
@@ -160,6 +134,8 @@ enum smu_v11_0_baco_seq {
BACO_SEQ_COUNT,
};
+#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
+
int smu_v11_0_init_microcode(struct smu_context *smu);
void smu_v11_0_fini_microcode(struct smu_context *smu);
@@ -182,8 +158,6 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu);
int smu_v11_0_check_fw_version(struct smu_context *smu);
-int smu_v11_0_write_pptable(struct smu_context *smu);
-
int smu_v11_0_set_driver_table_location(struct smu_context *smu);
int smu_v11_0_set_tool_table_location(struct smu_context *smu);
@@ -193,19 +167,10 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
int smu_v11_0_system_features_control(struct smu_context *smu,
bool en);
-int
-smu_v11_0_send_msg_with_param(struct smu_context *smu,
- enum smu_message_type msg,
- uint32_t param,
- uint32_t *read_arg);
-
int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count);
int smu_v11_0_set_allowed_mask(struct smu_context *smu);
-int smu_v11_0_get_enabled_mask(struct smu_context *smu,
- uint32_t *feature_mask, uint32_t num);
-
int smu_v11_0_notify_display_change(struct smu_context *smu);
int smu_v11_0_get_current_power_limit(struct smu_context *smu,
@@ -300,3 +265,4 @@ int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
uint32_t *max_value);
#endif
+#endif
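
The SWSMU_CODE_LAYER_* guards added here and in amdgpu_smu.h/smu_v12_0.h gate which prototypes each translation unit can see. An ASIC-level (L2) file selects its layer before any SMU header, as navi10_ppt.c does at the top of the next diff; a minimal illustration of that include pattern:

/* top of an L2 (per-ASIC ppt) file, e.g. navi10_ppt.c or arcturus_ppt.c */
#define SWSMU_CODE_LAYER_L2

#include "amdgpu_smu.h"   /* structs only; the smu_* driver API is compiled out for L2..L4 */
#include "smu_v11_0.h"    /* IP-level helpers, exposed to L2/L3 by the guard above */
#include "smu_cmn.h"      /* shared smu_cmn_* helpers introduced by this series */
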
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index fd83a723f32c..02de3b6199e5 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -31,22 +31,7 @@
#define MP1_Public 0x03b00000
#define MP1_SRAM 0x03c00004
-
-struct smu_12_0_cmn2aisc_mapping {
- int valid_mapping;
- int map_to;
-};
-
-int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
- uint16_t msg);
-
-int smu_v12_0_wait_for_response(struct smu_context *smu);
-
-int
-smu_v12_0_send_msg_with_param(struct smu_context *smu,
- enum smu_message_type msg,
- uint32_t param,
- uint32_t *read_arg);
+#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
int smu_v12_0_check_fw_status(struct smu_context *smu);
@@ -64,15 +49,10 @@ uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu);
int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable);
-int smu_v12_0_init_smc_tables(struct smu_context *smu);
-
int smu_v12_0_fini_smc_tables(struct smu_context *smu);
int smu_v12_0_set_default_dpm_tables(struct smu_context *smu);
-int smu_v12_0_get_enabled_mask(struct smu_context *smu,
- uint32_t *feature_mask, uint32_t num);
-
int smu_v12_0_mode2_reset(struct smu_context *smu);
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -81,3 +61,4 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
int smu_v12_0_set_driver_table_location(struct smu_context *smu);
#endif
+#endif
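
With the smu_v11_0/smu_v12_0 send helpers removed, the arcturus and navi10 tables in this diff point .send_smc_msg_with_param and .send_smc_msg at the smu_cmn_* versions. smu_cmn.c itself is not shown in this excerpt; a plausible minimal form of the parameterless variant, assuming it simply forwards a zero argument, would be:

/* Sketch, not the smu_cmn.c source: the no-parameter variant forwards
 * to the full helper with param = 0. */
static inline int sketch_send_smc_msg(struct smu_context *smu,
				      enum smu_message_type msg,
				      uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu, msg, 0, read_arg);
}
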
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index ead135f39c7e..6aaf483858a0 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -21,13 +21,16 @@
*
*/
+#define SWSMU_CODE_LAYER_L2
+
#include <linux/firmware.h>
#include <linux/pci.h>
+#include <linux/i2c.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
-#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_navi10.h"
@@ -41,6 +44,7 @@
#include "thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
+#include "smu_cmn.h"
/*
* DO NOT use these for err/warn/info/debug messages.
@@ -52,6 +56,8 @@
#undef pr_info
#undef pr_debug
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
@@ -63,10 +69,7 @@
FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
-#define MSG_MAP(msg, index, valid_in_vf) \
- [SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
-
-static struct smu_11_0_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
+static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
@@ -137,7 +140,7 @@ static struct smu_11_0_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive, 0),
};
-static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
+static struct cmn2asic_mapping navi10_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(GFXCLK, PPCLK_GFXCLK),
CLK_MAP(SCLK, PPCLK_GFXCLK),
CLK_MAP(SOCCLK, PPCLK_SOCCLK),
@@ -152,7 +155,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(PHYCLK, PPCLK_PHYCLK),
};
-static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
+static struct cmn2asic_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
FEA_MAP(DPM_PREFETCHER),
FEA_MAP(DPM_GFXCLK),
FEA_MAP(DPM_GFX_PACE),
@@ -198,7 +201,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUN
FEA_MAP(APCC_DFLL),
};
-static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
+static struct cmn2asic_mapping navi10_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(PPTABLE),
TAB_MAP(WATERMARKS),
TAB_MAP(AVFS),
@@ -213,12 +216,12 @@ static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(PACE),
};
-static struct smu_11_0_cmn2aisc_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+static struct cmn2asic_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
PWR_MAP(AC),
PWR_MAP(DC),
};
-static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+static struct cmn2asic_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
@@ -228,100 +231,6 @@ static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
-static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_11_0_msg_mapping mapping;
-
- if (index >= SMU_MSG_MAX_COUNT)
- return -EINVAL;
-
- mapping = navi10_message_map[index];
- if (!(mapping.valid_mapping)) {
- return -EINVAL;
- }
-
- if (amdgpu_sriov_vf(smc->adev) && !mapping.valid_in_vf)
- return -EACCES;
-
- return mapping.map_to;
-}
-
-static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_CLK_COUNT)
- return -EINVAL;
-
- mapping = navi10_clk_map[index];
- if (!(mapping.valid_mapping)) {
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
-static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_FEATURE_COUNT)
- return -EINVAL;
-
- mapping = navi10_feature_mask_map[index];
- if (!(mapping.valid_mapping)) {
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
-static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_TABLE_COUNT)
- return -EINVAL;
-
- mapping = navi10_table_map[index];
- if (!(mapping.valid_mapping)) {
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
-static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_POWER_SOURCE_COUNT)
- return -EINVAL;
-
- mapping = navi10_pwr_src_map[index];
- if (!(mapping.valid_mapping)) {
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
-
-static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
- return -EINVAL;
-
- mapping = navi10_workload_map[profile];
- if (!(mapping.valid_mapping)) {
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
static bool is_asic_secure(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -467,7 +376,7 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
smc_dpm_info);
- ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+ ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
(uint8_t **)&smc_dpm_table);
if (ret)
return ret;
@@ -487,7 +396,7 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
break;
case 7: /* nv12 */
- ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+ ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
(uint8_t **)&smc_dpm_table_v4_7);
if (ret)
return ret;
@@ -543,9 +452,10 @@ static int navi10_setup_pptable(struct smu_context *smu)
return ret;
}
-static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
+static int navi10_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -553,6 +463,8 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
@@ -584,7 +496,7 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
mutex_lock(&smu->metrics_lock);
if (!smu_table->metrics_time ||
time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
- ret = smu_update_table(smu,
+ ret = smu_cmn_update_table(smu,
SMU_TABLE_SMU_METRICS,
0,
smu_table->metrics_table,
@@ -674,9 +586,6 @@ static int navi10_allocate_dpm_context(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
- if (smu_dpm->dpm_context)
- return -EINVAL;
-
smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
GFP_KERNEL);
if (!smu_dpm->dpm_context)
@@ -687,6 +596,21 @@ static int navi10_allocate_dpm_context(struct smu_context *smu)
return 0;
}
+static int navi10_init_smc_tables(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = navi10_tables_init(smu);
+ if (ret)
+ return ret;
+
+ ret = navi10_allocate_dpm_context(smu);
+ if (ret)
+ return ret;
+
+ return smu_v11_0_init_smc_tables(smu);
+}
+
static int navi10_set_default_dpm_table(struct smu_context *smu)
{
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
@@ -696,7 +620,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
/* socclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.soc_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_SOCCLK,
dpm_table);
@@ -714,7 +638,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
/* gfxclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.gfx_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_GFXCLK,
dpm_table);
@@ -732,7 +656,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
/* uclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.uclk_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_UCLK,
dpm_table);
@@ -750,7 +674,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
/* vclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.vclk_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_VCLK,
dpm_table);
@@ -768,7 +692,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
/* dclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dclk_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_DCLK,
dpm_table);
@@ -786,7 +710,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_DCEFCLK,
dpm_table);
@@ -804,7 +728,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
/* pixelclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.pixel_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_PIXCLK,
dpm_table);
@@ -822,7 +746,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
/* displayclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.display_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_DISPCLK,
dpm_table);
@@ -840,7 +764,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
/* phyclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.phy_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_PHYCLK,
dpm_table);
@@ -867,15 +791,15 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
- if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
if (ret)
return ret;
}
power_gate->vcn_gated = false;
} else {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
if (ret)
return ret;
}
@@ -892,15 +816,15 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
int ret = 0;
if (enable) {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
if (ret)
return ret;
}
power_gate->jpeg_gated = false;
} else {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
if (ret)
return ret;
}
@@ -917,7 +841,9 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
MetricsMember_t member_type;
int clk_id = 0;
- clk_id = smu_clk_get_index(smu, clk_type);
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
if (clk_id < 0)
return clk_id;
@@ -955,7 +881,9 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
DpmDescriptor_t *dpm_desc = NULL;
uint32_t clk_index = 0;
- clk_index = smu_clk_get_index(smu, clk_type);
+ clk_index = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
dpm_desc = &pptable->DpmDescriptor[clk_index];
/* 0 - Fine grained DPM, 1 - Discrete DPM */
@@ -1336,11 +1264,11 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
int ret = 0;
uint32_t max_freq = 0;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
if (ret)
return ret;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &max_freq);
if (ret)
return ret;
@@ -1357,9 +1285,9 @@ static int navi10_display_config_changed(struct smu_context *smu)
int ret = 0;
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
- smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
+ smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
smu->display_config->num_display,
NULL);
if (ret)
@@ -1412,7 +1340,7 @@ static bool navi10_is_dpm_running(struct smu_context *smu)
int ret = 0;
uint32_t feature_mask[2];
unsigned long feature_enabled;
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
((uint64_t)feature_mask[1] << 32));
return !!(feature_enabled & SMC_DPM_FEATURE);
@@ -1483,11 +1411,13 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_workload_get_type(smu, i);
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ i);
if (workload_type < 0)
return -EINVAL;
- result = smu_update_table(smu,
+ result = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
(void *)(&activity_monitor), false);
if (result) {
@@ -1558,7 +1488,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- ret = smu_update_table(smu,
+ ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), false);
if (ret) {
@@ -1602,7 +1532,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
break;
}
- ret = smu_update_table(smu,
+ ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), true);
if (ret) {
@@ -1612,10 +1542,12 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
}
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ smu->power_profile_mode);
if (workload_type < 0)
return -EINVAL;
- smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+ smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1 << workload_type, NULL);
return ret;
@@ -1631,14 +1563,14 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
- if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
if (!ret) {
- if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
- ret = smu_send_smc_msg_with_param(smu,
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
min_clocks.dcef_clock_in_sr/100,
NULL);
@@ -1652,7 +1584,7 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
}
}
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
if (ret) {
dev_err(smu->adev->dev, "[%s] Set hard min uclk failed!", __func__);
@@ -1664,68 +1596,66 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
}
static int navi10_set_watermarks_table(struct smu_context *smu,
- void *watermarks, struct
- dm_pp_wm_sets_with_clock_ranges_soc15
- *clock_ranges)
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
- int i;
+ Watermarks_t *table = smu->smu_table.watermarks_table;
int ret = 0;
- Watermarks_t *table = watermarks;
+ int i;
- if (!table || !clock_ranges)
- return -EINVAL;
+ if (clock_ranges) {
+ if (clock_ranges->num_wm_dmif_sets > 4 ||
+ clock_ranges->num_wm_mcif_sets > 4)
+ return -EINVAL;
- if (clock_ranges->num_wm_dmif_sets > 4 ||
- clock_ranges->num_wm_mcif_sets > 4)
- return -EINVAL;
+ for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+ table->WatermarkRow[1][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].WmSetting = (uint8_t)
+ clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ }
- for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
- table->WatermarkRow[1][i].MinClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].MaxClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].MinUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].WmSetting = (uint8_t)
- clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
- }
+ for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+ table->WatermarkRow[0][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].WmSetting = (uint8_t)
+ clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ }
- for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
- table->WatermarkRow[0][i].MinClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].MaxClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].MinUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].WmSetting = (uint8_t)
- clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
}
- smu->watermarks_bitmap |= WATERMARKS_EXIST;
-
/* pass data to smu controller */
- if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
- ret = smu_write_watermarks_table(smu);
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu_cmn_write_watermarks_table(smu);
if (ret) {
dev_err(smu->adev->dev, "Failed to update WMTABLE!");
return ret;
@@ -1960,7 +1890,7 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
(pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
pptable->PcieLaneCount[i] : pcie_width_cap);
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
@@ -2012,7 +1942,7 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
uint32_t value = 0;
int ret;
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetVoltageByDpm,
param,
&value);
@@ -2046,7 +1976,7 @@ static int navi10_set_default_od_settings(struct smu_context *smu)
(OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
int ret = 0;
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, false);
+ ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, false);
if (ret) {
dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
return ret;
@@ -2180,18 +2110,11 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
break;
case PP_OD_COMMIT_DPM_TABLE:
navi10_dump_od_table(smu, od_table);
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
+ ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
if (ret) {
dev_err(smu->adev->dev, "Failed to import overdrive table!\n");
return ret;
}
- // no lock needed because smu_od_edit_dpm_table has it
- ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
- AMD_PP_TASK_READJUST_POWER_STATE,
- false);
- if (ret) {
- return ret;
- }
break;
case PP_OD_EDIT_VDDC_CURVE:
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
@@ -2260,7 +2183,7 @@ static int navi10_run_btc(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
if (ret)
dev_err(smu->adev->dev, "RunBtc failed!\n");
@@ -2272,9 +2195,9 @@ static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
int result = 0;
if (!enable)
- result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
+ result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
else
- result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
+ result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
return result;
}
@@ -2303,7 +2226,7 @@ static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
if (!navi10_need_umc_cdr_12gbps_workaround(smu->adev))
return 0;
- ret = smu_get_smc_version(smu, NULL, &smu_version);
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (ret)
return ret;
@@ -2340,19 +2263,245 @@ static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
return navi10_dummy_pstate_control(smu, true);
}
+static void navi10_fill_i2c_req(SwI2cRequest_t *req, bool write,
+ uint8_t address, uint32_t numbytes,
+ uint8_t *data)
+{
+ int i;
+
+ BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
+
+ req->I2CcontrollerPort = 0;
+ req->I2CSpeed = 2;
+ req->SlaveAddress = address;
+ req->NumCmds = numbytes;
+
+ for (i = 0; i < numbytes; i++) {
+ SwI2cCmd_t *cmd = &req->SwI2cCmds[i];
+
+ /* First 2 bytes are always a write, carrying the 2-byte EEPROM address */
+ if (i < 2)
+ cmd->Cmd = 1;
+ else
+ cmd->Cmd = write;
+
+
+ /* Add RESTART for read after address filled */
+ cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
+
+ /* Add STOP in the end */
+ cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;
+
+ /* Fill with data regardless if read or write to simplify code */
+ cmd->RegisterAddr = data[i];
+ }
+}
+
+static int navi10_i2c_read_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t i, ret = 0;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+
+ memset(&req, 0, sizeof(req));
+ navi10_fill_i2c_req(&req, false, address, numbytes, data);
+
+ mutex_lock(&adev->smu.mutex);
+ /* Now read data starting with that address */
+ ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+ true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+ SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
+
+ /* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */
+ for (i = 0; i < numbytes; i++)
+ data[i] = res->SwI2cCmds[i].Data;
+
+ dev_dbg(adev->dev, "navi10_i2c_read_data, address = %x, bytes = %d, data :",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+ } else
+ dev_err(adev->dev, "navi10_i2c_read_data - error occurred :%x", ret);
+
+ return ret;
+}
+
+static int navi10_i2c_write_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t ret;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ memset(&req, 0, sizeof(req));
+ navi10_fill_i2c_req(&req, true, address, numbytes, data);
+
+ mutex_lock(&adev->smu.mutex);
+ ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+ dev_dbg(adev->dev, "navi10_i2c_write(), address = %x, bytes = %d , data: ",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+ /*
+ * According to EEPROM spec there is a MAX of 10 ms required for
+ * EEPROM to flush internal RX buffer after STOP was issued at the
+ * end of write transaction. During this time the EEPROM will not be
+ * responsive to any more commands - so wait a bit more.
+ */
+ msleep(10);
+
+ } else
+ dev_err(adev->dev, "navi10_i2c_write- error occurred :%x", ret);
+
+ return ret;
+}
+
+static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
+ uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
+
+ for (i = 0; i < num; i++) {
+ /*
+ * The SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at
+ * once, so the data needs to be split into chunks and each chunk sent
+ * separately
+ */
+ data_size = msgs[i].len - 2;
+ data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
+ next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
+ data_ptr = msgs[i].buf + 2;
+
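+ /* Send all full-sized chunks; each carries the 2 address bytes plus data_chunk_size payload bytes */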
+ for (j = 0; j < data_size / data_chunk_size; j++) {
+ /* Insert the EEPROM destination address, bits 0-15 */
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = navi10_i2c_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+
+ memcpy(data_ptr, data_chunk + 2, data_chunk_size);
+ } else {
+ memcpy(data_chunk + 2, data_ptr, data_chunk_size);
+
+ ret = navi10_i2c_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+
+ next_eeprom_addr += data_chunk_size;
+ data_ptr += data_chunk_size;
+ }
+
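+ /* Transfer any remaining bytes that do not fill a complete chunk */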
+ if (data_size % data_chunk_size) {
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = navi10_i2c_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+
+ memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
+ } else {
+ memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
+
+ ret = navi10_i2c_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+fail:
+ return num;
+}
+
+static u32 navi10_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm navi10_i2c_algo = {
+ .master_xfer = navi10_i2c_xfer,
+ .functionality = navi10_i2c_func,
+};
+
+static bool navi10_i2c_adapter_is_added(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ return control->dev.parent == &adev->pdev->dev;
+}
+
+static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ int res;
+
+ /* This init may be called twice under SR-IOV, so skip if the adapter is already registered */
+ if (navi10_i2c_adapter_is_added(control))
+ return 0;
+
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_SPD;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &navi10_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+
+ res = i2c_add_adapter(control);
+ if (res)
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+
+ return res;
+}
+
+static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+{
+ if (!navi10_i2c_adapter_is_added(control))
+ return;
+
+ i2c_del_adapter(control);
+}
+
static const struct pptable_funcs navi10_ppt_funcs = {
- .tables_init = navi10_tables_init,
- .alloc_dpm_context = navi10_allocate_dpm_context,
- .get_smu_msg_index = navi10_get_smu_msg_index,
- .get_smu_clk_index = navi10_get_smu_clk_index,
- .get_smu_feature_index = navi10_get_smu_feature_index,
- .get_smu_table_index = navi10_get_smu_table_index,
- .get_smu_power_index = navi10_get_pwr_src_index,
- .get_workload_type = navi10_get_workload_type,
.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
.set_default_dpm_table = navi10_set_default_dpm_table,
.dpm_set_vcn_enable = navi10_dpm_set_vcn_enable,
.dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
+ .i2c_init = navi10_i2c_control_init,
+ .i2c_fini = navi10_i2c_control_fini,
.print_clk_levels = navi10_print_clk_levels,
.force_clk_levels = navi10_force_clk_levels,
.populate_umd_state_clk = navi10_populate_umd_state_clk,
@@ -2376,7 +2525,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
.fini_microcode = smu_v11_0_fini_microcode,
- .init_smc_tables = smu_v11_0_init_smc_tables,
+ .init_smc_tables = navi10_init_smc_tables,
.fini_smc_tables = smu_v11_0_fini_smc_tables,
.init_power = smu_v11_0_init_power,
.fini_power = smu_v11_0_fini_power,
@@ -2384,15 +2533,18 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.setup_pptable = navi10_setup_pptable,
.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
.check_fw_version = smu_v11_0_check_fw_version,
- .write_pptable = smu_v11_0_write_pptable,
+ .write_pptable = smu_cmn_write_pptable,
.set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
- .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+ .send_smc_msg = smu_cmn_send_smc_msg,
.init_display_count = smu_v11_0_init_display_count,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
- .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .get_enabled_mask = smu_cmn_get_enabled_mask,
+ .feature_is_enabled = smu_cmn_feature_is_enabled,
+ .disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
.notify_display_change = smu_v11_0_notify_display_change,
.set_power_limit = smu_v11_0_set_power_limit,
.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
@@ -2421,9 +2573,17 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.run_btc = navi10_run_btc,
.disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
.set_power_source = smu_v11_0_set_power_source,
+ .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+ .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &navi10_ppt_funcs;
+ smu->message_map = navi10_message_map;
+ smu->clock_map = navi10_clk_map;
+ smu->feature_map = navi10_feature_mask_map;
+ smu->table_map = navi10_table_map;
+ smu->pwr_src_map = navi10_pwr_src_map;
+ smu->workload_map = navi10_workload_map;
}
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 79cadc2df0d5..575ae4be98a2 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -21,13 +21,15 @@
*
*/
+#define SWSMU_CODE_LAYER_L2
+
#include "amdgpu.h"
#include "amdgpu_smu.h"
-#include "smu_internal.h"
#include "smu_v12_0_ppsmc.h"
#include "smu12_driver_if.h"
#include "smu_v12_0.h"
#include "renoir_ppt.h"
+#include "smu_cmn.h"
/*
* DO NOT use these for err/warn/info/debug messages.
@@ -39,83 +41,71 @@
#undef pr_info
#undef pr_debug
-#define CLK_MAP(clk, index) \
- [SMU_##clk] = {1, (index)}
-
-#define MSG_MAP(msg, index) \
- [SMU_MSG_##msg] = {1, (index)}
-
-#define TAB_MAP_VALID(tab) \
- [SMU_TABLE_##tab] = {1, TABLE_##tab}
-
-#define TAB_MAP_INVALID(tab) \
- [SMU_TABLE_##tab] = {0, TABLE_##tab}
-
-static struct smu_12_0_cmn2aisc_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = {
- MSG_MAP(TestMessage, PPSMC_MSG_TestMessage),
- MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion),
- MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion),
- MSG_MAP(PowerUpGfx, PPSMC_MSG_PowerUpGfx),
- MSG_MAP(AllowGfxOff, PPSMC_MSG_EnableGfxOff),
- MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisableGfxOff),
- MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile),
- MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile),
- MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn),
- MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn),
- MSG_MAP(PowerDownSdma, PPSMC_MSG_PowerDownSdma),
- MSG_MAP(PowerUpSdma, PPSMC_MSG_PowerUpSdma),
- MSG_MAP(SetHardMinIspclkByFreq, PPSMC_MSG_SetHardMinIspclkByFreq),
- MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn),
- MSG_MAP(Spare1, PPSMC_MSG_spare1),
- MSG_MAP(Spare2, PPSMC_MSG_spare2),
- MSG_MAP(SetAllowFclkSwitch, PPSMC_MSG_SetAllowFclkSwitch),
- MSG_MAP(SetMinVideoGfxclkFreq, PPSMC_MSG_SetMinVideoGfxclkFreq),
- MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify),
- MSG_MAP(SetCustomPolicy, PPSMC_MSG_SetCustomPolicy),
- MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps),
- MSG_MAP(NumOfDisplays, PPSMC_MSG_SetDisplayCount),
- MSG_MAP(QueryPowerLimit, PPSMC_MSG_QueryPowerLimit),
- MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh),
- MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow),
- MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram),
- MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu),
- MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset),
- MSG_MAP(SetGfxclkOverdriveByFreqVid, PPSMC_MSG_SetGfxclkOverdriveByFreqVid),
- MSG_MAP(SetHardMinDcfclkByFreq, PPSMC_MSG_SetHardMinDcfclkByFreq),
- MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq),
- MSG_MAP(ControlIgpuATS, PPSMC_MSG_ControlIgpuATS),
- MSG_MAP(SetMinVideoFclkFreq, PPSMC_MSG_SetMinVideoFclkFreq),
- MSG_MAP(SetMinDeepSleepDcfclk, PPSMC_MSG_SetMinDeepSleepDcfclk),
- MSG_MAP(ForcePowerDownGfx, PPSMC_MSG_ForcePowerDownGfx),
- MSG_MAP(SetPhyclkVoltageByFreq, PPSMC_MSG_SetPhyclkVoltageByFreq),
- MSG_MAP(SetDppclkVoltageByFreq, PPSMC_MSG_SetDppclkVoltageByFreq),
- MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn),
- MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode),
- MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency),
- MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency),
- MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxclkFrequency),
- MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxclkFrequency),
- MSG_MAP(SoftReset, PPSMC_MSG_SoftReset),
- MSG_MAP(SetGfxCGPG, PPSMC_MSG_SetGfxCGPG),
- MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk),
- MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk),
- MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq),
- MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq),
- MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn),
- MSG_MAP(PowerGateMmHub, PPSMC_MSG_PowerGateMmHub),
- MSG_MAP(UpdatePmeRestore, PPSMC_MSG_UpdatePmeRestore),
- MSG_MAP(GpuChangeState, PPSMC_MSG_GpuChangeState),
- MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage),
- MSG_MAP(ForceGfxContentSave, PPSMC_MSG_ForceGfxContentSave),
- MSG_MAP(EnableTmdp48MHzRefclkPwrDown, PPSMC_MSG_EnableTmdp48MHzRefclkPwrDown),
- MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg),
- MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg),
- MSG_MAP(PowerGateAtHub, PPSMC_MSG_PowerGateAtHub),
- MSG_MAP(SetSoftMinJpeg, PPSMC_MSG_SetSoftMinJpeg),
- MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq),
+static struct cmn2asic_msg_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
+ MSG_MAP(PowerUpGfx, PPSMC_MSG_PowerUpGfx, 1),
+ MSG_MAP(AllowGfxOff, PPSMC_MSG_EnableGfxOff, 1),
+ MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisableGfxOff, 1),
+ MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 1),
+ MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 1),
+ MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
+ MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
+ MSG_MAP(PowerDownSdma, PPSMC_MSG_PowerDownSdma, 1),
+ MSG_MAP(PowerUpSdma, PPSMC_MSG_PowerUpSdma, 1),
+ MSG_MAP(SetHardMinIspclkByFreq, PPSMC_MSG_SetHardMinIspclkByFreq, 1),
+ MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1),
+ MSG_MAP(Spare1, PPSMC_MSG_spare1, 1),
+ MSG_MAP(Spare2, PPSMC_MSG_spare2, 1),
+ MSG_MAP(SetAllowFclkSwitch, PPSMC_MSG_SetAllowFclkSwitch, 1),
+ MSG_MAP(SetMinVideoGfxclkFreq, PPSMC_MSG_SetMinVideoGfxclkFreq, 1),
+ MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 1),
+ MSG_MAP(SetCustomPolicy, PPSMC_MSG_SetCustomPolicy, 1),
+ MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps, 1),
+ MSG_MAP(NumOfDisplays, PPSMC_MSG_SetDisplayCount, 1),
+ MSG_MAP(QueryPowerLimit, PPSMC_MSG_QueryPowerLimit, 1),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
+ MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
+ MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 1),
+ MSG_MAP(SetGfxclkOverdriveByFreqVid, PPSMC_MSG_SetGfxclkOverdriveByFreqVid, 1),
+ MSG_MAP(SetHardMinDcfclkByFreq, PPSMC_MSG_SetHardMinDcfclkByFreq, 1),
+ MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 1),
+ MSG_MAP(ControlIgpuATS, PPSMC_MSG_ControlIgpuATS, 1),
+ MSG_MAP(SetMinVideoFclkFreq, PPSMC_MSG_SetMinVideoFclkFreq, 1),
+ MSG_MAP(SetMinDeepSleepDcfclk, PPSMC_MSG_SetMinDeepSleepDcfclk, 1),
+ MSG_MAP(ForcePowerDownGfx, PPSMC_MSG_ForcePowerDownGfx, 1),
+ MSG_MAP(SetPhyclkVoltageByFreq, PPSMC_MSG_SetPhyclkVoltageByFreq, 1),
+ MSG_MAP(SetDppclkVoltageByFreq, PPSMC_MSG_SetDppclkVoltageByFreq, 1),
+ MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1),
+ MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode, 1),
+ MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 1),
+ MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 1),
+ MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxclkFrequency, 1),
+ MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxclkFrequency, 1),
+ MSG_MAP(SoftReset, PPSMC_MSG_SoftReset, 1),
+ MSG_MAP(SetGfxCGPG, PPSMC_MSG_SetGfxCGPG, 1),
+ MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
+ MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1),
+ MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1),
+ MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1),
+ MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1),
+ MSG_MAP(PowerGateMmHub, PPSMC_MSG_PowerGateMmHub, 1),
+ MSG_MAP(UpdatePmeRestore, PPSMC_MSG_UpdatePmeRestore, 1),
+ MSG_MAP(GpuChangeState, PPSMC_MSG_GpuChangeState, 1),
+ MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 1),
+ MSG_MAP(ForceGfxContentSave, PPSMC_MSG_ForceGfxContentSave, 1),
+ MSG_MAP(EnableTmdp48MHzRefclkPwrDown, PPSMC_MSG_EnableTmdp48MHzRefclkPwrDown, 1),
+ MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1),
+ MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1),
+ MSG_MAP(PowerGateAtHub, PPSMC_MSG_PowerGateAtHub, 1),
+ MSG_MAP(SetSoftMinJpeg, PPSMC_MSG_SetSoftMinJpeg, 1),
+ MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1),
};
-static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
+static struct cmn2asic_mapping renoir_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(GFXCLK, CLOCK_GFXCLK),
CLK_MAP(SCLK, CLOCK_GFXCLK),
CLK_MAP(SOCCLK, CLOCK_SOCCLK),
@@ -123,55 +113,20 @@ static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(MCLK, CLOCK_FCLK),
};
-static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = {
+static struct cmn2asic_mapping renoir_table_map[SMU_TABLE_COUNT] = {
TAB_MAP_VALID(WATERMARKS),
TAB_MAP_INVALID(CUSTOM_DPM),
TAB_MAP_VALID(DPMCLOCKS),
TAB_MAP_VALID(SMU_METRICS),
};
-static int renoir_get_smu_msg_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_12_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_MSG_MAX_COUNT)
- return -EINVAL;
-
- mapping = renoir_message_map[index];
- if (!(mapping.valid_mapping))
- return -EINVAL;
-
- return mapping.map_to;
-}
-
-static int renoir_get_smu_clk_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_12_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_CLK_COUNT)
- return -EINVAL;
-
- mapping = renoir_clk_map[index];
- if (!(mapping.valid_mapping)) {
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
-static int renoir_get_smu_table_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_12_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_TABLE_COUNT)
- return -EINVAL;
-
- mapping = renoir_table_map[index];
- if (!(mapping.valid_mapping))
- return -EINVAL;
-
- return mapping.map_to;
-}
+static struct cmn2asic_mapping renoir_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
+};
static int renoir_get_metrics_table(struct smu_context *smu,
SmuMetrics_t *metrics_table)
@@ -181,7 +136,7 @@ static int renoir_get_metrics_table(struct smu_context *smu,
mutex_lock(&smu->metrics_lock);
if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
+ ret = smu_cmn_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)smu_table->metrics_table, false);
if (ret) {
dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
@@ -197,9 +152,10 @@ static int renoir_get_metrics_table(struct smu_context *smu,
return ret;
}
-static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
+static int renoir_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -301,7 +257,7 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
uint32_t mclk_mask, soc_mask;
uint32_t clock_limit;
- if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
@@ -340,7 +296,7 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max);
if (ret) {
dev_err(smu->adev->dev, "Attempt to get max GX frequency from SMC Failed !\n");
goto failed;
@@ -368,7 +324,7 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min);
if (ret) {
dev_err(smu->adev->dev, "Attempt to get min GX frequency from SMC Failed !\n");
goto failed;
@@ -509,15 +465,15 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
- if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
if (ret)
return ret;
}
power_gate->vcn_gated = false;
} else {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
if (ret)
return ret;
}
@@ -534,15 +490,15 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
int ret = 0;
if (enable) {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
if (ret)
return ret;
}
power_gate->jpeg_gated = false;
} else {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
if (ret)
return ret;
}
@@ -563,7 +519,9 @@ static int renoir_get_current_clk_freq_by_table(struct smu_context *smu,
if (ret)
return ret;
- clk_id = smu_clk_get_index(smu, clk_type);
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
if (clk_id < 0)
return clk_id;
@@ -615,7 +573,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
};
for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
- if (!smu_feature_is_enabled(smu, clk_feature_map[i].feature))
+ if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature))
continue;
clk_type = clk_feature_map[i].clk_type;
@@ -676,35 +634,6 @@ static int renoir_get_current_activity_percent(struct smu_context *smu,
return 0;
}
-static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile)
-{
-
- uint32_t pplib_workload = 0;
-
- switch (profile) {
- case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
- pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
- break;
- case PP_SMC_POWER_PROFILE_CUSTOM:
- pplib_workload = WORKLOAD_PPLIB_COUNT;
- break;
- case PP_SMC_POWER_PROFILE_VIDEO:
- pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
- break;
- case PP_SMC_POWER_PROFILE_VR:
- pplib_workload = WORKLOAD_PPLIB_VR_BIT;
- break;
- case PP_SMC_POWER_PROFILE_COMPUTE:
- pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
- break;
- default:
- return -EINVAL;
- }
-
- return pplib_workload;
-}
-
-
/**
* This interface get dpm clock table for dc
*/
@@ -760,13 +689,13 @@ static int renoir_force_clk_levels(struct smu_context *smu,
ret = renoir_get_dpm_ultimate_freq(smu, SMU_GFXCLK, &min_freq, &max_freq);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
soft_max_level == 0 ? min_freq :
soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq,
NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
soft_min_level == 2 ? max_freq :
soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq,
NULL);
@@ -780,10 +709,10 @@ static int renoir_force_clk_levels(struct smu_context *smu,
ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_max_level, &max_freq);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL);
if (ret)
return ret;
break;
@@ -795,10 +724,10 @@ static int renoir_force_clk_levels(struct smu_context *smu,
ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_max_level, &max_freq);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL);
if (ret)
return ret;
break;
@@ -820,7 +749,9 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
}
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_workload_get_type(smu, profile_mode);
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ profile_mode);
if (workload_type < 0) {
/*
* TODO: If some case need switch to powersave/default power mode
@@ -830,7 +761,7 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
return -EINVAL;
}
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
1 << workload_type,
NULL);
if (ret) {
@@ -912,60 +843,59 @@ static int renoir_set_performance_level(struct smu_context *smu,
*/
static int renoir_set_watermarks_table(
struct smu_context *smu,
- void *watermarks,
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
- int i;
+ Watermarks_t *table = smu->smu_table.watermarks_table;
int ret = 0;
- Watermarks_t *table = watermarks;
+ int i;
- if (!table || !clock_ranges)
- return -EINVAL;
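+ /* With a NULL clock_ranges, only the previously cached table (if any) is passed to the SMU below */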
+ if (clock_ranges) {
+ if (clock_ranges->num_wm_dmif_sets > 4 ||
+ clock_ranges->num_wm_mcif_sets > 4)
+ return -EINVAL;
- if (clock_ranges->num_wm_dmif_sets > 4 ||
- clock_ranges->num_wm_mcif_sets > 4)
- return -EINVAL;
+ /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr */
+ for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+ table->WatermarkRow[WM_DCFCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MinMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t)
+ clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ }
- /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/
- for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
- table->WatermarkRow[WM_DCFCLK][i].MinClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz));
- table->WatermarkRow[WM_DCFCLK][i].MaxClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz));
- table->WatermarkRow[WM_DCFCLK][i].MinMclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz));
- table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz));
- table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t)
- clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
- }
+ for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+ table->WatermarkRow[WM_SOCCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MinMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
+ clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ }
- for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
- table->WatermarkRow[WM_SOCCLK][i].MinClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz));
- table->WatermarkRow[WM_SOCCLK][i].MaxClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz));
- table->WatermarkRow[WM_SOCCLK][i].MinMclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz));
- table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz));
- table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
- clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
}
- smu->watermarks_bitmap |= WATERMARKS_EXIST;
-
/* pass data to smu controller */
- if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
- ret = smu_write_watermarks_table(smu);
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu_cmn_write_watermarks_table(smu);
if (ret) {
dev_err(smu->adev->dev, "Failed to update WMTABLE!");
return ret;
@@ -998,7 +928,9 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
* Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
* Not all profile modes are supported on arcturus.
*/
- workload_type = smu_workload_get_type(smu, i);
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ i);
if (workload_type < 0)
continue;
@@ -1064,16 +996,11 @@ static bool renoir_is_dpm_running(struct smu_context *smu)
}
static const struct pptable_funcs renoir_ppt_funcs = {
- .get_smu_msg_index = renoir_get_smu_msg_index,
- .get_smu_clk_index = renoir_get_smu_clk_index,
- .get_smu_table_index = renoir_get_smu_table_index,
- .tables_init = renoir_tables_init,
.set_power_state = NULL,
.print_clk_levels = renoir_print_clk_levels,
.get_current_power_state = renoir_get_current_power_state,
.dpm_set_vcn_enable = renoir_dpm_set_vcn_enable,
.dpm_set_jpeg_enable = renoir_dpm_set_jpeg_enable,
- .get_workload_type = renoir_get_workload_type,
.force_clk_levels = renoir_force_clk_levels,
.set_power_profile_mode = renoir_set_power_profile_mode,
.set_performance_level = renoir_set_performance_level,
@@ -1084,23 +1011,33 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.check_fw_status = smu_v12_0_check_fw_status,
.check_fw_version = smu_v12_0_check_fw_version,
.powergate_sdma = smu_v12_0_powergate_sdma,
- .send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
+ .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+ .send_smc_msg = smu_cmn_send_smc_msg,
.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
.gfx_off_control = smu_v12_0_gfx_off_control,
- .init_smc_tables = smu_v12_0_init_smc_tables,
+ .get_gfx_off_status = smu_v12_0_get_gfxoff_status,
+ .init_smc_tables = renoir_init_smc_tables,
.fini_smc_tables = smu_v12_0_fini_smc_tables,
.set_default_dpm_table = smu_v12_0_set_default_dpm_tables,
- .get_enabled_mask = smu_v12_0_get_enabled_mask,
+ .get_enabled_mask = smu_cmn_get_enabled_mask,
+ .feature_is_enabled = smu_cmn_feature_is_enabled,
+ .disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
.get_dpm_ultimate_freq = renoir_get_dpm_ultimate_freq,
.mode2_reset = smu_v12_0_mode2_reset,
.set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
.set_driver_table_location = smu_v12_0_set_driver_table_location,
.is_dpm_running = renoir_is_dpm_running,
+ .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+ .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &renoir_ppt_funcs;
+ smu->message_map = renoir_message_map;
+ smu->clock_map = renoir_clk_map;
+ smu->table_map = renoir_table_map;
+ smu->workload_map = renoir_workload_map;
smu->smc_driver_if_version = SMU12_DRIVER_IF_VERSION;
smu->is_apu = true;
}
diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
index 5faef41b63a3..59da3ca2a4ca 100644
--- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
@@ -21,13 +21,16 @@
*
*/
+#define SWSMU_CODE_LAYER_L2
+
#include <linux/firmware.h>
#include <linux/pci.h>
+#include <linux/i2c.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
-#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_sienna_cichlid.h"
#include "soc15_common.h"
@@ -42,6 +45,9 @@
#include "mp/mp_11_0_offset.h"
#include "mp/mp_11_0_sh_mask.h"
+#include "asic_reg/mp/mp_11_0_sh_mask.h"
+#include "smu_cmn.h"
+
/*
* DO NOT use these for err/warn/info/debug messages.
* Use dev_err, dev_warn, dev_info and dev_dbg instead.
@@ -52,6 +58,8 @@
#undef pr_info
#undef pr_debug
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
@@ -62,64 +70,60 @@
FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
-#define MSG_MAP(msg, index) \
- [SMU_MSG_##msg] = {1, (index)}
-
-static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] = {
- MSG_MAP(TestMessage, PPSMC_MSG_TestMessage),
- MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion),
- MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion),
- MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow),
- MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh),
- MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures),
- MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures),
- MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow),
- MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh),
- MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow),
- MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh),
- MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow),
- MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh),
- MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask),
- MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit),
- MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh),
- MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow),
- MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh),
- MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow),
- MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram),
- MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu),
- MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable),
- MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco),
- MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq),
- MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq),
- MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq),
- MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq),
- MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq),
- MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq),
- MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex),
- MSG_MAP(SetGeminiMode, PPSMC_MSG_SetGeminiMode),
- MSG_MAP(SetGeminiApertureHigh, PPSMC_MSG_SetGeminiApertureHigh),
- MSG_MAP(SetGeminiApertureLow, PPSMC_MSG_SetGeminiApertureLow),
- MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters),
- MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt),
- MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource),
- MSG_MAP(SetUclkFastSwitch, PPSMC_MSG_SetUclkFastSwitch),
- MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps),
- MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload),
- MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff),
- MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff),
- MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit),
- MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq),
- MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco),
- MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn),
- MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn),
- MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg),
- MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg),
- MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME),
- MSG_MAP(ArmD3, PPSMC_MSG_ArmD3),
- MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset),
+static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
+ MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 1),
+ MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 1),
+ MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 1),
+ MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 1),
+ MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1),
+ MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1),
+ MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1),
+ MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1),
+ MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1),
+ MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1),
+ MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 1),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
+ MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 1),
+ MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 1),
+ MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
+ MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1),
+ MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 1),
+ MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 1),
+ MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
+ MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
+ MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 1),
+ MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
+ MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
+ MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
+ MSG_MAP(SetGeminiMode, PPSMC_MSG_SetGeminiMode, 1),
+ MSG_MAP(SetGeminiApertureHigh, PPSMC_MSG_SetGeminiApertureHigh, 1),
+ MSG_MAP(SetGeminiApertureLow, PPSMC_MSG_SetGeminiApertureLow, 1),
+ MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 1),
+ MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 1),
+ MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 1),
+ MSG_MAP(SetUclkFastSwitch, PPSMC_MSG_SetUclkFastSwitch, 1),
+ MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps, 1),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
+ MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
+ MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
+ MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1),
+ MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 1),
+ MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
+ MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
+ MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1),
+ MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1),
+ MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 1),
+ MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 1),
};
-static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = {
+static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(GFXCLK, PPCLK_GFXCLK),
CLK_MAP(SCLK, PPCLK_GFXCLK),
CLK_MAP(SOCCLK, PPCLK_SOCCLK),
@@ -127,8 +131,8 @@ static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] =
CLK_MAP(UCLK, PPCLK_UCLK),
CLK_MAP(MCLK, PPCLK_UCLK),
CLK_MAP(DCLK, PPCLK_DCLK_0),
- CLK_MAP(DCLK1, PPCLK_DCLK_0),
- CLK_MAP(VCLK, PPCLK_VCLK_1),
+ CLK_MAP(DCLK1, PPCLK_DCLK_1),
+ CLK_MAP(VCLK, PPCLK_VCLK_0),
CLK_MAP(VCLK1, PPCLK_VCLK_1),
CLK_MAP(DCEFCLK, PPCLK_DCEFCLK),
CLK_MAP(DISPCLK, PPCLK_DISPCLK),
@@ -136,7 +140,7 @@ static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] =
CLK_MAP(PHYCLK, PPCLK_PHYCLK),
};
-static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_feature_mask_map[SMU_FEATURE_COUNT] = {
+static struct cmn2asic_mapping sienna_cichlid_feature_mask_map[SMU_FEATURE_COUNT] = {
FEA_MAP(DPM_PREFETCHER),
FEA_MAP(DPM_GFXCLK),
FEA_MAP(DPM_GFX_GPO),
@@ -179,7 +183,7 @@ static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_feature_mask_map[SMU_FEAT
FEA_MAP(APCC_DFLL),
};
-static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_table_map[SMU_TABLE_COUNT] = {
+static struct cmn2asic_mapping sienna_cichlid_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(PPTABLE),
TAB_MAP(WATERMARKS),
TAB_MAP(AVFS_PSM_DEBUG),
@@ -193,12 +197,12 @@ static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_table_map[SMU_TABLE_COUNT
TAB_MAP(PACE),
};
-static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+static struct cmn2asic_mapping sienna_cichlid_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
PWR_MAP(AC),
PWR_MAP(DC),
};
-static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+static struct cmn2asic_mapping sienna_cichlid_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
@@ -208,96 +212,6 @@ static struct smu_11_0_cmn2aisc_mapping sienna_cichlid_workload_map[PP_SMC_POWER
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
-static int sienna_cichlid_get_smu_msg_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_MSG_MAX_COUNT)
- return -EINVAL;
-
- mapping = sienna_cichlid_message_map[index];
- if (!(mapping.valid_mapping)) {
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
-static int sienna_cichlid_get_smu_clk_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_CLK_COUNT)
- return -EINVAL;
-
- mapping = sienna_cichlid_clk_map[index];
- if (!(mapping.valid_mapping)) {
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
-static int sienna_cichlid_get_smu_feature_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_FEATURE_COUNT)
- return -EINVAL;
-
- mapping = sienna_cichlid_feature_mask_map[index];
- if (!(mapping.valid_mapping)) {
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
-static int sienna_cichlid_get_smu_table_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_TABLE_COUNT)
- return -EINVAL;
-
- mapping = sienna_cichlid_table_map[index];
- if (!(mapping.valid_mapping)) {
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
-static int sienna_cichlid_get_pwr_src_index(struct smu_context *smc, uint32_t index)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (index >= SMU_POWER_SOURCE_COUNT)
- return -EINVAL;
-
- mapping = sienna_cichlid_pwr_src_map[index];
- if (!(mapping.valid_mapping)) {
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
-static int sienna_cichlid_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
-{
- struct smu_11_0_cmn2aisc_mapping mapping;
-
- if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
- return -EINVAL;
-
- mapping = sienna_cichlid_workload_map[profile];
- if (!(mapping.valid_mapping)) {
- return -EINVAL;
- }
-
- return mapping.map_to;
-}
-
static int
sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
@@ -399,7 +313,7 @@ static int sienna_cichlid_append_powerplay_table(struct smu_context *smu)
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
smc_dpm_info);
- ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+ ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
(uint8_t **)&smc_dpm_table);
if (ret)
return ret;
@@ -445,9 +359,10 @@ static int sienna_cichlid_setup_pptable(struct smu_context *smu)
return ret;
}
-static int sienna_cichlid_tables_init(struct smu_context *smu, struct smu_table *tables)
+static int sienna_cichlid_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -455,6 +370,8 @@ static int sienna_cichlid_tables_init(struct smu_context *smu, struct smu_table
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
@@ -486,7 +403,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
mutex_lock(&smu->metrics_lock);
if (!smu_table->metrics_time ||
time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
- ret = smu_update_table(smu,
+ ret = smu_cmn_update_table(smu,
SMU_TABLE_SMU_METRICS,
0,
smu_table->metrics_table,
@@ -583,9 +500,6 @@ static int sienna_cichlid_allocate_dpm_context(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
- if (smu_dpm->dpm_context)
- return -EINVAL;
-
smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
GFP_KERNEL);
if (!smu_dpm->dpm_context)
@@ -596,16 +510,32 @@ static int sienna_cichlid_allocate_dpm_context(struct smu_context *smu)
return 0;
}
+static int sienna_cichlid_init_smc_tables(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = sienna_cichlid_tables_init(smu);
+ if (ret)
+ return ret;
+
+ ret = sienna_cichlid_allocate_dpm_context(smu);
+ if (ret)
+ return ret;
+
+ return smu_v11_0_init_smc_tables(smu);
+}
+
static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
{
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
struct smu_11_0_dpm_table *dpm_table;
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
/* socclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.soc_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_SOCCLK,
dpm_table);
@@ -623,7 +553,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
/* gfxclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.gfx_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_GFXCLK,
dpm_table);
@@ -641,7 +571,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
/* uclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.uclk_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_UCLK,
dpm_table);
@@ -659,7 +589,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
/* fclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.fclk_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_FCLK,
dpm_table);
@@ -677,7 +607,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
/* vclk0 dpm table setup */
dpm_table = &dpm_context->dpm_tables.vclk_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_VCLK,
dpm_table);
@@ -694,26 +624,29 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
}
/* vclk1 dpm table setup */
- dpm_table = &dpm_context->dpm_tables.vclk1_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_v11_0_set_single_dpm_table(smu,
- SMU_VCLK1,
- dpm_table);
- if (ret)
- return ret;
- dpm_table->is_fine_grained =
- !driver_ppt->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete;
- } else {
- dpm_table->count = 1;
- dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
- dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
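+ /* The VCLK1/DCLK1 DPM tables only exist on parts with a second VCN instance */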
+ if (adev->vcn.num_vcn_inst > 1) {
+ dpm_table = &dpm_context->dpm_tables.vclk1_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+ ret = smu_v11_0_set_single_dpm_table(smu,
+ SMU_VCLK1,
+ dpm_table);
+ if (ret)
+ return ret;
+ dpm_table->is_fine_grained =
+ !driver_ppt->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value =
+ smu->smu_table.boot_values.vclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
}
/* dclk0 dpm table setup */
dpm_table = &dpm_context->dpm_tables.dclk_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_DCLK,
dpm_table);
@@ -730,26 +663,29 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
}
/* dclk1 dpm table setup */
- dpm_table = &dpm_context->dpm_tables.dclk1_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_v11_0_set_single_dpm_table(smu,
- SMU_DCLK1,
- dpm_table);
- if (ret)
- return ret;
- dpm_table->is_fine_grained =
- !driver_ppt->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete;
- } else {
- dpm_table->count = 1;
- dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
- dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
+ if (adev->vcn.num_vcn_inst > 1) {
+ dpm_table = &dpm_context->dpm_tables.dclk1_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+ ret = smu_v11_0_set_single_dpm_table(smu,
+ SMU_DCLK1,
+ dpm_table);
+ if (ret)
+ return ret;
+ dpm_table->is_fine_grained =
+ !driver_ppt->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value =
+ smu->smu_table.boot_values.dclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
}
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_DCEFCLK,
dpm_table);
@@ -767,7 +703,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
/* pixelclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.pixel_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_PIXCLK,
dpm_table);
@@ -785,7 +721,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
/* displayclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.display_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_DISPCLK,
dpm_table);
@@ -803,7 +739,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
/* phyclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.phy_table;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
SMU_PHYCLK,
dpm_table);
@@ -832,12 +768,12 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
- if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
if (ret)
return ret;
if (adev->asic_type == CHIP_SIENNA_CICHLID) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
0x10000, NULL);
if (ret)
return ret;
@@ -845,12 +781,12 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
}
power_gate->vcn_gated = false;
} else {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
if (ret)
return ret;
if (adev->asic_type == CHIP_SIENNA_CICHLID) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
0x10000, NULL);
if (ret)
return ret;
@@ -869,15 +805,15 @@ static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enab
int ret = 0;
if (enable) {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
if (ret)
return ret;
}
power_gate->jpeg_gated = false;
} else {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
if (ret)
return ret;
}
@@ -894,7 +830,9 @@ static int sienna_cichlid_get_current_clk_freq_by_table(struct smu_context *smu,
MetricsMember_t member_type;
int clk_id = 0;
- clk_id = smu_clk_get_index(smu, clk_type);
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
if (clk_id < 0)
return clk_id;
@@ -942,7 +880,9 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu,
DpmDescriptor_t *dpm_desc = NULL;
uint32_t clk_index = 0;
- clk_index = smu_clk_get_index(smu, clk_type);
+ clk_index = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
dpm_desc = &pptable->DpmDescriptor[clk_index];
/* 0 - Fine grained DPM, 1 - Discrete DPM */
@@ -1131,12 +1071,12 @@ static int sienna_cichlid_pre_display_config_changed(struct smu_context *smu)
/* Sienna_Cichlid do not support to change display num currently */
return 0;
#if 0
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
if (ret)
return ret;
#endif
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &max_freq);
if (ret)
return ret;
@@ -1153,10 +1093,10 @@ static int sienna_cichlid_display_config_changed(struct smu_context *smu)
int ret = 0;
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
- smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
#if 0
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
smu->display_config->num_display,
NULL);
#endif
@@ -1210,7 +1150,7 @@ static bool sienna_cichlid_is_dpm_running(struct smu_context *smu)
int ret = 0;
uint32_t feature_mask[2];
unsigned long feature_enabled;
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
((uint64_t)feature_mask[1] << 32));
return !!(feature_enabled & SMC_DPM_FEATURE);
@@ -1281,11 +1221,13 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_workload_get_type(smu, i);
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ i);
if (workload_type < 0)
return -EINVAL;
- result = smu_update_table(smu,
+ result = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
(void *)(&activity_monitor), false);
if (result) {
@@ -1356,7 +1298,7 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- ret = smu_update_table(smu,
+ ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), false);
if (ret) {
@@ -1400,7 +1342,7 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
break;
}
- ret = smu_update_table(smu,
+ ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), true);
if (ret) {
@@ -1410,10 +1352,12 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
}
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ smu->power_profile_mode);
if (workload_type < 0)
return -EINVAL;
- smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+ smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1 << workload_type, NULL);
return ret;
@@ -1429,14 +1373,14 @@ static int sienna_cichlid_notify_smc_display_config(struct smu_context *smu)
min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
- if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
if (!ret) {
- if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
- ret = smu_send_smc_msg_with_param(smu,
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
min_clocks.dcef_clock_in_sr/100,
NULL);
@@ -1450,7 +1394,7 @@ static int sienna_cichlid_notify_smc_display_config(struct smu_context *smu)
}
}
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
if (ret) {
dev_err(smu->adev->dev, "[%s] Set hard min uclk failed!", __func__);
@@ -1462,67 +1406,66 @@ static int sienna_cichlid_notify_smc_display_config(struct smu_context *smu)
}
static int sienna_cichlid_set_watermarks_table(struct smu_context *smu,
- void *watermarks, struct
- dm_pp_wm_sets_with_clock_ranges_soc15
- *clock_ranges)
+ struct dm_pp_wm_sets_with_clock_ranges_soc15
+ *clock_ranges)
{
- int i;
+ Watermarks_t *table = smu->smu_table.watermarks_table;
int ret = 0;
- Watermarks_t *table = watermarks;
+ int i;
- if (!table || !clock_ranges)
- return -EINVAL;
+ if (clock_ranges) {
+ if (clock_ranges->num_wm_dmif_sets > 4 ||
+ clock_ranges->num_wm_mcif_sets > 4)
+ return -EINVAL;
+
+ for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+ table->WatermarkRow[1][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].WmSetting = (uint8_t)
+ clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ }
+
+ for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+ table->WatermarkRow[0][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].WmSetting = (uint8_t)
+ clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ }
- if (clock_ranges->num_wm_dmif_sets > 4 ||
- clock_ranges->num_wm_mcif_sets > 4)
- return -EINVAL;
-
- for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
- table->WatermarkRow[1][i].MinClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].MaxClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].MinUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].WmSetting = (uint8_t)
- clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
- }
-
- for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
- table->WatermarkRow[0][i].MinClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].MaxClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].MinUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].WmSetting = (uint8_t)
- clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
- }
-
- smu->watermarks_bitmap |= WATERMARKS_EXIST;
-
- if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
- ret = smu_write_watermarks_table(smu);
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+ }
+
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu_cmn_write_watermarks_table(smu);
if (ret) {
dev_err(smu->adev->dev, "Failed to update WMTABLE!");
return ret;
@@ -1759,7 +1702,7 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
pptable->PcieLaneCount[i] :
pcie_width_cap);
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
@@ -1814,7 +1757,7 @@ static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu)
* SRIOV env will not support SMU mode1 reset
* PM FW support mode1 reset from 58.26
*/
- smu_get_smc_version(smu, NULL, &smu_version);
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (amdgpu_sriov_vf(adev) || (smu_version < 0x003a1a00))
return false;
@@ -2487,19 +2430,245 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu)
dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]);
}
+static void sienna_cichlid_fill_i2c_req(SwI2cRequest_t *req, bool write,
+ uint8_t address, uint32_t numbytes,
+ uint8_t *data)
+{
+ int i;
+
+ BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
+
+ req->I2CcontrollerPort = 0;
+ req->I2CSpeed = 2;
+ req->SlaveAddress = address;
+ req->NumCmds = numbytes;
+
+ for (i = 0; i < numbytes; i++) {
+ SwI2cCmd_t *cmd = &req->SwI2cCmds[i];
+
+ /* First 2 bytes are always writes, carrying the lower 2 bytes of the EEPROM address */
+ if (i < 2)
+ cmd->CmdConfig = CMDCONFIG_READWRITE_MASK;
+ else
+ cmd->CmdConfig = write ? CMDCONFIG_READWRITE_MASK : 0;
+
+ /* Add RESTART for a read, once the address bytes have been sent */
+ cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
+
+ /* Add STOP at the end */
+ cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;
+
+ /* Fill with data regardless of read or write to simplify the code */
+ cmd->ReadWriteData = data[i];
+ }
+}
+
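/*
 * Illustrative sketch (not part of the patch): the framing the helper above
 * produces for a 4-byte read at EEPROM offset 0x0010, i.e. numbytes = 6 with
 * data[0] = 0x00 and data[1] = 0x10. The flags follow directly from the loop:
 *
 *	SwI2cCmds[0]: 0x00, write		(EEPROM address, high byte)
 *	SwI2cCmds[1]: 0x10, write		(EEPROM address, low byte)
 *	SwI2cCmds[2]: read | RESTART		(switch the bus to reading)
 *	SwI2cCmds[3]: read
 *	SwI2cCmds[4]: read
 *	SwI2cCmds[5]: read | STOP		(close the transaction)
 */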
+static int sienna_cichlid_i2c_read_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t i, ret = 0;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+
+ memset(&req, 0, sizeof(req));
+ sienna_cichlid_fill_i2c_req(&req, false, address, numbytes, data);
+
+ mutex_lock(&adev->smu.mutex);
+ /* Now read data starting with that address */
+ ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+ true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+ SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
+
+ /* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */
+ for (i = 0; i < numbytes; i++)
+ data[i] = res->SwI2cCmds[i].ReadWriteData;
+
+ dev_dbg(adev->dev, "sienna_cichlid_i2c_read_data, address = %x, bytes = %d, data :",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+ } else
+ dev_err(adev->dev, "sienna_cichlid_i2c_read_data - error occurred :%x", ret);
+
+ return ret;
+}
+
+static int sienna_cichlid_i2c_write_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t ret;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ memset(&req, 0, sizeof(req));
+ sienna_cichlid_fill_i2c_req(&req, true, address, numbytes, data);
+
+ mutex_lock(&adev->smu.mutex);
+ ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+ dev_dbg(adev->dev, "sienna_cichlid_i2c_write(), address = %x, bytes = %d , data: ",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+ /*
+ * According to the EEPROM spec, the EEPROM needs up to 10 ms to
+ * flush its internal RX buffer after STOP is issued at the end of
+ * a write transaction. During this time the EEPROM will not respond
+ * to any further commands - so wait a bit longer.
+ */
+ msleep(10);
+
+ } else
+ dev_err(adev->dev, "sienna_cichlid_i2c_write- error occurred :%x", ret);
+
+ return ret;
+}
+
+static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
+ uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
+
+ for (i = 0; i < num; i++) {
+ /*
+ * The SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at
+ * once, so the data needs to be split into chunks, with each chunk sent
+ * separately.
+ */
+ data_size = msgs[i].len - 2;
+ data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
+ next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
+ data_ptr = msgs[i].buf + 2;
+
+ for (j = 0; j < data_size / data_chunk_size; j++) {
+ /* Insert the EEPROM dest address, bits 0-15 */
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = sienna_cichlid_i2c_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+
+ memcpy(data_ptr, data_chunk + 2, data_chunk_size);
+ } else {
+
+ memcpy(data_chunk + 2, data_ptr, data_chunk_size);
+
+ ret = sienna_cichlid_i2c_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+
+ next_eeprom_addr += data_chunk_size;
+ data_ptr += data_chunk_size;
+ }
+
+ if (data_size % data_chunk_size) {
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = sienna_cichlid_i2c_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+
+ memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
+ } else {
+ memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
+
+ ret = sienna_cichlid_i2c_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+fail:
+ return num;
+}
+
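/*
 * Illustrative sketch (not part of the patch): the chunking arithmetic used by
 * the transfer loop above, for a hypothetical msg_len of one i2c_msg.
 */
uint32_t data_size = msg_len - 2;               /* payload after the 2 address bytes */
uint32_t chunk_size = MAX_SW_I2C_COMMANDS - 2;  /* payload bytes per SMU request */
uint32_t full_chunks = data_size / chunk_size;  /* each sent as MAX_SW_I2C_COMMANDS bytes */
uint32_t tail_bytes = data_size % chunk_size;   /* final request is tail_bytes + 2 bytes */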
+static u32 sienna_cichlid_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm sienna_cichlid_i2c_algo = {
+ .master_xfer = sienna_cichlid_i2c_xfer,
+ .functionality = sienna_cichlid_i2c_func,
+};
+
+static bool sienna_cichlid_i2c_adapter_is_added(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ return control->dev.parent == &adev->pdev->dev;
+}
+
+static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ int res;
+
+ /* smu_i2c_init may be called twice in SR-IOV */
+ if (sienna_cichlid_i2c_adapter_is_added(control))
+ return 0;
+
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_SPD;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &sienna_cichlid_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+
+ res = i2c_add_adapter(control);
+ if (res)
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+
+ return res;
+}
+
+static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+{
+ if (!sienna_cichlid_i2c_adapter_is_added(control))
+ return;
+
+ i2c_del_adapter(control);
+}
+
static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
- .tables_init = sienna_cichlid_tables_init,
- .alloc_dpm_context = sienna_cichlid_allocate_dpm_context,
- .get_smu_msg_index = sienna_cichlid_get_smu_msg_index,
- .get_smu_clk_index = sienna_cichlid_get_smu_clk_index,
- .get_smu_feature_index = sienna_cichlid_get_smu_feature_index,
- .get_smu_table_index = sienna_cichlid_get_smu_table_index,
- .get_smu_power_index = sienna_cichlid_get_pwr_src_index,
- .get_workload_type = sienna_cichlid_get_workload_type,
.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
.dpm_set_vcn_enable = sienna_cichlid_dpm_set_vcn_enable,
.dpm_set_jpeg_enable = sienna_cichlid_dpm_set_jpeg_enable,
+ .i2c_init = sienna_cichlid_i2c_control_init,
+ .i2c_fini = sienna_cichlid_i2c_control_fini,
.print_clk_levels = sienna_cichlid_print_clk_levels,
.force_clk_levels = sienna_cichlid_force_clk_levels,
.populate_umd_state_clk = sienna_cichlid_populate_umd_state_clk,
@@ -2522,7 +2691,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.dump_pptable = sienna_cichlid_dump_pptable,
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
- .init_smc_tables = smu_v11_0_init_smc_tables,
+ .init_smc_tables = sienna_cichlid_init_smc_tables,
.fini_smc_tables = smu_v11_0_fini_smc_tables,
.init_power = smu_v11_0_init_power,
.fini_power = smu_v11_0_fini_power,
@@ -2530,15 +2699,18 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.setup_pptable = sienna_cichlid_setup_pptable,
.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
.check_fw_version = smu_v11_0_check_fw_version,
- .write_pptable = smu_v11_0_write_pptable,
+ .write_pptable = smu_cmn_write_pptable,
.set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
- .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+ .send_smc_msg = smu_cmn_send_smc_msg,
.init_display_count = NULL,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
- .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .get_enabled_mask = smu_cmn_get_enabled_mask,
+ .feature_is_enabled = smu_cmn_feature_is_enabled,
+ .disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
.notify_display_change = NULL,
.set_power_limit = smu_v11_0_set_power_limit,
.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
@@ -2564,9 +2736,17 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.mode1_reset = smu_v11_0_mode1_reset,
.get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+ .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
};
void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &sienna_cichlid_ppt_funcs;
+ smu->message_map = sienna_cichlid_message_map;
+ smu->clock_map = sienna_cichlid_clk_map;
+ smu->feature_map = sienna_cichlid_feature_mask_map;
+ smu->table_map = sienna_cichlid_table_map;
+ smu->pwr_src_map = sienna_cichlid_pwr_src_map;
+ smu->workload_map = sienna_cichlid_workload_map;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_cmn.c b/drivers/gpu/drm/amd/powerplay/smu_cmn.c
new file mode 100644
index 000000000000..be4b678d0e60
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smu_cmn.c
@@ -0,0 +1,633 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#define SWSMU_CODE_LAYER_L4
+
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "smu_cmn.h"
+#include "soc15_common.h"
+
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+/*
+ * Although these are defined in each ASIC's specific header file,
+ * they share the same definitions and values. That makes common
+ * SMC message issuing APIs possible for all ASICs.
+ */
+#define mmMP1_SMN_C2PMSG_66 0x0282
+#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_82 0x0292
+#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_90 0x029a
+#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+
+#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(type) #type
+static const char* __smu_message_names[] = {
+ SMU_MESSAGE_TYPES
+};
+
+static const char *smu_get_message_name(struct smu_context *smu,
+ enum smu_message_type type)
+{
+ if (type < 0 || type >= SMU_MSG_MAX_COUNT)
+ return "unknown smu message";
+
+ return __smu_message_names[type];
+}
+
+static void smu_cmn_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+}
+
+static void smu_cmn_read_arg(struct smu_context *smu,
+ uint32_t *arg)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ *arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
+}
+
+static int smu_cmn_wait_for_response(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
+
+ for (i = 0; i < timeout; i++) {
+ cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
+ return cur_value == 0x1 ? 0 : -EIO;
+
+ udelay(1);
+ }
+
+ /* timeout means wrong logic */
+ if (i == timeout)
+ return -ETIME;
+
+ return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+}
+
+int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
+ enum smu_message_type msg,
+ uint32_t param,
+ uint32_t *read_arg)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0, index = 0;
+
+ index = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_MSG,
+ msg);
+ if (index < 0)
+ return index == -EACCES ? 0 : index;
+
+ mutex_lock(&smu->message_lock);
+ ret = smu_cmn_wait_for_response(smu);
+ if (ret) {
+ dev_err(adev->dev, "Msg issuing pre-check failed and "
+ "SMU may not be in the right state!\n");
+ goto out;
+ }
+
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+
+ smu_cmn_send_msg_without_waiting(smu, (uint16_t)index);
+
+ ret = smu_cmn_wait_for_response(smu);
+ if (ret) {
+ dev_err(adev->dev, "failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
+ smu_get_message_name(smu, msg), index, param, ret);
+ goto out;
+ }
+
+ if (read_arg)
+ smu_cmn_read_arg(smu, read_arg);
+
+out:
+ mutex_unlock(&smu->message_lock);
+ return ret;
+}
+
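/*
 * Illustrative usage sketch (not part of the patch): a caller sends one
 * message and optionally reads the 32-bit response argument back, e.g.
 *
 *	uint32_t smu_version;
 *
 *	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetSmuVersion,
 *					      0, &smu_version);
 *
 * Under the hood the helper above clears C2PMSG_90, writes the parameter to
 * C2PMSG_82, writes the ASIC message index to C2PMSG_66, polls C2PMSG_90 for
 * completion and, on success, returns the response read from C2PMSG_82.
 */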
+int smu_cmn_send_smc_msg(struct smu_context *smu,
+ enum smu_message_type msg,
+ uint32_t *read_arg)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ msg,
+ 0,
+ read_arg);
+}
+
+int smu_cmn_to_asic_specific_index(struct smu_context *smu,
+ enum smu_cmn2asic_mapping_type type,
+ uint32_t index)
+{
+ struct cmn2asic_msg_mapping msg_mapping;
+ struct cmn2asic_mapping mapping;
+
+ switch (type) {
+ case CMN2ASIC_MAPPING_MSG:
+ if (index > SMU_MSG_MAX_COUNT ||
+ !smu->message_map)
+ return -EINVAL;
+
+ msg_mapping = smu->message_map[index];
+ if (!msg_mapping.valid_mapping)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(smu->adev) &&
+ !msg_mapping.valid_in_vf)
+ return -EACCES;
+
+ return msg_mapping.map_to;
+
+ case CMN2ASIC_MAPPING_CLK:
+ if (index > SMU_CLK_COUNT ||
+ !smu->clock_map)
+ return -EINVAL;
+
+ mapping = smu->clock_map[index];
+ if (!mapping.valid_mapping)
+ return -EINVAL;
+
+ return mapping.map_to;
+
+ case CMN2ASIC_MAPPING_FEATURE:
+ if (index > SMU_FEATURE_COUNT ||
+ !smu->feature_map)
+ return -EINVAL;
+
+ mapping = smu->feature_map[index];
+ if (!mapping.valid_mapping)
+ return -EINVAL;
+
+ return mapping.map_to;
+
+ case CMN2ASIC_MAPPING_TABLE:
+ if (index > SMU_TABLE_COUNT ||
+ !smu->table_map)
+ return -EINVAL;
+
+ mapping = smu->table_map[index];
+ if (!mapping.valid_mapping)
+ return -EINVAL;
+
+ return mapping.map_to;
+
+ case CMN2ASIC_MAPPING_PWR:
+ if (index > SMU_POWER_SOURCE_COUNT ||
+ !smu->pwr_src_map)
+ return -EINVAL;
+
+ mapping = smu->pwr_src_map[index];
+ if (!mapping.valid_mapping)
+ return -EINVAL;
+
+ return mapping.map_to;
+
+ case CMN2ASIC_MAPPING_WORKLOAD:
+ if (index > PP_SMC_POWER_PROFILE_CUSTOM ||
+ !smu->workload_map)
+ return -EINVAL;
+
+ mapping = smu->workload_map[index];
+ if (!mapping.valid_mapping)
+ return -EINVAL;
+
+ return mapping.map_to;
+
+ default:
+ return -EINVAL;
+ }
+}
+
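/*
 * Illustrative sketch (not part of the patch): the per-ASIC tables consumed
 * above are arrays indexed by the common enums. Field names are taken from the
 * lookups in this function; PPCLK_UCLK stands in for whatever clock id the
 * ASIC's PMFW interface header defines.
 */
static struct cmn2asic_mapping example_clk_map[SMU_CLK_COUNT] = {
	[SMU_UCLK] = { .valid_mapping = 1, .map_to = PPCLK_UCLK },
};

/*
 * A lookup such as
 *	smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_CLK, SMU_UCLK)
 * then returns PPCLK_UCLK, or -EINVAL when no valid entry exists.
 */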
+int smu_cmn_feature_is_supported(struct smu_context *smu,
+ enum smu_feature_mask mask)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int feature_id;
+ int ret = 0;
+
+ feature_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_FEATURE,
+ mask);
+ if (feature_id < 0)
+ return 0;
+
+ WARN_ON(feature_id > feature->feature_num);
+
+ mutex_lock(&feature->mutex);
+ ret = test_bit(feature_id, feature->supported);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+int smu_cmn_feature_is_enabled(struct smu_context *smu,
+ enum smu_feature_mask mask)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int feature_id;
+ int ret = 0;
+
+ if (smu->is_apu)
+ return 1;
+ feature_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_FEATURE,
+ mask);
+ if (feature_id < 0)
+ return 0;
+
+ WARN_ON(feature_id > feature->feature_num);
+
+ mutex_lock(&feature->mutex);
+ ret = test_bit(feature_id, feature->enabled);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
+ enum smu_clk_type clk_type)
+{
+ enum smu_feature_mask feature_id = 0;
+
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ feature_id = SMU_FEATURE_DPM_UCLK_BIT;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
+ break;
+ case SMU_SOCCLK:
+ feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
+ break;
+ default:
+ return true;
+ }
+
+ if (!smu_cmn_feature_is_enabled(smu, feature_id))
+ return false;
+
+ return true;
+}
+
+int smu_cmn_get_enabled_mask(struct smu_context *smu,
+ uint32_t *feature_mask,
+ uint32_t num)
+{
+ uint32_t feature_mask_high = 0, feature_mask_low = 0;
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+
+ if (!feature_mask || num < 2)
+ return -EINVAL;
+
+ if (bitmap_empty(feature->enabled, feature->feature_num)) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
+ if (ret)
+ return ret;
+
+ feature_mask[0] = feature_mask_low;
+ feature_mask[1] = feature_mask_high;
+ } else {
+ bitmap_copy((unsigned long *)feature_mask, feature->enabled,
+ feature->feature_num);
+ }
+
+ return ret;
+}
+
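/*
 * Illustrative usage sketch (not part of the patch), mirroring callers such as
 * sienna_cichlid_is_dpm_running() earlier in this diff: the two 32-bit words
 * are combined into one 64-bit feature bitmap.
 */
uint32_t feature_mask[2];
uint64_t feature_enabled;

if (!smu_cmn_get_enabled_mask(smu, feature_mask, 2))
	feature_enabled = (uint64_t)feature_mask[0] |
			  ((uint64_t)feature_mask[1] << 32);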
+static int smu_cmn_feature_update_enable_state(struct smu_context *smu,
+ uint64_t feature_mask,
+ bool enabled)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+
+ if (enabled) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_EnableSmuFeaturesLow,
+ lower_32_bits(feature_mask),
+ NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_EnableSmuFeaturesHigh,
+ upper_32_bits(feature_mask),
+ NULL);
+ if (ret)
+ return ret;
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_DisableSmuFeaturesLow,
+ lower_32_bits(feature_mask),
+ NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_DisableSmuFeaturesHigh,
+ upper_32_bits(feature_mask),
+ NULL);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&feature->mutex);
+ if (enabled)
+ bitmap_or(feature->enabled, feature->enabled,
+ (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+ else
+ bitmap_andnot(feature->enabled, feature->enabled,
+ (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+int smu_cmn_feature_set_enabled(struct smu_context *smu,
+ enum smu_feature_mask mask,
+ bool enable)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int feature_id;
+
+ feature_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_FEATURE,
+ mask);
+ if (feature_id < 0)
+ return -EINVAL;
+
+ WARN_ON(feature_id > feature->feature_num);
+
+ return smu_cmn_feature_update_enable_state(smu,
+ 1ULL << feature_id,
+ enable);
+}
+
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(fea) #fea
+static const char* __smu_feature_names[] = {
+ SMU_FEATURE_MASKS
+};
+
+static const char *smu_get_feature_name(struct smu_context *smu,
+ enum smu_feature_mask feature)
+{
+ if (feature < 0 || feature >= SMU_FEATURE_COUNT)
+ return "unknown smu feature";
+ return __smu_feature_names[feature];
+}
+
+size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
+ char *buf)
+{
+ uint32_t feature_mask[2] = { 0 };
+ int32_t feature_index = 0;
+ uint32_t count = 0;
+ uint32_t sort_feature[SMU_FEATURE_COUNT];
+ uint64_t hw_feature_count = 0;
+ size_t size = 0;
+ int ret = 0, i;
+
+ ret = smu_cmn_get_enabled_mask(smu,
+ feature_mask,
+ 2);
+ if (ret)
+ return 0;
+
+ size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
+ feature_mask[1], feature_mask[0]);
+
+ for (i = 0; i < SMU_FEATURE_COUNT; i++) {
+ feature_index = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_FEATURE,
+ i);
+ if (feature_index < 0)
+ continue;
+ sort_feature[feature_index] = i;
+ hw_feature_count++;
+ }
+
+ for (i = 0; i < hw_feature_count; i++) {
+ size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
+ count++,
+ smu_get_feature_name(smu, sort_feature[i]),
+ i,
+ !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
+ "enabled" : "disabled");
+ }
+
+ return size;
+}
+
+int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
+ uint64_t new_mask)
+{
+ int ret = 0;
+ uint32_t feature_mask[2] = { 0 };
+ uint64_t feature_2_enabled = 0;
+ uint64_t feature_2_disabled = 0;
+ uint64_t feature_enables = 0;
+
+ ret = smu_cmn_get_enabled_mask(smu,
+ feature_mask,
+ 2);
+ if (ret)
+ return ret;
+
+ feature_enables = ((uint64_t)feature_mask[1] << 32 |
+ (uint64_t)feature_mask[0]);
+
+ feature_2_enabled = ~feature_enables & new_mask;
+ feature_2_disabled = feature_enables & ~new_mask;
+
+ if (feature_2_enabled) {
+ ret = smu_cmn_feature_update_enable_state(smu,
+ feature_2_enabled,
+ true);
+ if (ret)
+ return ret;
+ }
+ if (feature_2_disabled) {
+ ret = smu_cmn_feature_update_enable_state(smu,
+ feature_2_disabled,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
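/*
 * Worked example (not part of the patch) of the masking above: with features
 * currently enabled = 0b1010 and new_mask = 0b0110,
 *
 *	feature_2_enabled  = ~0b1010 &  0b0110 = 0b0100	(to be enabled)
 *	feature_2_disabled =  0b1010 & ~0b0110 = 0b1000	(to be disabled)
 *
 * Bits already in the requested state (here bit 1) are left untouched.
 */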
+int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
+ enum smu_feature_mask mask)
+{
+ uint64_t features_to_disable = U64_MAX;
+ int skipped_feature_id;
+
+ skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_FEATURE,
+ mask);
+ if (skipped_feature_id < 0)
+ return -EINVAL;
+
+ features_to_disable &= ~(1ULL << skipped_feature_id);
+
+ return smu_cmn_feature_update_enable_state(smu,
+ features_to_disable,
+ 0);
+}
+
+int smu_cmn_get_smc_version(struct smu_context *smu,
+ uint32_t *if_version,
+ uint32_t *smu_version)
+{
+ int ret = 0;
+
+ if (!if_version && !smu_version)
+ return -EINVAL;
+
+ if (smu->smc_fw_if_version && smu->smc_fw_version)
+ {
+ if (if_version)
+ *if_version = smu->smc_fw_if_version;
+
+ if (smu_version)
+ *smu_version = smu->smc_fw_version;
+
+ return 0;
+ }
+
+ if (if_version) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
+ if (ret)
+ return ret;
+
+ smu->smc_fw_if_version = *if_version;
+ }
+
+ if (smu_version) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
+ if (ret)
+ return ret;
+
+ smu->smc_fw_version = *smu_version;
+ }
+
+ return ret;
+}
+
+int smu_cmn_update_table(struct smu_context *smu,
+ enum smu_table_id table_index,
+ int argument,
+ void *table_data,
+ bool drv2smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
+ struct smu_table *table = &smu_table->driver_table;
+ int table_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_TABLE,
+ table_index);
+ uint32_t table_size;
+ int ret = 0;
+ if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
+ return -EINVAL;
+
+ table_size = smu_table->tables[table_index].size;
+
+ if (drv2smu) {
+ memcpy(table->cpu_addr, table_data, table_size);
+ /*
+ * Flush the HDP cache to ensure the content seen by the
+ * GPU is consistent with the CPU.
+ */
+ amdgpu_asic_flush_hdp(adev, NULL);
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
+ SMU_MSG_TransferTableDram2Smu :
+ SMU_MSG_TransferTableSmu2Dram,
+ table_id | ((argument & 0xFFFF) << 16),
+ NULL);
+ if (ret)
+ return ret;
+
+ if (!drv2smu) {
+ amdgpu_asic_flush_hdp(adev, NULL);
+ memcpy(table_data, table->cpu_addr, table_size);
+ }
+
+ return ret;
+}
+
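/*
 * Illustrative sketch (not part of the patch): how the transfer message
 * parameter above is packed - the low 16 bits carry the ASIC-specific table
 * id, the high 16 bits carry the caller's argument.
 */
static inline uint32_t example_pack_table_param(int table_id, int argument)
{
	return (uint32_t)table_id | (((uint32_t)argument & 0xFFFF) << 16);
}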
+int smu_cmn_write_watermarks_table(struct smu_context *smu)
+{
+ void *watermarks_table = smu->smu_table.watermarks_table;
+
+ if (!watermarks_table)
+ return -EINVAL;
+
+ return smu_cmn_update_table(smu,
+ SMU_TABLE_WATERMARKS,
+ 0,
+ watermarks_table,
+ true);
+}
+
+int smu_cmn_write_pptable(struct smu_context *smu)
+{
+ void *pptable = smu->smu_table.driver_pptable;
+
+ return smu_cmn_update_table(smu,
+ SMU_TABLE_PPTABLE,
+ 0,
+ pptable,
+ true);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_cmn.h b/drivers/gpu/drm/amd/powerplay/smu_cmn.h
new file mode 100644
index 000000000000..98face8c5fd6
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smu_cmn.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SMU_CMN_H__
+#define __SMU_CMN_H__
+
+#include "amdgpu_smu.h"
+
+#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)
+int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
+ enum smu_message_type msg,
+ uint32_t param,
+ uint32_t *read_arg);
+
+int smu_cmn_send_smc_msg(struct smu_context *smu,
+ enum smu_message_type msg,
+ uint32_t *read_arg);
+
+int smu_cmn_to_asic_specific_index(struct smu_context *smu,
+ enum smu_cmn2asic_mapping_type type,
+ uint32_t index);
+
+int smu_cmn_feature_is_supported(struct smu_context *smu,
+ enum smu_feature_mask mask);
+
+int smu_cmn_feature_is_enabled(struct smu_context *smu,
+ enum smu_feature_mask mask);
+
+bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
+ enum smu_clk_type clk_type);
+
+int smu_cmn_get_enabled_mask(struct smu_context *smu,
+ uint32_t *feature_mask,
+ uint32_t num);
+
+int smu_cmn_feature_set_enabled(struct smu_context *smu,
+ enum smu_feature_mask mask,
+ bool enable);
+
+size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
+ char *buf);
+
+int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
+ uint64_t new_mask);
+
+int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
+ enum smu_feature_mask mask);
+
+int smu_cmn_get_smc_version(struct smu_context *smu,
+ uint32_t *if_version,
+ uint32_t *smu_version);
+
+int smu_cmn_update_table(struct smu_context *smu,
+ enum smu_table_id table_index,
+ int argument,
+ void *table_data,
+ bool drv2smu);
+
+int smu_cmn_write_watermarks_table(struct smu_context *smu);
+
+int smu_cmn_write_pptable(struct smu_context *smu);
+
+#endif
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index afb3ef874fc5..d0deaefd3feb 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -25,6 +25,8 @@
#include "amdgpu_smu.h"
+#if defined(SWSMU_CODE_LAYER_L1)
+
#define smu_ppt_funcs(intf, ret, smu, args...) \
((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? (smu)->ppt_funcs->intf(smu, ##args) : ret) : -EINVAL)
@@ -44,22 +46,23 @@
#define smu_set_tool_table_location(smu) smu_ppt_funcs(set_tool_table_location, 0, smu)
#define smu_notify_memory_pool_location(smu) smu_ppt_funcs(notify_memory_pool_location, 0, smu)
#define smu_gfx_off_control(smu, enable) smu_ppt_funcs(gfx_off_control, 0, smu, enable)
+#define smu_get_gfx_off_status(smu) smu_ppt_funcs(get_gfx_off_status, 0, smu)
#define smu_set_last_dcef_min_deep_sleep_clk(smu) smu_ppt_funcs(set_last_dcef_min_deep_sleep_clk, 0, smu)
#define smu_system_features_control(smu, en) smu_ppt_funcs(system_features_control, 0, smu, en)
#define smu_init_max_sustainable_clocks(smu) smu_ppt_funcs(init_max_sustainable_clocks, 0, smu)
#define smu_set_default_od_settings(smu) smu_ppt_funcs(set_default_od_settings, 0, smu)
#define smu_send_smc_msg_with_param(smu, msg, param, read_arg) smu_ppt_funcs(send_smc_msg_with_param, 0, smu, msg, param, read_arg)
-#define smu_send_smc_msg(smu, msg, read_arg) smu_ppt_funcs(send_smc_msg_with_param, 0, smu, msg, 0, read_arg)
-#define smu_alloc_dpm_context(smu) smu_ppt_funcs(alloc_dpm_context, 0, smu)
+#define smu_send_smc_msg(smu, msg, read_arg) smu_ppt_funcs(send_smc_msg, 0, smu, msg, read_arg)
#define smu_init_display_count(smu, count) smu_ppt_funcs(init_display_count, 0, smu, count)
#define smu_feature_set_allowed_mask(smu) smu_ppt_funcs(set_allowed_mask, 0, smu)
#define smu_feature_get_enabled_mask(smu, mask, num) smu_ppt_funcs(get_enabled_mask, 0, smu, mask, num)
+#define smu_feature_is_enabled(smu, mask) smu_ppt_funcs(feature_is_enabled, 0, smu, mask)
+#define smu_disable_all_features_with_exception(smu, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask)
#define smu_is_dpm_running(smu) smu_ppt_funcs(is_dpm_running, 0 , smu)
#define smu_notify_display_change(smu) smu_ppt_funcs(notify_display_change, 0, smu)
#define smu_set_default_dpm_table(smu) smu_ppt_funcs(set_default_dpm_table, 0, smu)
#define smu_populate_umd_state_clk(smu) smu_ppt_funcs(populate_umd_state_clk, 0, smu)
#define smu_set_default_od8_settings(smu) smu_ppt_funcs(set_default_od8_settings, 0, smu)
-#define smu_tables_init(smu, tab) smu_ppt_funcs(tables_init, 0, smu, tab)
#define smu_enable_thermal_alert(smu) smu_ppt_funcs(enable_thermal_alert, 0, smu)
#define smu_disable_thermal_alert(smu) smu_ppt_funcs(disable_thermal_alert, 0, smu)
#define smu_smc_read_sensor(smu, sensor, data, size) smu_ppt_funcs(read_sensor, -EINVAL, smu, sensor, data, size)
@@ -68,12 +71,6 @@
#define smu_apply_clocks_adjust_rules(smu) smu_ppt_funcs(apply_clocks_adjust_rules, 0, smu)
#define smu_notify_smc_display_config(smu) smu_ppt_funcs(notify_smc_display_config, 0, smu)
#define smu_set_cpu_power_state(smu) smu_ppt_funcs(set_cpu_power_state, 0, smu)
-#define smu_msg_get_index(smu, msg) smu_ppt_funcs(get_smu_msg_index, -EINVAL, smu, msg)
-#define smu_clk_get_index(smu, clk) smu_ppt_funcs(get_smu_clk_index, -EINVAL, smu, clk)
-#define smu_feature_get_index(smu, fea) smu_ppt_funcs(get_smu_feature_index, -EINVAL, smu, fea)
-#define smu_table_get_index(smu, tab) smu_ppt_funcs(get_smu_table_index, -EINVAL, smu, tab)
-#define smu_power_get_index(smu, src) smu_ppt_funcs(get_smu_power_index, -EINVAL, smu, src)
-#define smu_workload_get_type(smu, type) smu_ppt_funcs(get_workload_type, -EINVAL, smu, type)
#define smu_run_btc(smu) smu_ppt_funcs(run_btc, 0, smu)
#define smu_get_allowed_feature_mask(smu, feature_mask, num) smu_ppt_funcs(get_allowed_feature_mask, 0, smu, feature_mask, num)
#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) smu_ppt_funcs(store_cc6_data, 0, smu, st, cc6_dis, pst_dis, pst_sw_dis)
@@ -82,7 +79,7 @@
#define smu_get_current_shallow_sleep_clocks(smu, clocks) smu_ppt_funcs(get_current_shallow_sleep_clocks, 0, smu, clocks)
#define smu_dpm_set_vcn_enable(smu, enable) smu_ppt_funcs(dpm_set_vcn_enable, 0, smu, enable)
#define smu_dpm_set_jpeg_enable(smu, enable) smu_ppt_funcs(dpm_set_jpeg_enable, 0, smu, enable)
-#define smu_set_watermarks_table(smu, tab, clock_ranges) smu_ppt_funcs(set_watermarks_table, 0, smu, tab, clock_ranges)
+#define smu_set_watermarks_table(smu, clock_ranges) smu_ppt_funcs(set_watermarks_table, 0, smu, clock_ranges)
#define smu_thermal_temperature_range_update(smu, range, rw) smu_ppt_funcs(thermal_temperature_range_update, 0, smu, range, rw)
#define smu_register_irq_handler(smu) smu_ppt_funcs(register_irq_handler, 0, smu)
#define smu_get_dpm_ultimate_freq(smu, param, min, max) smu_ppt_funcs(get_dpm_ultimate_freq, 0, smu, param, min, max)
@@ -91,10 +88,13 @@
#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) smu_ppt_funcs(update_pcie_parameters, 0, smu, pcie_gen_cap, pcie_width_cap)
#define smu_disable_umc_cdr_12gbps_workaround(smu) smu_ppt_funcs(disable_umc_cdr_12gbps_workaround, 0, smu)
#define smu_set_power_source(smu, power_src) smu_ppt_funcs(set_power_source, 0, smu, power_src)
-#define smu_i2c_eeprom_init(smu, control) smu_ppt_funcs(i2c_eeprom_init, 0, smu, control)
-#define smu_i2c_eeprom_fini(smu, control) smu_ppt_funcs(i2c_eeprom_fini, 0, smu, control)
+#define smu_i2c_init(smu, control) smu_ppt_funcs(i2c_init, 0, smu, control)
+#define smu_i2c_fini(smu, control) smu_ppt_funcs(i2c_fini, 0, smu, control)
#define smu_get_unique_id(smu) smu_ppt_funcs(get_unique_id, 0, smu)
#define smu_log_thermal_throttling(smu) smu_ppt_funcs(log_thermal_throttling_event, 0, smu)
#define smu_get_asic_power_limits(smu) smu_ppt_funcs(get_power_limit, 0, smu)
+#define smu_get_pp_feature_mask(smu, buf) smu_ppt_funcs(get_pp_feature_mask, 0, smu, buf)
+#define smu_set_pp_feature_mask(smu, new_mask) smu_ppt_funcs(set_pp_feature_mask, 0, smu, new_mask)
#endif
+#endif
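/*
 * Illustrative sketch (not part of the patch): with the smu_ppt_funcs()
 * wrapper defined at the top of this header, a call such as
 * smu_i2c_init(smu, control) expands to
 *
 *	((smu)->ppt_funcs ?
 *		((smu)->ppt_funcs->i2c_init ?
 *			(smu)->ppt_funcs->i2c_init(smu, control) : 0) :
 *		-EINVAL)
 *
 * so ASICs that do not implement a callback get the listed default (0 here)
 * instead of dereferencing a NULL pointer.
 */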
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index a7336556dc36..fd82402065e6 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -26,16 +26,18 @@
#include <linux/reboot.h>
#define SMU_11_0_PARTIAL_PPTABLE
+#define SWSMU_CODE_LAYER_L3
#include "amdgpu.h"
#include "amdgpu_smu.h"
-#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
#include "smu_v11_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "amdgpu_ras.h"
+#include "smu_cmn.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -65,89 +67,6 @@ MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin");
#define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
-static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
- uint16_t msg)
-{
- struct amdgpu_device *adev = smu->adev;
- WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
- return 0;
-}
-
-static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
-{
- struct amdgpu_device *adev = smu->adev;
-
- *arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
- return 0;
-}
-
-static int smu_v11_0_wait_for_response(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
-
- for (i = 0; i < timeout; i++) {
- cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
- if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
- return cur_value == 0x1 ? 0 : -EIO;
-
- udelay(1);
- }
-
- /* timeout means wrong logic */
- if (i == timeout)
- return -ETIME;
-
- return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
-}
-
-int
-smu_v11_0_send_msg_with_param(struct smu_context *smu,
- enum smu_message_type msg,
- uint32_t param,
- uint32_t *read_arg)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret = 0, index = 0;
-
- index = smu_msg_get_index(smu, msg);
- if (index < 0)
- return index == -EACCES ? 0 : index;
-
- mutex_lock(&smu->message_lock);
- ret = smu_v11_0_wait_for_response(smu);
- if (ret) {
- dev_err(adev->dev, "Msg issuing pre-check failed and "
- "SMU may be not in the right state!\n");
- goto out;
- }
-
- WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
-
- smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
-
- ret = smu_v11_0_wait_for_response(smu);
- if (ret) {
- dev_err(adev->dev, "failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
- smu_get_message_name(smu, msg), index, param, ret);
- goto out;
- }
-
- if (read_arg) {
- ret = smu_v11_0_read_arg(smu, read_arg);
- if (ret) {
- dev_err(adev->dev, "failed to read message arg: %10s (%d) \tparam: 0x%08x response %#x\n",
- smu_get_message_name(smu, msg), index, param, ret);
- goto out;
- }
- }
-out:
- mutex_unlock(&smu->message_lock);
- return ret;
-}
-
int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -285,7 +204,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
uint8_t smu_minor, smu_debug;
int ret = 0;
- ret = smu_get_smc_version(smu, &if_version, &smu_version);
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret)
return ret;
@@ -416,7 +335,7 @@ int smu_v11_0_setup_pptable(struct smu_context *smu)
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
powerplayinfo);
- ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
+ ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
(uint8_t **)&table);
if (ret)
return ret;
@@ -431,70 +350,24 @@ int smu_v11_0_setup_pptable(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_init_dpm_context(struct smu_context *smu)
-{
- struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
-
- if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
- return -EINVAL;
-
- return smu_alloc_dpm_context(smu);
-}
-
-static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
-{
- struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
-
- if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
- return -EINVAL;
-
- kfree(smu_dpm->dpm_context);
- kfree(smu_dpm->golden_dpm_context);
- kfree(smu_dpm->dpm_current_power_state);
- kfree(smu_dpm->dpm_request_power_state);
- smu_dpm->dpm_context = NULL;
- smu_dpm->golden_dpm_context = NULL;
- smu_dpm->dpm_context_size = 0;
- smu_dpm->dpm_current_power_state = NULL;
- smu_dpm->dpm_request_power_state = NULL;
-
- return 0;
-}
-
int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_table *tables = NULL;
+ struct smu_table *tables = smu_table->tables;
int ret = 0;
- tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
- GFP_KERNEL);
- if (!tables) {
- ret = -ENOMEM;
- goto err0_out;
- }
- smu_table->tables = tables;
-
- ret = smu_tables_init(smu, tables);
- if (ret)
- goto err1_out;
-
- ret = smu_v11_0_init_dpm_context(smu);
- if (ret)
- goto err1_out;
-
smu_table->driver_pptable =
kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
if (!smu_table->driver_pptable) {
ret = -ENOMEM;
- goto err2_out;
+ goto err0_out;
}
smu_table->max_sustainable_clocks =
kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL);
if (!smu_table->max_sustainable_clocks) {
ret = -ENOMEM;
- goto err3_out;
+ goto err1_out;
}
/* Arcturus does not support OVERDRIVE */
@@ -503,29 +376,25 @@ int smu_v11_0_init_smc_tables(struct smu_context *smu)
kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
if (!smu_table->overdrive_table) {
ret = -ENOMEM;
- goto err4_out;
+ goto err2_out;
}
smu_table->boot_overdrive_table =
kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
if (!smu_table->boot_overdrive_table) {
ret = -ENOMEM;
- goto err5_out;
+ goto err3_out;
}
}
return 0;
-err5_out:
- kfree(smu_table->overdrive_table);
-err4_out:
- kfree(smu_table->max_sustainable_clocks);
err3_out:
- kfree(smu_table->driver_pptable);
+ kfree(smu_table->overdrive_table);
err2_out:
- smu_v11_0_fini_dpm_context(smu);
+ kfree(smu_table->max_sustainable_clocks);
err1_out:
- kfree(tables);
+ kfree(smu_table->driver_pptable);
err0_out:
return ret;
}
@@ -533,10 +402,7 @@ err0_out:
int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- int ret = 0;
-
- if (!smu_table->tables)
- return -EINVAL;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
kfree(smu_table->boot_overdrive_table);
kfree(smu_table->overdrive_table);
@@ -549,17 +415,22 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->hardcode_pptable);
smu_table->hardcode_pptable = NULL;
- kfree(smu_table->tables);
kfree(smu_table->metrics_table);
kfree(smu_table->watermarks_table);
- smu_table->tables = NULL;
smu_table->metrics_table = NULL;
smu_table->watermarks_table = NULL;
smu_table->metrics_time = 0;
- ret = smu_v11_0_fini_dpm_context(smu);
- if (ret)
- return ret;
+ kfree(smu_dpm->dpm_context);
+ kfree(smu_dpm->golden_dpm_context);
+ kfree(smu_dpm->dpm_current_power_state);
+ kfree(smu_dpm->dpm_request_power_state);
+ smu_dpm->dpm_context = NULL;
+ smu_dpm->golden_dpm_context = NULL;
+ smu_dpm->dpm_context_size = 0;
+ smu_dpm->dpm_current_power_state = NULL;
+ smu_dpm->dpm_request_power_state = NULL;
+
return 0;
}
@@ -631,7 +502,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
firmwareinfo);
- ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
+ ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
(uint8_t **)&header);
if (ret)
return ret;
@@ -727,13 +598,13 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
address_high = (uint32_t)upper_32_bits(address);
address_low = (uint32_t)lower_32_bits(address);
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrHigh,
address_high,
NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrLow,
address_low,
NULL);
@@ -744,15 +615,15 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
address_high = (uint32_t)upper_32_bits(address);
address_low = (uint32_t)lower_32_bits(address);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
address_high, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
address_low, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
(uint32_t)memory_pool->size, NULL);
if (ret)
return ret;
@@ -760,22 +631,11 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
return ret;
}
-int smu_v11_0_write_pptable(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- int ret = 0;
-
- ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
- table_context->driver_pptable, true);
-
- return ret;
-}
-
int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
int ret;
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
if (ret)
dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK Failed!");
@@ -789,12 +649,12 @@ int smu_v11_0_set_driver_table_location(struct smu_context *smu)
int ret = 0;
if (driver_table->mc_address) {
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrHigh,
upper_32_bits(driver_table->mc_address),
NULL);
if (!ret)
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrLow,
lower_32_bits(driver_table->mc_address),
NULL);
@@ -809,12 +669,12 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
if (tool_table->mc_address) {
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrHigh,
upper_32_bits(tool_table->mc_address),
NULL);
if (!ret)
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrLow,
lower_32_bits(tool_table->mc_address),
NULL);
@@ -835,7 +695,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
if (!smu->pm_enabled)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
return ret;
}
@@ -852,12 +712,12 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
if (ret)
goto failed;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
feature_mask[0], NULL);
if (ret)
goto failed;
@@ -867,35 +727,6 @@ failed:
return ret;
}
-int smu_v11_0_get_enabled_mask(struct smu_context *smu,
- uint32_t *feature_mask, uint32_t num)
-{
- uint32_t feature_mask_high = 0, feature_mask_low = 0;
- struct smu_feature *feature = &smu->smu_feature;
- int ret = 0;
-
- if (!feature_mask || num < 2)
- return -EINVAL;
-
- if (bitmap_empty(feature->enabled, feature->feature_num)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
- if (ret)
- return ret;
-
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
- if (ret)
- return ret;
-
- feature_mask[0] = feature_mask_low;
- feature_mask[1] = feature_mask_high;
- } else {
- bitmap_copy((unsigned long *)feature_mask, feature->enabled,
- feature->feature_num);
- }
-
- return ret;
-}
-
int smu_v11_0_system_features_control(struct smu_context *smu,
bool en)
{
@@ -903,7 +734,7 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
uint32_t feature_mask[2];
int ret = 0;
- ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
SMU_MSG_DisableAllSmuFeatures), NULL);
if (ret)
return ret;
@@ -912,7 +743,7 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
bitmap_zero(feature->supported, feature->feature_num);
if (en) {
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
if (ret)
return ret;
@@ -932,9 +763,9 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
if (!smu->pm_enabled)
return ret;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
return ret;
}
@@ -946,15 +777,17 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
int ret = 0;
int clk_id;
- if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
- (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
+ if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
+ (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
return 0;
- clk_id = smu_clk_get_index(smu, clock_select);
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clock_select);
if (clk_id < 0)
return -EINVAL;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
clk_id << 16, clock);
if (ret) {
dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
@@ -965,7 +798,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
return 0;
/* if DC limit is zero, return AC limit */
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
clk_id << 16, clock);
if (ret) {
dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
@@ -988,7 +821,7 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v11_0_get_max_sustainable_clock(smu,
&(max_sustainable_clocks->uclock),
SMU_UCLK);
@@ -999,7 +832,7 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
}
}
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_v11_0_get_max_sustainable_clock(smu,
&(max_sustainable_clocks->soc_clock),
SMU_SOCCLK);
@@ -1010,7 +843,7 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
}
}
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
ret = smu_v11_0_get_max_sustainable_clock(smu,
&(max_sustainable_clocks->dcef_clock),
SMU_DCEFCLK);
@@ -1058,17 +891,18 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu,
int power_src;
int ret = 0;
- if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
return -EINVAL;
- power_src = smu_power_get_index(smu,
+ power_src = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_PWR,
smu->adev->pm.ac_power ?
SMU_POWER_SOURCE_AC :
SMU_POWER_SOURCE_DC);
if (power_src < 0)
return -EINVAL;
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetPptLimit,
power_src << 16,
power_limit);
@@ -1082,12 +916,12 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
int ret = 0;
- if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
return -EOPNOTSUPP;
}
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
if (ret) {
dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
return ret;
@@ -1145,8 +979,8 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
enum smu_clk_type clk_select = 0;
uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
switch (clk_type) {
case amd_pp_dcef_clock:
clk_select = SMU_DCEFCLK;
@@ -1198,9 +1032,9 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
- ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
else
- ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
break;
default:
break;
@@ -1212,7 +1046,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
- if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
return AMD_FAN_CTRL_MANUAL;
else
return AMD_FAN_CTRL_AUTO;
@@ -1223,10 +1057,10 @@ smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
int ret = 0;
- if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
+ if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
return 0;
- ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
if (ret)
dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
__func__, (auto_fan_control ? "Start" : "Stop"));
@@ -1336,7 +1170,7 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
int ret = 0;
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetXgmiMode,
pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
NULL);
@@ -1410,7 +1244,7 @@ static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
{
- return smu_send_smc_msg(smu,
+ return smu_cmn_send_smc_msg(smu,
SMU_MSG_ReenableAcDcInterrupt,
NULL);
}
@@ -1568,14 +1402,14 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
return ret;
}
static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
{
- return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}
bool smu_v11_0_baco_is_support(struct smu_context *smu)
@@ -1591,8 +1425,8 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
return false;
/* Arcturus does not support this bit mask */
- if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
- !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+ !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
return false;
return true;
@@ -1629,21 +1463,15 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
data |= 0x80000000;
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
} else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
}
} else {
- ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
if (ret)
goto out;
- if (ras && ras->supported) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
- if (ret)
- goto out;
- }
-
/* clear vbios scratch 6 and 7 for coming asic reinit */
WREG32(adev->bios_scratch_reg_offset + 6, 0);
WREG32(adev->bios_scratch_reg_offset + 7, 0);
@@ -1693,7 +1521,7 @@ int smu_v11_0_mode1_reset(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
if (!ret)
msleep(SMU11_MODE1_RESET_WAIT_TIME_IN_MS);
@@ -1707,7 +1535,7 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
uint32_t param = 0;
uint32_t clock_limit;
- if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
@@ -1734,7 +1562,9 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
return 0;
}
- clk_id = smu_clk_get_index(smu, clk_type);
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
if (clk_id < 0) {
ret = -EINVAL;
goto failed;
@@ -1742,13 +1572,13 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
param = (clk_id & 0xffff) << 16;
if (max) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
if (ret)
goto failed;
}
if (min) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
if (ret)
goto failed;
}
@@ -1766,7 +1596,12 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
int ret = 0, clk_id = 0;
uint32_t param;
- clk_id = smu_clk_get_index(smu, clk_type);
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
if (clk_id < 0)
return clk_id;
@@ -1775,7 +1610,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
param, NULL);
if (ret)
goto out;
@@ -1783,7 +1618,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
if (min > 0) {
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
param, NULL);
if (ret)
goto out;
@@ -1807,16 +1642,18 @@ int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
if (min <= 0 && max <= 0)
return -EINVAL;
- if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
return 0;
- clk_id = smu_clk_get_index(smu, clk_type);
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
if (clk_id < 0)
return clk_id;
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
param, NULL);
if (ret)
return ret;
@@ -1824,7 +1661,7 @@ int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
if (min > 0) {
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
param, NULL);
if (ret)
return ret;
@@ -1939,11 +1776,13 @@ int smu_v11_0_set_power_source(struct smu_context *smu,
{
int pwr_source;
- pwr_source = smu_power_get_index(smu, (uint32_t)power_src);
+ pwr_source = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_PWR,
+ (uint32_t)power_src);
if (pwr_source < 0)
return -EINVAL;
- return smu_send_smc_msg_with_param(smu,
+ return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_NotifyPowerSource,
pwr_source,
NULL);
@@ -1960,16 +1799,18 @@ int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
if (!value)
return -EINVAL;
- if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
return 0;
- clk_id = smu_clk_get_index(smu, clk_type);
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
if (clk_id < 0)
return clk_id;
param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
param,
value);
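
Taken together, the smu_v11_0 hunks above follow one recurring pattern: check the DPM feature with smu_cmn_clk_dpm_is_enabled(), translate the common enum through smu_cmn_to_asic_specific_index(), then issue the request with smu_cmn_send_smc_msg_with_param(). A condensed sketch of that pattern, using only the helpers visible in this diff (the function name itself is illustrative):

static int example_get_max_dpm_freq(struct smu_context *smu,
				    enum smu_clk_type clk_type,
				    uint32_t *max_freq)
{
	int clk_id;

	/* Nothing to ask for if DPM on this clock is not enabled. */
	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	/* Map the common SMU_* clock enum onto the ASIC-specific index. */
	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	/* The clock id is carried in the upper 16 bits of the parameter. */
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					       (clk_id & 0xffff) << 16,
					       max_freq);
}
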
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 4e1b11d07438..31456437bb18 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -20,15 +20,17 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#define SWSMU_CODE_LAYER_L3
+
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
-#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v12_0.h"
#include "soc15_common.h"
#include "atom.h"
+#include "smu_cmn.h"
#include "asic_reg/mp/mp_12_0_0_offset.h"
#include "asic_reg/mp/mp_12_0_0_sh_mask.h"
@@ -52,86 +54,6 @@
#define smnMP1_FIRMWARE_FLAGS 0x3010024
-int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
- uint16_t msg)
-{
- struct amdgpu_device *adev = smu->adev;
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
- return 0;
-}
-
-static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
-{
- struct amdgpu_device *adev = smu->adev;
-
- *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
- return 0;
-}
-
-int smu_v12_0_wait_for_response(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t cur_value, i;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
- if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
- return cur_value == 0x1 ? 0 : -EIO;
-
- udelay(1);
- }
-
- /* timeout means wrong logic */
- return -ETIME;
-}
-
-int
-smu_v12_0_send_msg_with_param(struct smu_context *smu,
- enum smu_message_type msg,
- uint32_t param,
- uint32_t *read_arg)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret = 0, index = 0;
-
- index = smu_msg_get_index(smu, msg);
- if (index < 0)
- return index;
-
- mutex_lock(&smu->message_lock);
- ret = smu_v12_0_wait_for_response(smu);
- if (ret) {
- dev_err(adev->dev, "Msg issuing pre-check failed and "
- "SMU may be not in the right state!\n");
- goto out;
- }
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
-
- smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
-
- ret = smu_v12_0_wait_for_response(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to send message 0x%x, response 0x%x param 0x%x\n",
- index, ret, param);
- goto out;
- }
- if (read_arg) {
- ret = smu_v12_0_read_arg(smu, read_arg);
- if (ret) {
- dev_err(adev->dev, "Failed to read message arg 0x%x, response 0x%x param 0x%x\n",
- index, ret, param);
- goto out;
- }
- }
-out:
- mutex_unlock(&smu->message_lock);
- return ret;
-}
-
int smu_v12_0_check_fw_status(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -154,7 +76,7 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
uint8_t smu_minor, smu_debug;
int ret = 0;
- ret = smu_get_smc_version(smu, &if_version, &smu_version);
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret)
return ret;
@@ -187,9 +109,9 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
return 0;
if (gate)
- return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
+ return smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
else
- return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
+ return smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
}
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
@@ -197,7 +119,7 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
return 0;
- return smu_v12_0_send_msg_with_param(smu,
+ return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetGfxCGPG,
enable ? 1 : 0,
NULL);
@@ -233,10 +155,10 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0, timeout = 500;
if (enable) {
- ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
} else {
- ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
/* confirm gfx is back to "on" state, timeout is 0.5 second */
while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) {
@@ -252,36 +174,18 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
return ret;
}
-int smu_v12_0_init_smc_tables(struct smu_context *smu)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_table *tables = NULL;
-
- if (smu_table->tables)
- return -EINVAL;
-
- tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
- GFP_KERNEL);
- if (!tables)
- return -ENOMEM;
-
- smu_table->tables = tables;
-
- return smu_tables_init(smu, tables);
-}
-
int smu_v12_0_fini_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- if (!smu_table->tables)
- return -EINVAL;
-
kfree(smu_table->clocks_table);
- kfree(smu_table->tables);
-
smu_table->clocks_table = NULL;
- smu_table->tables = NULL;
+
+ kfree(smu_table->metrics_table);
+ smu_table->metrics_table = NULL;
+
+ kfree(smu_table->watermarks_table);
+ smu_table->watermarks_table = NULL;
return 0;
}
@@ -290,34 +194,11 @@ int smu_v12_0_set_default_dpm_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
-}
-
-int smu_v12_0_get_enabled_mask(struct smu_context *smu,
- uint32_t *feature_mask, uint32_t num)
-{
- uint32_t feature_mask_high = 0, feature_mask_low = 0;
- int ret = 0;
-
- if (!feature_mask || num < 2)
- return -EINVAL;
-
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
- if (ret)
- return ret;
-
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
- if (ret)
- return ret;
-
- feature_mask[0] = feature_mask_low;
- feature_mask[1] = feature_mask_high;
-
- return ret;
+ return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}
int smu_v12_0_mode2_reset(struct smu_context *smu){
- return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
}
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -325,42 +206,45 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
{
int ret = 0;
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
if (ret)
return ret;
break;
case SMU_FCLK:
case SMU_MCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
if (ret)
return ret;
break;
case SMU_SOCCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
if (ret)
return ret;
break;
case SMU_VCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
if (ret)
return ret;
break;
@@ -377,12 +261,12 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
int ret = 0;
if (driver_table->mc_address) {
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrHigh,
upper_32_bits(driver_table->mc_address),
NULL);
if (!ret)
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrLow,
lower_32_bits(driver_table->mc_address),
NULL);
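
With smu_v12_0_send_msg_with_param() and its helpers deleted above, the mailbox handshake lives only in the shared smu_cmn layer. For reference, a condensed restatement of the sequence that was removed here; the smu_cmn version adds the common-to-ASIC message lookup on top of the same steps, and example_wait_for_response() stands in for the deleted smu_v12_0_wait_for_response():

static int example_send_msg_with_param(struct smu_context *smu,
				       uint16_t index, uint32_t param,
				       uint32_t *read_arg)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	mutex_lock(&smu->message_lock);

	/* The mailbox must be idle before a new message is written. */
	ret = example_wait_for_response(smu);
	if (ret)
		goto out;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);	  /* clear the response */
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param); /* message argument */
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, index); /* fire the message */

	ret = example_wait_for_response(smu);
	if (!ret && read_arg)
		*read_arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
out:
	mutex_unlock(&smu->message_lock);
	return ret;
}
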
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index c2e0fbbccf56..cf43629d29d2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -522,11 +522,9 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
- if (adev->psp.ras.ras) {
- ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
- if (ret)
- goto err4;
- }
+ ret = smu_v11_0_i2c_control_init(&adev->pm.smu_i2c);
+ if (ret)
+ goto err4;
return 0;
@@ -562,8 +560,7 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
(struct vega20_smumgr *)(hwmgr->smu_backend);
struct amdgpu_device *adev = hwmgr->adev;
- if (adev->psp.ras.ras)
- smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
+ smu_v11_0_i2c_control_fini(&adev->pm.smu_i2c);
if (priv) {
amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 3da71a088b92..0ecc18b55ffb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -644,9 +644,6 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
/* sclk is bigger than max sclk in the dependence table */
*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i - 1].vddc -
- (uint16_t)VDDC_VDDCI_DELTA));
if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
*voltage |= (data->vbios_boot_state.vddci_bootup_value *
@@ -654,8 +651,13 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
else if (dep_table->entries[i - 1].vddci)
*voltage |= (dep_table->entries[i - 1].vddci *
VOLTAGE_SCALE) << VDDC_SHIFT;
- else
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i - 1].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+
*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
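
Put back together, the vegam hunks above only change when the VDDCI table lookup happens: it now runs solely in the fall-through branch that consumes its result, instead of unconditionally before the branch. The resulting selection order, restated in one place (the shift in the first branch is cut off in the hunk above and is assumed here):

if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
	/* VDDCI is not controlled: reuse the VBIOS boot-up value
	 * (VDDCI_SHIFT assumed; the line is truncated in the hunk above). */
	*voltage |= (data->vbios_boot_state.vddci_bootup_value *
		     VOLTAGE_SCALE) << VDDCI_SHIFT;
} else if (dep_table->entries[i - 1].vddci) {
	/* The dependency table provides a VDDCI value directly. */
	*voltage |= (dep_table->entries[i - 1].vddci *
		     VOLTAGE_SCALE) << VDDC_SHIFT;
} else {
	/* Only now is the closest-match lookup needed (and paid for). */
	uint16_t vddci = phm_find_closest_vddci(&data->vddci_voltage_table,
						dep_table->entries[i - 1].vddc -
						(uint16_t)VDDC_VDDCI_DELTA);

	*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
}
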
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 5e7ea0459d01..903f4f304647 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -173,8 +173,6 @@ static int aspeed_gfx_load(struct drm_device *drm)
drm_mode_config_reset(drm);
- drm_fbdev_generic_setup(drm, 32);
-
return 0;
}
@@ -225,6 +223,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
+ drm_fbdev_generic_setup(&priv->drm, 32);
return 0;
err_unload:
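
This change (and the matching hibmc one further down) moves drm_fbdev_generic_setup() out of the ->load() path so the generic fbdev client only attaches once the device is fully registered. A minimal probe-ordering sketch under that rule; example_init_drm_device() is a hypothetical stand-in for the driver's own setup:

static int example_probe(struct platform_device *pdev)
{
	struct drm_device *drm;
	int ret;

	drm = example_init_drm_device(pdev);	/* hypothetical driver setup */
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* fbdev emulation is set up only after registration succeeded. */
	drm_fbdev_generic_setup(drm, 32);

	return 0;
}
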
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index f0a66ef47e5a..25ce42e79995 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -73,8 +73,11 @@ enum drm_mode_status drm_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode);
-enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode);
+int
+drm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx,
+ enum drm_mode_status *status);
struct drm_encoder *
drm_connector_get_single_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index da0d96a69570..88146f7245c5 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -227,18 +227,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
-/**
- * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
- * @fb_helper: driver-allocated fbdev helper, can be NULL
- *
- * This should be called from driver's drm &drm_driver.lastclose callback
- * when implementing an fbcon on top of kms using this helper. This ensures that
- * the user isn't greeted with a black screen when e.g. X dies.
- *
- * RETURNS:
- * Zero if everything went ok, negative error code otherwise.
- */
-int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+static int
+__drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
+ bool force)
{
bool do_delayed;
int ret;
@@ -250,7 +241,16 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
return 0;
mutex_lock(&fb_helper->lock);
- ret = drm_client_modeset_commit(&fb_helper->client);
+ if (force) {
+ /*
+ * Yes this is the _locked version which expects the master lock
+ * to be held. But for forced restores we're intentionally
+ * racing here, see drm_fb_helper_set_par().
+ */
+ ret = drm_client_modeset_commit_locked(&fb_helper->client);
+ } else {
+ ret = drm_client_modeset_commit(&fb_helper->client);
+ }
do_delayed = fb_helper->delayed_hotplug;
if (do_delayed)
@@ -262,6 +262,22 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
return ret;
}
+
+/**
+ * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
+ *
+ * This should be called from driver's drm &drm_driver.lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ *
+ * RETURNS:
+ * Zero if everything went ok, negative error code otherwise.
+ */
+int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+{
+ return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false);
+}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
#ifdef CONFIG_MAGIC_SYSRQ
@@ -1318,6 +1334,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct fb_var_screeninfo *var = &info->var;
+ bool force;
if (oops_in_progress)
return -EBUSY;
@@ -1327,7 +1344,25 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EINVAL;
}
- drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+ /*
+ * Normally we want to make sure that a kms master takes precedence over
+ * fbdev, to avoid fbdev flickering and occasionally stealing the
+ * display status. But Xorg first sets the vt back to text mode using
+ * the KDSET IOCTL with KD_TEXT, and only after that drops the master
+ * status when exiting.
+ *
+ * In the past this was caught by drm_fb_helper_lastclose(), but on
+ * modern systems where logind always keeps a drm fd open to orchestrate
+ * the vt switching, this doesn't work.
+ *
+ * To not break the userspace ABI we have this special case here, which
+ * is only used for the above case. Everything else uses the normal
+ * commit function, which ensures that we never steal the display from
+ * an active drm master.
+ */
+ force = var->activate & FB_ACTIVATE_KD_TEXT;
+
+ __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);
return 0;
}
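
What carries the policy above is the choice between drm_client_modeset_commit(), which defers to an active DRM master, and drm_client_modeset_commit_locked(), which commits regardless. A condensed restatement of how set_par() now selects between them:

/* Condensed from the hunks above: only an fbcon activation carrying
 * FB_ACTIVATE_KD_TEXT (the Xorg KDSET(KD_TEXT) case) forces a restore
 * past an active DRM master; every other caller keeps the safe path. */
static int example_set_par(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	bool force = info->var.activate & FB_ACTIVATE_KD_TEXT;

	__drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);
	return 0;
}
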
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index ffd95bfeaa94..d00ea384dcbf 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data {
int orientation;
};
-static const struct drm_dmi_panel_orientation_data acer_s1003 = {
- .width = 800,
- .height = 1280,
- .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
-};
-
static const struct drm_dmi_panel_orientation_data asus_t100ha = {
.width = 800,
.height = 1280,
@@ -114,13 +108,19 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
},
- .driver_data = (void *)&acer_s1003,
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Asus T100HA */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
},
.driver_data = (void *)&asus_t100ha,
+ }, { /* Asus T101HA */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index e0ed58d291ed..d6017726cc2a 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -86,17 +86,19 @@ drm_mode_validate_flag(const struct drm_display_mode *mode,
return MODE_OK;
}
-static enum drm_mode_status
+static int
drm_mode_validate_pipeline(struct drm_display_mode *mode,
- struct drm_connector *connector)
+ struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ enum drm_mode_status *status)
{
struct drm_device *dev = connector->dev;
- enum drm_mode_status ret = MODE_OK;
struct drm_encoder *encoder;
+ int ret;
/* Step 1: Validate against connector */
- ret = drm_connector_mode_valid(connector, mode);
- if (ret != MODE_OK)
+ ret = drm_connector_mode_valid(connector, mode, ctx, status);
+ if (ret || *status != MODE_OK)
return ret;
/* Step 2: Validate against encoders and crtcs */
@@ -104,8 +106,8 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
struct drm_bridge *bridge;
struct drm_crtc *crtc;
- ret = drm_encoder_mode_valid(encoder, mode);
- if (ret != MODE_OK) {
+ *status = drm_encoder_mode_valid(encoder, mode);
+ if (*status != MODE_OK) {
/* No point in continuing for crtc check as this encoder
* will not accept the mode anyway. If all encoders
* reject the mode then, at exit, ret will not be
@@ -114,10 +116,10 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
}
bridge = drm_bridge_chain_get_first_bridge(encoder);
- ret = drm_bridge_chain_mode_valid(bridge,
- &connector->display_info,
- mode);
- if (ret != MODE_OK) {
+ *status = drm_bridge_chain_mode_valid(bridge,
+ &connector->display_info,
+ mode);
+ if (*status != MODE_OK) {
/* There is also no point in continuing for crtc check
* here. */
continue;
@@ -127,17 +129,17 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
if (!drm_encoder_crtc_ok(encoder, crtc))
continue;
- ret = drm_crtc_mode_valid(crtc, mode);
- if (ret == MODE_OK) {
+ *status = drm_crtc_mode_valid(crtc, mode);
+ if (*status == MODE_OK) {
/* If we get to this point there is at least
* one combination of encoder+crtc that works
* for this mode. Lets return now. */
- return ret;
+ return 0;
}
}
}
- return ret;
+ return 0;
}
static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
@@ -198,16 +200,27 @@ enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
return encoder_funcs->mode_valid(encoder, mode);
}
-enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+int
+drm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx,
+ enum drm_mode_status *status)
{
const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
+ int ret = 0;
+
+ if (!connector_funcs)
+ *status = MODE_OK;
+ else if (connector_funcs->mode_valid_ctx)
+ ret = connector_funcs->mode_valid_ctx(connector, mode, ctx,
+ status);
+ else if (connector_funcs->mode_valid)
+ *status = connector_funcs->mode_valid(connector, mode);
+ else
+ *status = MODE_OK;
- if (!connector_funcs || !connector_funcs->mode_valid)
- return MODE_OK;
-
- return connector_funcs->mode_valid(connector, mode);
+ return ret;
}
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
@@ -385,8 +398,9 @@ EXPORT_SYMBOL(drm_helper_probe_detect);
* (if specified)
* - drm_mode_validate_flag() checks the modes against basic connector
* capabilities (interlace_allowed,doublescan_allowed,stereo_allowed)
- * - the optional &drm_connector_helper_funcs.mode_valid helper can perform
- * driver and/or sink specific checks
+ * - the optional &drm_connector_helper_funcs.mode_valid or
+ * &drm_connector_helper_funcs.mode_valid_ctx helpers can perform driver
+ * and/or sink specific checks
* - the optional &drm_crtc_helper_funcs.mode_valid,
* &drm_bridge_funcs.mode_valid and &drm_encoder_helper_funcs.mode_valid
* helpers can perform driver and/or source specific checks which are also
@@ -517,22 +531,39 @@ retry:
mode_flags |= DRM_MODE_FLAG_3D_MASK;
list_for_each_entry(mode, &connector->modes, head) {
- if (mode->status == MODE_OK)
- mode->status = drm_mode_validate_driver(dev, mode);
+ if (mode->status != MODE_OK)
+ continue;
+
+ mode->status = drm_mode_validate_driver(dev, mode);
+ if (mode->status != MODE_OK)
+ continue;
- if (mode->status == MODE_OK)
- mode->status = drm_mode_validate_size(mode, maxX, maxY);
+ mode->status = drm_mode_validate_size(mode, maxX, maxY);
+ if (mode->status != MODE_OK)
+ continue;
- if (mode->status == MODE_OK)
- mode->status = drm_mode_validate_flag(mode, mode_flags);
+ mode->status = drm_mode_validate_flag(mode, mode_flags);
+ if (mode->status != MODE_OK)
+ continue;
- if (mode->status == MODE_OK)
- mode->status = drm_mode_validate_pipeline(mode,
- connector);
+ ret = drm_mode_validate_pipeline(mode, connector, &ctx,
+ &mode->status);
+ if (ret) {
+ drm_dbg_kms(dev,
+ "drm_mode_validate_pipeline failed: %d\n",
+ ret);
+
+ if (drm_WARN_ON_ONCE(dev, ret != -EDEADLK)) {
+ mode->status = MODE_ERROR;
+ } else {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+ }
- if (mode->status == MODE_OK)
- mode->status = drm_mode_validate_ycbcr420(mode,
- connector);
+ if (mode->status != MODE_OK)
+ continue;
+ mode->status = drm_mode_validate_ycbcr420(mode, connector);
}
prune:
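
The int-returning drm_connector_mode_valid() above exists so that a connector's new ->mode_valid_ctx() hook can take locks through the supplied acquire context and report -EDEADLK, which the probe loop answers with drm_modeset_backoff() and a full retry instead of marking the mode bad. A minimal driver-side sketch under those assumptions; the two example_* helpers are hypothetical:

static int example_mode_valid_ctx(struct drm_connector *connector,
				  struct drm_display_mode *mode,
				  struct drm_modeset_acquire_ctx *ctx,
				  enum drm_mode_status *status)
{
	struct drm_crtc *crtc = example_pick_crtc(connector);	/* hypothetical */
	int ret;

	/* Locks taken with @ctx may return -EDEADLK; hand that back as-is. */
	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret)
		return ret;

	*status = example_validate_against_crtc(crtc, mode);	/* hypothetical */
	return 0;
}
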
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index 619f81435c1b..58b89ec11b0e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -61,7 +61,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
- int ret;
+ int ret = 0;
if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
@@ -92,7 +92,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
if (ret)
clear_dma_max_seg_size(subdrv_dev);
- return 0;
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index fcee33a43aca..03be31427181 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1498,7 +1498,6 @@ static int g2d_probe(struct platform_device *pdev)
g2d->irq = platform_get_irq(pdev, 0);
if (g2d->irq < 0) {
- dev_err(dev, "failed to get irq\n");
ret = g2d->irq;
goto err_put_clk;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index a86abc173605..3821ea76a703 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -269,8 +269,10 @@ static void mic_pre_enable(struct drm_bridge *bridge)
goto unlock;
ret = pm_runtime_get_sync(mic->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(mic->dev);
goto unlock;
+ }
mic_set_path(mic, 1);
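
pm_runtime_get_sync() bumps the device's usage count even when it fails, so an error path has to drop that reference; the fix here does so with pm_runtime_put_noidle(). The same pattern in isolation, with an illustrative function name:

static int example_runtime_resume_and_use(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* Release the reference taken above without idling the device. */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... use the now-resumed device ... */

	pm_runtime_put(dev);
	return 0;
}
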
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 249c298e0987..eea13e60187b 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -308,8 +308,6 @@ static int hibmc_load(struct drm_device *dev)
/* reset all the states of crtc/plane/encoder/connector */
drm_mode_config_reset(dev);
- drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
-
return 0;
err:
@@ -356,6 +354,9 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
ret);
goto err_unload;
}
+
+ drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+
return 0;
err_unload:
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 41a27fd5dbc7..bda4c0e408f8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -112,6 +112,7 @@ gt-y += \
gt/intel_ring_submission.o \
gt/intel_rps.o \
gt/intel_sseu.o \
+ gt/intel_sseu_debugfs.o \
gt/intel_timeline.o \
gt/intel_workarounds.o \
gt/shmem_utils.o \
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 6593e2c38043..c53c85d38fa5 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -722,6 +722,9 @@ parse_power_conservation_features(struct drm_i915_private *dev_priv,
*/
if (!(power->drrs & BIT(panel_type)))
dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+
+ if (bdb->version >= 232)
+ dev_priv->vbt.edp.hobl = power->hobl & BIT(panel_type);
}
static void
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 45f7f33d1144..bb91dace304a 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2080,8 +2080,15 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
* Explicitly stating here that this seems to be currently
* rather a Hack, than final solution.
*/
- if (IS_TIGERLAKE(dev_priv))
- min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
+ if (IS_TIGERLAKE(dev_priv)) {
+ /*
+ * Clamp to max_cdclk_freq in case pixel rate is higher,
+ * in order not to break 8K modes, while still leaving the W/A in place.
+ */
+ min_cdclk = max_t(int, min_cdclk,
+ min_t(int, crtc_state->pixel_rate,
+ dev_priv->max_cdclk_freq));
+ }
if (min_cdclk > dev_priv->max_cdclk_freq) {
drm_dbg_kms(&dev_priv->drm,
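
The workaround's requirement is now expressed as min_cdclk = max(min_cdclk, min(pixel_rate, max_cdclk_freq)), so a pixel rate above the platform's maximum CDCLK no longer pushes min_cdclk past the failure check that follows. A tiny worked example with assumed figures:

/* Assumed figures, purely for illustration: before the clamp, a 1188000 kHz
 * pixel rate would have raised min_cdclk above a 652800 kHz maximum and the
 * check below would fail; with the clamp the result is 652800. */
static int example_tgl_min_cdclk(int min_cdclk, int pixel_rate, int max_cdclk_freq)
{
	return max_t(int, min_cdclk, min_t(int, pixel_rate, max_cdclk_freq));
}
/* example_tgl_min_cdclk(556800, 1188000, 652800) == 652800 */
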
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 77b04bb3ec62..eccaa79cb4a9 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -264,6 +264,18 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
if (!icl_combo_phy_enabled(dev_priv, phy))
return false;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN0(phy),
+ ICL_PORT_TX_DW8_ODCC_CLK_SEL |
+ ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK,
+ ICL_PORT_TX_DW8_ODCC_CLK_SEL |
+ ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2);
+
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN0(phy),
+ DCC_MODE_SELECT_MASK,
+ DCC_MODE_SELECT_CONTINUOSLY);
+ }
+
ret = cnl_verify_procmon_ref_values(dev_priv, phy);
if (phy_is_master(dev_priv, phy)) {
@@ -375,6 +387,19 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
skip_phy_misc:
+ if (INTEL_GEN(dev_priv) >= 12) {
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN0(phy));
+ val &= ~ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK;
+ val |= ICL_PORT_TX_DW8_ODCC_CLK_SEL;
+ val |= ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2;
+ intel_de_write(dev_priv, ICL_PORT_TX_DW8_GRP(phy), val);
+
+ val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
+ val &= ~DCC_MODE_SELECT_MASK;
+ val |= DCC_MODE_SELECT_CONTINUOSLY;
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
+ }
+
cnl_set_procmon_ref_values(dev_priv, phy);
if (phy_is_master(dev_priv, phy)) {
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 025d4052f6f8..2c484b55bcdf 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -707,8 +707,10 @@ static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] =
};
static const struct ddi_buf_trans *
-bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (dev_priv->vbt.edp.low_vswing) {
*n_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
return bdw_ddi_translations_edp;
@@ -719,8 +721,10 @@ bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct ddi_buf_trans *
-skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+skl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (IS_SKL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
return skl_y_ddi_translations_dp;
@@ -734,8 +738,10 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct ddi_buf_trans *
-kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+kbl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (IS_KBL_ULX(dev_priv) ||
IS_CFL_ULX(dev_priv) ||
IS_CML_ULX(dev_priv)) {
@@ -753,8 +759,10 @@ kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct ddi_buf_trans *
-skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+skl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (dev_priv->vbt.edp.low_vswing) {
if (IS_SKL_ULX(dev_priv) ||
IS_KBL_ULX(dev_priv) ||
@@ -777,9 +785,9 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
if (IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv) ||
IS_COMETLAKE(dev_priv))
- return kbl_get_buf_trans_dp(dev_priv, n_entries);
+ return kbl_get_buf_trans_dp(encoder, n_entries);
else
- return skl_get_buf_trans_dp(dev_priv, n_entries);
+ return skl_get_buf_trans_dp(encoder, n_entries);
}
static const struct ddi_buf_trans *
@@ -807,20 +815,21 @@ static int skl_buf_trans_num_entries(enum port port, int n_entries)
}
static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
- enum port port, int *n_entries)
+intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv) ||
IS_COMETLAKE(dev_priv)) {
const struct ddi_buf_trans *ddi_translations =
- kbl_get_buf_trans_dp(dev_priv, n_entries);
- *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+ kbl_get_buf_trans_dp(encoder, n_entries);
+ *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
return ddi_translations;
} else if (IS_SKYLAKE(dev_priv)) {
const struct ddi_buf_trans *ddi_translations =
- skl_get_buf_trans_dp(dev_priv, n_entries);
- *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+ skl_get_buf_trans_dp(encoder, n_entries);
+ *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
return ddi_translations;
} else if (IS_BROADWELL(dev_priv)) {
*n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
@@ -835,16 +844,17 @@ intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
}
static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv,
- enum port port, int *n_entries)
+intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (IS_GEN9_BC(dev_priv)) {
const struct ddi_buf_trans *ddi_translations =
- skl_get_buf_trans_edp(dev_priv, n_entries);
- *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+ skl_get_buf_trans_edp(encoder, n_entries);
+ *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
return ddi_translations;
} else if (IS_BROADWELL(dev_priv)) {
- return bdw_get_buf_trans_edp(dev_priv, n_entries);
+ return bdw_get_buf_trans_edp(encoder, n_entries);
} else if (IS_HASWELL(dev_priv)) {
*n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
return hsw_ddi_translations_dp;
@@ -871,9 +881,11 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
}
static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
+intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder,
int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (IS_GEN9_BC(dev_priv)) {
return skl_get_buf_trans_hdmi(dev_priv, n_entries);
} else if (IS_BROADWELL(dev_priv)) {
@@ -889,33 +901,36 @@ intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
}
static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+bxt_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
{
*n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
return bxt_ddi_translations_dp;
}
static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+bxt_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (dev_priv->vbt.edp.low_vswing) {
*n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
return bxt_ddi_translations_edp;
}
- return bxt_get_buf_trans_dp(dev_priv, n_entries);
+ return bxt_get_buf_trans_dp(encoder, n_entries);
}
static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+bxt_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
{
*n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
return bxt_ddi_translations_hdmi;
}
static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (voltage == VOLTAGE_INFO_0_85V) {
@@ -935,8 +950,9 @@ cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (voltage == VOLTAGE_INFO_0_85V) {
@@ -956,8 +972,9 @@ cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (dev_priv->vbt.edp.low_vswing) {
@@ -976,14 +993,16 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
return NULL;
} else {
- return cnl_get_buf_trans_dp(dev_priv, n_entries);
+ return cnl_get_buf_trans_dp(encoder, n_entries);
}
}
static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+icl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (type == INTEL_OUTPUT_HDMI) {
*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
return icl_combo_phy_ddi_translations_hdmi;
@@ -1000,7 +1019,7 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
}
static const struct icl_mg_phy_ddi_buf_trans *
-icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+icl_get_mg_buf_trans(struct intel_encoder *encoder, int type, int rate,
int *n_entries)
{
if (type == INTEL_OUTPUT_HDMI) {
@@ -1016,7 +1035,7 @@ icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
}
static const struct cnl_ddi_buf_trans *
-ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ehl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
int *n_entries)
{
if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP) {
@@ -1024,15 +1043,15 @@ ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
return ehl_combo_phy_ddi_translations_dp;
}
- return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+ return icl_get_combo_buf_trans(encoder, type, rate, n_entries);
}
static const struct cnl_ddi_buf_trans *
-tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+tgl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
int *n_entries)
{
if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) {
- return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+ return icl_get_combo_buf_trans(encoder, type, rate, n_entries);
} else if (rate > 270000) {
*n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
return tgl_combo_phy_ddi_translations_dp_hbr2;
@@ -1043,7 +1062,7 @@ tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
}
static const struct tgl_dkl_phy_ddi_buf_trans *
-tgl_get_dkl_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+tgl_get_dkl_buf_trans(struct intel_encoder *encoder, int type, int rate,
int *n_entries)
{
if (type == INTEL_OUTPUT_HDMI) {
@@ -1066,34 +1085,34 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
if (INTEL_GEN(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
- tgl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+ tgl_get_combo_buf_trans(encoder, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
- tgl_get_dkl_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0,
+ tgl_get_dkl_buf_trans(encoder, INTEL_OUTPUT_HDMI, 0,
&n_entries);
default_entry = n_entries - 1;
} else if (INTEL_GEN(dev_priv) == 11) {
if (intel_phy_is_combo(dev_priv, phy))
- icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+ icl_get_combo_buf_trans(encoder, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
- icl_get_mg_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0,
+ icl_get_mg_buf_trans(encoder, INTEL_OUTPUT_HDMI, 0,
&n_entries);
default_entry = n_entries - 1;
} else if (IS_CANNONLAKE(dev_priv)) {
- cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
+ cnl_get_buf_trans_hdmi(encoder, &n_entries);
default_entry = n_entries - 1;
} else if (IS_GEN9_LP(dev_priv)) {
- bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+ bxt_get_buf_trans_hdmi(encoder, &n_entries);
default_entry = n_entries - 1;
} else if (IS_GEN9_BC(dev_priv)) {
- intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
default_entry = 8;
} else if (IS_BROADWELL(dev_priv)) {
- intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
default_entry = 7;
} else if (IS_HASWELL(dev_priv)) {
- intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
default_entry = 6;
} else {
drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n");
@@ -1131,10 +1150,10 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv,
&n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port,
+ ddi_translations = intel_ddi_get_buf_trans_edp(encoder,
&n_entries);
else
- ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port,
+ ddi_translations = intel_ddi_get_buf_trans_dp(encoder,
&n_entries);
/* If we're boosting the current, set bit 31 of trans1 */
@@ -1163,7 +1182,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
enum port port = encoder->port;
const struct ddi_buf_trans *ddi_translations;
- ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
@@ -1184,16 +1203,30 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port)
{
- i915_reg_t reg = DDI_BUF_CTL(port);
- int i;
+ if (IS_BROXTON(dev_priv)) {
+ udelay(16);
+ return;
+ }
- for (i = 0; i < 16; i++) {
- udelay(1);
- if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE)
- return;
+ if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+ DDI_BUF_IS_IDLE), 8))
+ drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get idle\n",
+ port_name(port));
+}
+
+static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ /* Wait > 518 usecs for DDI_BUF_CTL to be non idle */
+ if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
+ usleep_range(518, 1000);
+ return;
}
- drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c idle bit\n",
- port_name(port));
+
+ if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+ DDI_BUF_IS_IDLE), 500))
+ drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get active\n",
+ port_name(port));
}
static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
@@ -1394,10 +1427,9 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- intel_dp->DP = intel_dig_port->saved_port_bits |
+ intel_dp->DP = dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
}
@@ -2070,9 +2102,8 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
u8 iboost;
if (type == INTEL_OUTPUT_HDMI)
@@ -2085,11 +2116,13 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int n_entries;
if (type == INTEL_OUTPUT_HDMI)
- ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
else if (type == INTEL_OUTPUT_EDP)
- ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
+ ddi_translations = intel_ddi_get_buf_trans_edp(encoder,
+ &n_entries);
else
- ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
+ ddi_translations = intel_ddi_get_buf_trans_dp(encoder,
+ &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
@@ -2105,9 +2138,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
return;
}
- _skl_ddi_set_iboost(dev_priv, port, iboost);
+ _skl_ddi_set_iboost(dev_priv, encoder->port, iboost);
- if (port == PORT_A && intel_dig_port->max_lanes == 4)
+ if (encoder->port == PORT_A && dig_port->max_lanes == 4)
_skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
}
@@ -2120,11 +2153,11 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
int n_entries;
if (type == INTEL_OUTPUT_HDMI)
- ddi_translations = bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+ ddi_translations = bxt_get_buf_trans_hdmi(encoder, &n_entries);
else if (type == INTEL_OUTPUT_EDP)
- ddi_translations = bxt_get_buf_trans_edp(dev_priv, &n_entries);
+ ddi_translations = bxt_get_buf_trans_edp(encoder, &n_entries);
else
- ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries);
+ ddi_translations = bxt_get_buf_trans_dp(encoder, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
@@ -2148,36 +2181,36 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
- tgl_get_combo_buf_trans(dev_priv, encoder->type,
+ tgl_get_combo_buf_trans(encoder, encoder->type,
intel_dp->link_rate, &n_entries);
else
- tgl_get_dkl_buf_trans(dev_priv, encoder->type,
+ tgl_get_dkl_buf_trans(encoder, encoder->type,
intel_dp->link_rate, &n_entries);
} else if (INTEL_GEN(dev_priv) == 11) {
if (IS_ELKHARTLAKE(dev_priv))
- ehl_get_combo_buf_trans(dev_priv, encoder->type,
+ ehl_get_combo_buf_trans(encoder, encoder->type,
intel_dp->link_rate, &n_entries);
else if (intel_phy_is_combo(dev_priv, phy))
- icl_get_combo_buf_trans(dev_priv, encoder->type,
+ icl_get_combo_buf_trans(encoder, encoder->type,
intel_dp->link_rate, &n_entries);
else
- icl_get_mg_buf_trans(dev_priv, encoder->type,
+ icl_get_mg_buf_trans(encoder, encoder->type,
intel_dp->link_rate, &n_entries);
} else if (IS_CANNONLAKE(dev_priv)) {
if (encoder->type == INTEL_OUTPUT_EDP)
- cnl_get_buf_trans_edp(dev_priv, &n_entries);
+ cnl_get_buf_trans_edp(encoder, &n_entries);
else
- cnl_get_buf_trans_dp(dev_priv, &n_entries);
+ cnl_get_buf_trans_dp(encoder, &n_entries);
} else if (IS_GEN9_LP(dev_priv)) {
if (encoder->type == INTEL_OUTPUT_EDP)
- bxt_get_buf_trans_edp(dev_priv, &n_entries);
+ bxt_get_buf_trans_edp(encoder, &n_entries);
else
- bxt_get_buf_trans_dp(dev_priv, &n_entries);
+ bxt_get_buf_trans_dp(encoder, &n_entries);
} else {
if (encoder->type == INTEL_OUTPUT_EDP)
- intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
+ intel_ddi_get_buf_trans_edp(encoder, &n_entries);
else
- intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
+ intel_ddi_get_buf_trans_dp(encoder, &n_entries);
}
if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
@@ -2210,11 +2243,11 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
u32 val;
if (type == INTEL_OUTPUT_HDMI)
- ddi_translations = cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
+ ddi_translations = cnl_get_buf_trans_hdmi(encoder, &n_entries);
else if (type == INTEL_OUTPUT_EDP)
- ddi_translations = cnl_get_buf_trans_edp(dev_priv, &n_entries);
+ ddi_translations = cnl_get_buf_trans_edp(encoder, &n_entries);
else
- ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries);
+ ddi_translations = cnl_get_buf_trans_dp(encoder, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
@@ -2331,22 +2364,23 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
}
-static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
- u32 level, enum phy phy, int type,
- int rate)
+static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
+ u32 level, int type, int rate)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
const struct cnl_ddi_buf_trans *ddi_translations = NULL;
u32 n_entries, val;
int ln;
if (INTEL_GEN(dev_priv) >= 12)
- ddi_translations = tgl_get_combo_buf_trans(dev_priv, type, rate,
+ ddi_translations = tgl_get_combo_buf_trans(encoder, type, rate,
&n_entries);
else if (IS_ELKHARTLAKE(dev_priv))
- ddi_translations = ehl_get_combo_buf_trans(dev_priv, type, rate,
+ ddi_translations = ehl_get_combo_buf_trans(encoder, type, rate,
&n_entries);
else
- ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
+ ddi_translations = icl_get_combo_buf_trans(encoder, type, rate,
&n_entries);
if (!ddi_translations)
return;
@@ -2458,7 +2492,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
- icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate);
+ icl_ddi_combo_vswing_program(encoder, level, type, rate);
/* 6. Set training enable to trigger update */
val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
@@ -2482,7 +2516,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
rate = intel_dp->link_rate;
}
- ddi_translations = icl_get_mg_buf_trans(dev_priv, type, rate,
+ ddi_translations = icl_get_mg_buf_trans(encoder, type, rate,
&n_entries);
/* The table does not have values for level 3 and level 9. */
if (level >= n_entries || level == 3 || level == 9) {
@@ -2627,7 +2661,7 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
rate = intel_dp->link_rate;
}
- ddi_translations = tgl_get_dkl_buf_trans(dev_priv, encoder->type, rate,
+ ddi_translations = tgl_get_dkl_buf_trans(encoder, encoder->type, rate,
&n_entries);
if (level >= n_entries)
@@ -3000,15 +3034,15 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
}
static void
-icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
+icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, intel_dig_port->base.port);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
u32 ln0, ln1, pin_assignment;
u8 width;
- if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
+ if (dig_port->tc_mode == TC_PORT_TBT_ALT)
return;
if (INTEL_GEN(dev_priv) >= 12) {
@@ -3027,13 +3061,13 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
/* DPPATC */
- pin_assignment = intel_tc_port_get_pin_assignment_mask(intel_dig_port);
+ pin_assignment = intel_tc_port_get_pin_assignment_mask(dig_port);
width = crtc_state->lane_count;
switch (pin_assignment) {
case 0x0:
drm_WARN_ON(&dev_priv->drm,
- intel_dig_port->tc_mode != TC_PORT_LEGACY);
+ dig_port->tc_mode != TC_PORT_LEGACY);
if (width == 1) {
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
} else {
@@ -3978,10 +4012,9 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv =
- to_i915(intel_dig_port->base.base.dev);
- enum port port = intel_dig_port->base.port;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
u32 dp_tp_ctl, ddi_buf_ctl;
bool wait = false;
@@ -4020,7 +4053,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
- udelay(600);
+ intel_wait_ddi_buf_active(dev_priv, port);
}
static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
@@ -4536,42 +4569,41 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
};
static struct intel_connector *
-intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
+intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_connector *connector;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
connector = intel_connector_alloc();
if (!connector)
return NULL;
- intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
- intel_dig_port->dp.prepare_link_retrain =
- intel_ddi_prepare_link_retrain;
- intel_dig_port->dp.set_link_train = intel_ddi_set_link_train;
- intel_dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
+ dig_port->dp.output_reg = DDI_BUF_CTL(port);
+ dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain;
+ dig_port->dp.set_link_train = intel_ddi_set_link_train;
+ dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
if (INTEL_GEN(dev_priv) >= 12)
- intel_dig_port->dp.set_signal_levels = tgl_set_signal_levels;
+ dig_port->dp.set_signal_levels = tgl_set_signal_levels;
else if (INTEL_GEN(dev_priv) >= 11)
- intel_dig_port->dp.set_signal_levels = icl_set_signal_levels;
+ dig_port->dp.set_signal_levels = icl_set_signal_levels;
else if (IS_CANNONLAKE(dev_priv))
- intel_dig_port->dp.set_signal_levels = cnl_set_signal_levels;
+ dig_port->dp.set_signal_levels = cnl_set_signal_levels;
else if (IS_GEN9_LP(dev_priv))
- intel_dig_port->dp.set_signal_levels = bxt_set_signal_levels;
+ dig_port->dp.set_signal_levels = bxt_set_signal_levels;
else
- intel_dig_port->dp.set_signal_levels = hsw_set_signal_levels;
+ dig_port->dp.set_signal_levels = hsw_set_signal_levels;
- intel_dig_port->dp.voltage_max = intel_ddi_dp_voltage_max;
- intel_dig_port->dp.preemph_max = intel_ddi_dp_preemph_max;
+ dig_port->dp.voltage_max = intel_ddi_dp_voltage_max;
+ dig_port->dp.preemph_max = intel_ddi_dp_preemph_max;
if (INTEL_GEN(dev_priv) < 12) {
- intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
- intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
+ dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
+ dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
}
- if (!intel_dp_init_connector(intel_dig_port, connector)) {
+ if (!intel_dp_init_connector(dig_port, connector)) {
kfree(connector);
return NULL;
}
@@ -4770,29 +4802,29 @@ static bool bdw_digital_port_connected(struct intel_encoder *encoder)
}
static struct intel_connector *
-intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
+intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
{
struct intel_connector *connector;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
connector = intel_connector_alloc();
if (!connector)
return NULL;
- intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
- intel_hdmi_init_connector(intel_dig_port, connector);
+ dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+ intel_hdmi_init_connector(dig_port, connector);
return connector;
}
-static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport)
+static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- if (dport->base.port != PORT_A)
+ if (dig_port->base.port != PORT_A)
return false;
- if (dport->saved_port_bits & DDI_A_4_LANES)
+ if (dig_port->saved_port_bits & DDI_A_4_LANES)
return false;
/* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
@@ -4814,10 +4846,10 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport)
}
static int
-intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
+intel_ddi_max_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dport->base.base.dev);
- enum port port = intel_dport->base.port;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
int max_lanes = 4;
if (INTEL_GEN(dev_priv) >= 11)
@@ -4836,10 +4868,10 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
* wasn't lit up at boot. Force this bit set when needed
* so we use the proper lane count for our calculations.
*/
- if (intel_ddi_a_force_4_lanes(intel_dport)) {
+ if (intel_ddi_a_force_4_lanes(dig_port)) {
drm_dbg_kms(&dev_priv->drm,
"Forcing DDI_A_4_LANES for port A\n");
- intel_dport->saved_port_bits |= DDI_A_4_LANES;
+ dig_port->saved_port_bits |= DDI_A_4_LANES;
max_lanes = 4;
}
@@ -4848,7 +4880,7 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
- struct intel_digital_port *intel_dig_port;
+ struct intel_digital_port *dig_port;
struct intel_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
enum phy phy = intel_port_to_phy(dev_priv, port);
@@ -4877,11 +4909,11 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
return;
}
- intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
- if (!intel_dig_port)
+ dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ if (!dig_port)
return;
- encoder = &intel_dig_port->base;
+ encoder = &dig_port->base;
drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
@@ -4908,49 +4940,49 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->pipe_mask = ~0;
if (INTEL_GEN(dev_priv) >= 11)
- intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
- DDI_BUF_CTL(port)) &
- DDI_BUF_PORT_REVERSAL;
+ dig_port->saved_port_bits =
+ intel_de_read(dev_priv, DDI_BUF_CTL(port))
+ & DDI_BUF_PORT_REVERSAL;
else
- intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
- DDI_BUF_CTL(port)) &
- (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
+ dig_port->saved_port_bits =
+ intel_de_read(dev_priv, DDI_BUF_CTL(port))
+ & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
- intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
- intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
- intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ dig_port->dp.output_reg = INVALID_MMIO_REG;
+ dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
+ dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
if (intel_phy_is_tc(dev_priv, phy)) {
bool is_legacy =
!intel_bios_port_supports_typec_usb(dev_priv, port) &&
!intel_bios_port_supports_tbt(dev_priv, port);
- intel_tc_port_init(intel_dig_port, is_legacy);
+ intel_tc_port_init(dig_port, is_legacy);
encoder->update_prepare = intel_ddi_update_prepare;
encoder->update_complete = intel_ddi_update_complete;
}
drm_WARN_ON(&dev_priv->drm, port > PORT_I);
- intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
+ dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
port - PORT_A;
if (init_dp) {
- if (!intel_ddi_init_dp_connector(intel_dig_port))
+ if (!intel_ddi_init_dp_connector(dig_port))
goto err;
- intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+ dig_port->hpd_pulse = intel_dp_hpd_pulse;
}
/* In theory we don't need the encoder->type check, but leave it just in
* case we have some really bad VBTs... */
if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
- if (!intel_ddi_init_hdmi_connector(intel_dig_port))
+ if (!intel_ddi_init_hdmi_connector(dig_port))
goto err;
}
if (init_lspcon) {
- if (lspcon_init(intel_dig_port))
+ if (lspcon_init(dig_port))
/* TODO: handle hdmi info frame part */
drm_dbg_kms(&dev_priv->drm,
"LSPCON init success on port %c\n",
@@ -4967,26 +4999,26 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
if (INTEL_GEN(dev_priv) >= 11) {
if (intel_phy_is_tc(dev_priv, phy))
- intel_dig_port->connected = intel_tc_port_connected;
+ dig_port->connected = intel_tc_port_connected;
else
- intel_dig_port->connected = lpt_digital_port_connected;
+ dig_port->connected = lpt_digital_port_connected;
} else if (INTEL_GEN(dev_priv) >= 8) {
if (port == PORT_A || IS_GEN9_LP(dev_priv))
- intel_dig_port->connected = bdw_digital_port_connected;
+ dig_port->connected = bdw_digital_port_connected;
else
- intel_dig_port->connected = lpt_digital_port_connected;
+ dig_port->connected = lpt_digital_port_connected;
} else {
if (port == PORT_A)
- intel_dig_port->connected = hsw_digital_port_connected;
+ dig_port->connected = hsw_digital_port_connected;
else
- intel_dig_port->connected = lpt_digital_port_connected;
+ dig_port->connected = lpt_digital_port_connected;
}
- intel_infoframe_init(intel_dig_port);
+ intel_infoframe_init(dig_port);
return;
err:
drm_encoder_cleanup(&encoder->base);
- kfree(intel_dig_port);
+ kfree(dig_port);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 84e2a17b5ecb..729ec6e0d43a 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -1612,13 +1612,13 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
}
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dport,
+ struct intel_digital_port *dig_port,
unsigned int expected_mask)
{
u32 port_mask;
i915_reg_t dpll_reg;
- switch (dport->base.port) {
+ switch (dig_port->base.port) {
case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
dpll_reg = DPLL(0);
@@ -1640,7 +1640,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
port_mask, expected_mask, 1000))
drm_WARN(&dev_priv->drm, 1,
"timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
- dport->base.base.base.id, dport->base.base.name,
+ dig_port->base.base.base.id, dig_port->base.base.name,
intel_de_read(dev_priv, dpll_reg) & port_mask,
expected_mask);
}
@@ -10073,7 +10073,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
- if (crtc_state->limited_color_range)
+ if (crtc_state->limited_color_range &&
+ !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= PIPECONF_COLOR_RANGE_SELECT;
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
@@ -16332,7 +16333,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
* On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
* port is hooked to pipe B. Hence we want plane A feeding pipe B.
*/
- if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
+ INTEL_NUM_PIPES(dev_priv) == 2)
plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
else
plane->i9xx_plane = (enum i9xx_plane_id) pipe;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index f68007ff8a13..e890c8fb779b 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -542,7 +542,7 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dport,
+ struct intel_digital_port *dig_port,
unsigned int expected_mask);
int intel_get_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index d1cb48b3f462..3644752cc5ec 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -1194,7 +1194,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *intel_encoder;
- struct intel_digital_port *intel_dig_port;
+ struct intel_digital_port *dig_port;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
@@ -1207,14 +1207,14 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
continue;
- intel_dig_port = enc_to_dig_port(intel_encoder);
- if (!intel_dig_port->dp.can_mst)
+ dig_port = enc_to_dig_port(intel_encoder);
+ if (!dig_port->dp.can_mst)
continue;
seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
- drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
}
drm_connector_list_iter_end(&conn_iter);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 8a277dfbc070..0c713e83274d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -1817,8 +1817,8 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
+ enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
+ enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
mutex_lock(&power_domains->lock);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 4b0aaa3081c9..e8f809161c75 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -279,10 +279,10 @@ enum check_link_response {
*/
struct intel_hdcp_shim {
/* Outputs the transmitter's An and Aksv values to the receiver. */
- int (*write_an_aksv)(struct intel_digital_port *intel_dig_port, u8 *an);
+ int (*write_an_aksv)(struct intel_digital_port *dig_port, u8 *an);
/* Reads the receiver's key selection vector */
- int (*read_bksv)(struct intel_digital_port *intel_dig_port, u8 *bksv);
+ int (*read_bksv)(struct intel_digital_port *dig_port, u8 *bksv);
/*
* Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The
@@ -290,52 +290,52 @@ struct intel_hdcp_shim {
* different. Call it BSTATUS since that's the name the HDMI spec
* uses and it was there first.
*/
- int (*read_bstatus)(struct intel_digital_port *intel_dig_port,
+ int (*read_bstatus)(struct intel_digital_port *dig_port,
u8 *bstatus);
/* Determines whether a repeater is present downstream */
- int (*repeater_present)(struct intel_digital_port *intel_dig_port,
+ int (*repeater_present)(struct intel_digital_port *dig_port,
bool *repeater_present);
/* Reads the receiver's Ri' value */
- int (*read_ri_prime)(struct intel_digital_port *intel_dig_port, u8 *ri);
+ int (*read_ri_prime)(struct intel_digital_port *dig_port, u8 *ri);
/* Determines if the receiver's KSV FIFO is ready for consumption */
- int (*read_ksv_ready)(struct intel_digital_port *intel_dig_port,
+ int (*read_ksv_ready)(struct intel_digital_port *dig_port,
bool *ksv_ready);
/* Reads the ksv fifo for num_downstream devices */
- int (*read_ksv_fifo)(struct intel_digital_port *intel_dig_port,
+ int (*read_ksv_fifo)(struct intel_digital_port *dig_port,
int num_downstream, u8 *ksv_fifo);
/* Reads a 32-bit part of V' from the receiver */
- int (*read_v_prime_part)(struct intel_digital_port *intel_dig_port,
+ int (*read_v_prime_part)(struct intel_digital_port *dig_port,
int i, u32 *part);
/* Enables HDCP signalling on the port */
- int (*toggle_signalling)(struct intel_digital_port *intel_dig_port,
+ int (*toggle_signalling)(struct intel_digital_port *dig_port,
bool enable);
/* Ensures the link is still protected */
- bool (*check_link)(struct intel_digital_port *intel_dig_port);
+ bool (*check_link)(struct intel_digital_port *dig_port);
/* Detects panel's hdcp capability. This is optional for HDMI. */
- int (*hdcp_capable)(struct intel_digital_port *intel_dig_port,
+ int (*hdcp_capable)(struct intel_digital_port *dig_port,
bool *hdcp_capable);
/* HDCP adaptation(DP/HDMI) required on the port */
enum hdcp_wired_protocol protocol;
/* Detects whether sink is HDCP2.2 capable */
- int (*hdcp_2_2_capable)(struct intel_digital_port *intel_dig_port,
+ int (*hdcp_2_2_capable)(struct intel_digital_port *dig_port,
bool *capable);
/* Write HDCP2.2 messages */
- int (*write_2_2_msg)(struct intel_digital_port *intel_dig_port,
+ int (*write_2_2_msg)(struct intel_digital_port *dig_port,
void *buf, size_t size);
/* Read HDCP2.2 messages */
- int (*read_2_2_msg)(struct intel_digital_port *intel_dig_port,
+ int (*read_2_2_msg)(struct intel_digital_port *dig_port,
u8 msg_id, void *buf, size_t size);
/*
@@ -343,11 +343,11 @@ struct intel_hdcp_shim {
* type to Receivers. In DP HDCP2.2 Stream type is one of the input to
* the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI.
*/
- int (*config_stream_type)(struct intel_digital_port *intel_dig_port,
+ int (*config_stream_type)(struct intel_digital_port *dig_port,
bool is_repeater, u8 type);
/* HDCP2.2 Link Integrity Check */
- int (*check_2_2_link)(struct intel_digital_port *intel_dig_port);
+ int (*check_2_2_link)(struct intel_digital_port *dig_port);
};
struct intel_hdcp {
@@ -1434,9 +1434,9 @@ struct intel_dp_mst_encoder {
};
static inline enum dpio_channel
-vlv_dport_to_channel(struct intel_digital_port *dport)
+vlv_dig_port_to_channel(struct intel_digital_port *dig_port)
{
- switch (dport->base.port) {
+ switch (dig_port->base.port) {
case PORT_B:
case PORT_D:
return DPIO_CH0;
@@ -1448,9 +1448,9 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
}
static inline enum dpio_phy
-vlv_dport_to_phy(struct intel_digital_port *dport)
+vlv_dig_port_to_phy(struct intel_digital_port *dig_port)
{
- switch (dport->base.port) {
+ switch (dig_port->base.port) {
case PORT_B:
case PORT_C:
return DPIO_PHY0;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index c9b93c5706af..d6295eb20b63 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -137,14 +137,12 @@ static const u8 valid_dsc_slicecount[] = {1, 2, 4};
*
* If a CPU or PCH DP output is attached to an eDP panel, this function
* will return true, and false otherwise.
- *
- * This function is not safe to use prior to encoder type being set.
*/
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
+ return dig_port->base.type == INTEL_OUTPUT_EDP;
}
static void intel_dp_link_down(struct intel_encoder *encoder,
@@ -218,10 +216,10 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- int source_max = intel_dig_port->max_lanes;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ int source_max = dig_port->max_lanes;
int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
- int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);
+ int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
return min3(source_max, sink_max, fia_max);
}
@@ -253,8 +251,8 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &intel_dig_port->base;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int max_dotclk = dev_priv->max_dotclk_freq;
int ds_max_dotclk;
@@ -780,7 +778,7 @@ static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = intel_dp->pps_pipe;
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
@@ -790,14 +788,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
if (drm_WARN(&dev_priv->drm,
intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
"skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name))
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name))
return;
drm_dbg_kms(&dev_priv->drm,
"kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name);
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
@@ -893,7 +891,7 @@ static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -922,8 +920,8 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
drm_dbg_kms(&dev_priv->drm,
"picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
pipe_name(intel_dp->pps_pipe),
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -1011,8 +1009,8 @@ static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- enum port port = intel_dig_port->base.port;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum port port = dig_port->base.port;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -1033,15 +1031,15 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
if (intel_dp->pps_pipe == INVALID_PIPE) {
drm_dbg_kms(&dev_priv->drm,
"no initial power sequencer for [ENCODER:%d:%s]\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
return;
}
drm_dbg_kms(&dev_priv->drm,
"initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name,
+ dig_port->base.base.base.id,
+ dig_port->base.base.name,
pipe_name(intel_dp->pps_pipe));
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -1306,9 +1304,9 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 aux_clock_divider)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
- to_i915(intel_dig_port->base.base.dev);
+ to_i915(dig_port->base.base.dev);
u32 precharge, timeout;
if (IS_GEN(dev_priv, 6))
@@ -1336,10 +1334,10 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 unused)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *i915 =
- to_i915(intel_dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
+ to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
u32 ret;
ret = DP_AUX_CH_CTL_SEND_BUSY |
@@ -1353,7 +1351,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
if (intel_phy_is_tc(i915, phy) &&
- intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
+ dig_port->tc_mode == TC_PORT_TBT_ALT)
ret |= DP_AUX_CH_CTL_TBT_IO;
return ret;
@@ -1365,11 +1363,11 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u8 *recv, int recv_size,
u32 aux_send_ctl_flags)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *i915 =
- to_i915(intel_dig_port->base.base.dev);
+ to_i915(dig_port->base.base.dev);
struct intel_uncore *uncore = &i915->uncore;
- enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
@@ -1386,9 +1384,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
if (is_tc_port)
- intel_tc_port_lock(intel_dig_port);
+ intel_tc_port_lock(dig_port);
- aux_domain = intel_aux_power_domain(intel_dig_port);
+ aux_domain = intel_aux_power_domain(dig_port);
aux_wakeref = intel_display_power_get(i915, aux_domain);
pps_wakeref = pps_lock(intel_dp);
@@ -1547,7 +1545,7 @@ out:
intel_display_power_put_async(i915, aux_domain, aux_wakeref);
if (is_tc_port)
- intel_tc_port_unlock(intel_dig_port);
+ intel_tc_port_unlock(dig_port);
return ret;
}
@@ -2893,7 +2891,7 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->want_panel_vdd;
@@ -2910,11 +2908,11 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
return need_to_disable;
intel_display_power_get(dev_priv,
- intel_aux_power_domain(intel_dig_port));
+ intel_aux_power_domain(dig_port));
drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -2936,8 +2934,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
if (!edp_have_panel_power(intel_dp)) {
drm_dbg_kms(&dev_priv->drm,
"[ENCODER:%d:%s] panel power wasn't enabled\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
msleep(intel_dp->panel_power_up_delay);
}
@@ -2970,7 +2968,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port =
+ struct intel_digital_port *dig_port =
dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
@@ -2983,8 +2981,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
return;
drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -3004,7 +3002,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
intel_dp->panel_power_off_time = ktime_get_boottime();
intel_display_power_put_unchecked(dev_priv,
- intel_aux_power_domain(intel_dig_port));
+ intel_aux_power_domain(dig_port));
}
static void edp_panel_vdd_work(struct work_struct *__work)
@@ -3835,8 +3833,8 @@ static void g4x_pre_enable_dp(struct intel_atomic_state *state,
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps_pipe;
i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
@@ -3858,8 +3856,8 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
*/
drm_dbg_kms(&dev_priv->drm,
"detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name);
intel_de_write(dev_priv, pp_on_reg, 0);
intel_de_posting_read(dev_priv, pp_on_reg);
@@ -4925,7 +4923,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct dp_sdp sdp = {};
ssize_t len;
@@ -4951,14 +4949,14 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
if (drm_WARN_ON(&dev_priv->drm, len < 0))
return;
- intel_dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
+ dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}
void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
struct drm_dp_vsc_sdp *vsc)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct dp_sdp sdp = {};
ssize_t len;
@@ -4968,7 +4966,7 @@ void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
if (drm_WARN_ON(&dev_priv->drm, len < 0))
return;
- intel_dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
+ dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
&sdp, len);
}
@@ -5128,7 +5126,7 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_dp_vsc_sdp *vsc)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
unsigned int type = DP_SDP_VSC;
@@ -5143,7 +5141,7 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(type)) == 0)
return;
- intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
+ dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
@@ -5155,7 +5153,7 @@ static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encod
struct intel_crtc_state *crtc_state,
struct hdmi_drm_infoframe *drm_infoframe)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
struct dp_sdp sdp = {};
@@ -5165,8 +5163,8 @@ static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encod
intel_hdmi_infoframe_enable(type)) == 0)
return;
- intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
- sizeof(sdp));
+ dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
+ sizeof(sdp));
ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
sizeof(sdp));
@@ -5368,10 +5366,10 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv =
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_dp_phy_test_params *data =
&intel_dp->compliance.test_data.phytest;
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
enum pipe pipe = crtc->pipe;
u32 pattern_val;
@@ -5433,10 +5431,10 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
static void
intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
enum pipe pipe = crtc->pipe;
u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
@@ -5459,11 +5457,11 @@ intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp)
static void
intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->base.port;
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ enum port port = dig_port->base.port;
+ struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
enum pipe pipe = crtc->pipe;
u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
@@ -6334,10 +6332,10 @@ intel_dp_connector_unregister(struct drm_connector *connector)
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder));
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
+ struct intel_dp *intel_dp = &dig_port->dp;
- intel_dp_mst_encoder_cleanup(intel_dig_port);
+ intel_dp_mst_encoder_cleanup(dig_port);
if (intel_dp_is_edp(intel_dp)) {
intel_wakeref_t wakeref;
@@ -6396,11 +6394,11 @@ static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
}
static
-int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
u8 *an)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&dig_port->base.base));
static const struct drm_dp_aux_msg msg = {
.request = DP_AUX_NATIVE_WRITE,
.address = DP_AUX_HDCP_AKSV,
@@ -6411,7 +6409,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
int ret;
/* Output An first, that's easy */
- dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
+ dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN,
an, DRM_HDCP_AN_LEN);
if (dpcd_ret != DRM_HDCP_AN_LEN) {
drm_dbg_kms(&i915->drm,
@@ -6450,13 +6448,13 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
return 0;
}
-static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
+static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port,
u8 *bksv)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret != DRM_HDCP_KSV_LEN) {
drm_dbg_kms(&i915->drm,
@@ -6466,10 +6464,10 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
return 0;
}
-static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
+static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
u8 *bstatus)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
/*
@@ -6477,7 +6475,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
* definition by different names. In the HDMI spec, it's called BSTATUS,
* but in DP it's called BINFO.
*/
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret != DRM_HDCP_BSTATUS_LEN) {
drm_dbg_kms(&i915->drm,
@@ -6488,13 +6486,13 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port,
u8 *bcaps)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
bcaps, 1);
if (ret != 1) {
drm_dbg_kms(&i915->drm,
@@ -6506,13 +6504,13 @@ int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
bool *repeater_present)
{
ssize_t ret;
u8 bcaps;
- ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
+ ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
if (ret)
return ret;
@@ -6521,13 +6519,13 @@ int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
u8 *ri_prime)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret != DRM_HDCP_RI_LEN) {
drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n",
@@ -6538,14 +6536,14 @@ int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
bool *ksv_ready)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
u8 bstatus;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
drm_dbg_kms(&i915->drm,
@@ -6557,17 +6555,17 @@ int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
int num_downstream, u8 *ksv_fifo)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
int i;
/* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
for (i = 0; i < num_downstream; i += 3) {
size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
DP_AUX_HDCP_KSV_FIFO,
ksv_fifo + i * DRM_HDCP_KSV_LEN,
len);
@@ -6582,16 +6580,16 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
int i, u32 *part)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
return -EINVAL;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
DP_AUX_HDCP_V_PRIME(i), part,
DRM_HDCP_V_PRIME_PART_LEN);
if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
@@ -6603,7 +6601,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
bool enable)
{
/* Not used for single stream DisplayPort setups */
@@ -6611,13 +6609,13 @@ int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
}
static
-bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
+bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
u8 bstatus;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
drm_dbg_kms(&i915->drm,
@@ -6629,13 +6627,13 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
}
static
-int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_capable(struct intel_digital_port *dig_port,
bool *hdcp_capable)
{
ssize_t ret;
u8 bcaps;
- ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
+ ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
if (ret)
return ret;
@@ -6693,13 +6691,13 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
};
static int
-intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port,
u8 *rx_status)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
HDCP_2_2_DP_RXSTATUS_LEN);
if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
@@ -6712,14 +6710,14 @@ intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
}
static
-int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
u8 msg_id, bool *msg_ready)
{
u8 rx_status;
int ret;
*msg_ready = false;
- ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+ ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
if (ret < 0)
return ret;
@@ -6745,11 +6743,11 @@ int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
}
static ssize_t
-intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_dp *dp = &intel_dig_port->dp;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *dp = &dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
u8 msg_id = hdcp2_msg_data->msg_id;
int ret, timeout;
@@ -6773,7 +6771,7 @@ intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
* the timeout at wait for CP_IRQ.
*/
intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
- ret = hdcp2_detect_msg_availability(intel_dig_port,
+ ret = hdcp2_detect_msg_availability(dig_port,
msg_id, &msg_ready);
if (!msg_ready)
ret = -ETIMEDOUT;
@@ -6799,10 +6797,10 @@ static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
}
static
-int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
void *buf, size_t size)
{
- struct intel_dp *dp = &intel_dig_port->dp;
+ struct intel_dp *dp = &dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
unsigned int offset;
u8 *byte = buf;
@@ -6825,7 +6823,7 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
- ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
+ ret = drm_dp_dpcd_write(&dig_port->dp.aux,
offset, (void *)byte, len);
if (ret < 0)
return ret;
@@ -6839,13 +6837,13 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
}
static
-ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
+ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port)
{
u8 rx_info[HDCP_2_2_RXINFO_LEN];
u32 dev_cnt;
ssize_t ret;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
DP_HDCP_2_2_REG_RXINFO_OFFSET,
(void *)rx_info, HDCP_2_2_RXINFO_LEN);
if (ret != HDCP_2_2_RXINFO_LEN)
@@ -6865,10 +6863,10 @@ ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
}
static
-int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
u8 msg_id, void *buf, size_t size)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
@@ -6879,12 +6877,12 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
return -EINVAL;
offset = hdcp2_msg_data->offset;
- ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
+ ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data);
if (ret < 0)
return ret;
if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
- ret = get_receiver_id_list_size(intel_dig_port);
+ ret = get_receiver_id_list_size(dig_port);
if (ret < 0)
return ret;
@@ -6899,7 +6897,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset,
(void *)byte, len);
if (ret < 0) {
drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
@@ -6918,7 +6916,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
bool is_repeater, u8 content_type)
{
int ret;
@@ -6937,7 +6935,7 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
stream_type_msg.stream_type = content_type;
- ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
+ ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg,
sizeof(stream_type_msg));
return ret < 0 ? ret : 0;
@@ -6945,12 +6943,12 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port)
{
u8 rx_status;
int ret;
- ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+ ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
if (ret)
return ret;
@@ -6965,14 +6963,14 @@ int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
}
static
-int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port,
bool *capable)
{
u8 rx_caps[3];
int ret;
*capable = false;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
rx_caps, HDCP_2_2_RXCAPS_LEN);
if (ret != HDCP_2_2_RXCAPS_LEN)
@@ -7251,12 +7249,12 @@ static bool intel_edp_have_power(struct intel_dp *intel_dp)
}
enum irqreturn
-intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
+intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *intel_dp = &dig_port->dp;
- if (intel_dig_port->base.type == INTEL_OUTPUT_EDP &&
+ if (dig_port->base.type == INTEL_OUTPUT_EDP &&
(long_hpd || !intel_edp_have_power(intel_dp))) {
/*
* vdd off can generate a long/short pulse on eDP which
@@ -7267,14 +7265,14 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
drm_dbg_kms(&i915->drm,
"ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
long_hpd ? "long" : "short",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
return IRQ_HANDLED;
}
drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name,
+ dig_port->base.base.base.id,
+ dig_port->base.base.name,
long_hpd ? "long" : "short");
if (long_hpd) {
@@ -8137,12 +8135,12 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
}
bool
-intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+intel_dp_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_encoder *intel_encoder = &dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_encoder->port;
@@ -8153,12 +8151,14 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
INIT_WORK(&intel_connector->modeset_retry_work,
intel_dp_modeset_retry_work_fn);
- if (drm_WARN(dev, intel_dig_port->max_lanes < 1,
+ if (drm_WARN(dev, dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
- intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ dig_port->max_lanes, intel_encoder->base.base.id,
intel_encoder->base.name))
return false;
+ intel_dp_set_source_rates(intel_dp);
+
intel_dp->reset_link_params = true;
intel_dp->pps_pipe = INVALID_PIPE;
intel_dp->active_pipe = INVALID_PIPE;
@@ -8174,22 +8174,28 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
*/
drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
type = DRM_MODE_CONNECTOR_eDP;
- intel_encoder->type = INTEL_OUTPUT_EDP;
-
- /* eDP only on port B and/or C on vlv/chv */
- if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)) &&
- port != PORT_B && port != PORT_C))
- return false;
} else {
type = DRM_MODE_CONNECTOR_DisplayPort;
}
- intel_dp_set_source_rates(intel_dp);
-
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+ /*
+ * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
+ * for DP the encoder type can be set by the caller to
+ * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
+ */
+ if (type == DRM_MODE_CONNECTOR_eDP)
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+
+ /* eDP only on port B and/or C on vlv/chv */
+ if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) &&
+ intel_dp_is_edp(intel_dp) &&
+ port != PORT_B && port != PORT_C))
+ return false;
+
drm_dbg_kms(&dev_priv->drm,
"Adding %s connector on [ENCODER:%d:%s]\n",
type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
@@ -8218,12 +8224,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector->get_hw_state = intel_connector_get_hw_state;
/* init MST on ports that can support it */
- intel_dp_mst_encoder_init(intel_dig_port,
+ intel_dp_mst_encoder_init(dig_port,
intel_connector->base.base.id);
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);
- intel_dp_mst_encoder_cleanup(intel_dig_port);
+ intel_dp_mst_encoder_cleanup(dig_port);
goto fail;
}
@@ -8258,20 +8264,20 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
i915_reg_t output_reg,
enum port port)
{
- struct intel_digital_port *intel_dig_port;
+ struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
- intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
- if (!intel_dig_port)
+ dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ if (!dig_port)
return false;
intel_connector = intel_connector_alloc();
if (!intel_connector)
goto err_connector_alloc;
- intel_encoder = &intel_dig_port->base;
+ intel_encoder = &dig_port->base;
encoder = &intel_encoder->base;
if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
@@ -8307,34 +8313,34 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A))
- intel_dig_port->dp.set_link_train = cpt_set_link_train;
+ dig_port->dp.set_link_train = cpt_set_link_train;
else
- intel_dig_port->dp.set_link_train = g4x_set_link_train;
+ dig_port->dp.set_link_train = g4x_set_link_train;
if (IS_CHERRYVIEW(dev_priv))
- intel_dig_port->dp.set_signal_levels = chv_set_signal_levels;
+ dig_port->dp.set_signal_levels = chv_set_signal_levels;
else if (IS_VALLEYVIEW(dev_priv))
- intel_dig_port->dp.set_signal_levels = vlv_set_signal_levels;
+ dig_port->dp.set_signal_levels = vlv_set_signal_levels;
else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
- intel_dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
+ dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
else if (IS_GEN(dev_priv, 6) && port == PORT_A)
- intel_dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
+ dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
else
- intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels;
+ dig_port->dp.set_signal_levels = g4x_set_signal_levels;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
(HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
- intel_dig_port->dp.preemph_max = intel_dp_preemph_max_3;
- intel_dig_port->dp.voltage_max = intel_dp_voltage_max_3;
+ dig_port->dp.preemph_max = intel_dp_preemph_max_3;
+ dig_port->dp.voltage_max = intel_dp_voltage_max_3;
} else {
- intel_dig_port->dp.preemph_max = intel_dp_preemph_max_2;
- intel_dig_port->dp.voltage_max = intel_dp_voltage_max_2;
+ dig_port->dp.preemph_max = intel_dp_preemph_max_2;
+ dig_port->dp.voltage_max = intel_dp_voltage_max_2;
}
- intel_dig_port->dp.output_reg = output_reg;
- intel_dig_port->max_lanes = 4;
- intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
- intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
+ dig_port->dp.output_reg = output_reg;
+ dig_port->max_lanes = 4;
+ dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
+ dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
intel_encoder->type = INTEL_OUTPUT_DP;
intel_encoder->power_domain = intel_port_to_power_domain(port);
@@ -8349,25 +8355,25 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->cloneable = 0;
intel_encoder->port = port;
- intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+ dig_port->hpd_pulse = intel_dp_hpd_pulse;
if (HAS_GMCH(dev_priv)) {
if (IS_GM45(dev_priv))
- intel_dig_port->connected = gm45_digital_port_connected;
+ dig_port->connected = gm45_digital_port_connected;
else
- intel_dig_port->connected = g4x_digital_port_connected;
+ dig_port->connected = g4x_digital_port_connected;
} else {
if (port == PORT_A)
- intel_dig_port->connected = ilk_digital_port_connected;
+ dig_port->connected = ilk_digital_port_connected;
else
- intel_dig_port->connected = ibx_digital_port_connected;
+ dig_port->connected = ibx_digital_port_connected;
}
if (port != PORT_A)
- intel_infoframe_init(intel_dig_port);
+ intel_infoframe_init(dig_port);
- intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
- if (!intel_dp_init_connector(intel_dig_port, intel_connector))
+ dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ if (!intel_dp_init_connector(dig_port, intel_connector))
goto err_init_connector;
return true;
@@ -8377,7 +8383,7 @@ err_init_connector:
err_encoder_init:
kfree(intel_connector);
err_connector_alloc:
- kfree(intel_dig_port);
+ kfree(dig_port);
return false;
}
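The intel_dp.c hunks above do more than the mechanical intel_dig_port -> dig_port rename: intel_dp_set_source_rates() is moved earlier in intel_dp_init_connector(), the encoder type is only forced to INTEL_OUTPUT_EDP for eDP connectors so a DDI caller's INTEL_OUTPUT_UNKNOWN is preserved, and the vlv/chv "eDP only on port B/C" rejection now explicitly checks intel_dp_is_edp(). A minimal standalone sketch of that last rule, using made-up stand-in types rather than the real i915 structures:

/* Standalone sketch (not the driver code) of the reordered eDP port check. */
#include <stdbool.h>
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D };

struct fake_dp {
	bool is_edp;		/* stand-in for intel_dp_is_edp() */
	bool is_vlv_or_chv;	/* stand-in for IS_VALLEYVIEW()/IS_CHERRYVIEW() */
	enum port port;
};

/* Returns false when the connector must be rejected: the restriction only
 * applies on vlv/chv, and now only when the DP is actually eDP. */
static bool edp_port_allowed(const struct fake_dp *dp)
{
	if (dp->is_vlv_or_chv && dp->is_edp &&
	    dp->port != PORT_B && dp->port != PORT_C)
		return false;
	return true;
}

int main(void)
{
	struct fake_dp dp = { .is_edp = true, .is_vlv_or_chv = true, .port = PORT_A };

	printf("eDP on port A (vlv): %s\n", edp_port_allowed(&dp) ? "allowed" : "rejected");
	dp.port = PORT_B;
	printf("eDP on port B (vlv): %s\n", edp_port_allowed(&dp) ? "allowed" : "rejected");
	return 0;
}

Compiled on its own, this prints that eDP on port A is rejected while port B is allowed on vlv/chv, which is the behaviour the drm_WARN_ON() above encodes.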
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 0a8950f744f6..b901ab850cbd 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -40,7 +40,7 @@ bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
enum pipe *pipe);
bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
enum port port);
-bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+bool intel_dp_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, u8 lane_count,
@@ -61,7 +61,7 @@ int intel_dp_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
-enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
bool long_hpd);
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 2493142a70e9..a23ed7290843 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -52,6 +52,7 @@ static u8 dp_voltage_max(u8 preemph)
void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const u8 link_status[DP_LINK_STATUS_SIZE])
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 v = 0;
u8 p = 0;
int lane;
@@ -64,12 +65,20 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
}
preemph_max = intel_dp->preemph_max(intel_dp);
+ drm_WARN_ON_ONCE(&i915->drm,
+ preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
+ preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
+
if (p >= preemph_max)
p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
v = min(v, dp_voltage_max(p));
voltage_max = intel_dp->voltage_max(intel_dp);
+ drm_WARN_ON_ONCE(&i915->drm,
+ voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
+ voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
+
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
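The new drm_WARN_ON_ONCE()s above only sanity-check that the platform hooks report one of the two legal maxima; the clamping itself is unchanged. A standalone sketch of that clamp follows. The real code works on the shifted DP_TRAIN_PRE_EMPH_LEVEL_* / DP_TRAIN_VOLTAGE_SWING_LEVEL_* encodings; this stand-in uses plain level numbers and a generic "max reached" flag for brevity:

#include <stdio.h>

#define MAX_REACHED	0x20	/* stands in for the DP_TRAIN_MAX_*_REACHED bits */

static unsigned char clamp_level(unsigned char requested, unsigned char max)
{
	if (requested >= max)
		return max | MAX_REACHED;	/* cap it and tell the sink we hit the limit */
	return requested;
}

int main(void)
{
	unsigned char p = clamp_level(3, 2);	/* sink asks for more than the port allows */
	unsigned char v = clamp_level(1, 3);	/* in range, passed through unchanged */

	printf("preemph=0x%02x voltage=0x%02x\n", p, v);
	return 0;
}

The over-limit pre-emphasis request is clamped and flagged, while the in-range voltage request passes through untouched, mirroring the two if-blocks in intel_dp_get_adjust_train().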
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 8273f2e07427..a2d91a499700 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -342,8 +342,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
@@ -369,8 +369,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -421,7 +421,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* the transcoder clock select is set to none.
*/
if (last_mst_stream)
- intel_dp_set_infoframes(&intel_dig_port->base, false,
+ intel_dp_set_infoframes(&dig_port->base, false,
old_crtc_state, NULL);
/*
* From TGL spec: "If multi-stream slave transcoder: Configure
@@ -436,7 +436,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_mst->connector = NULL;
if (last_mst_stream)
- intel_dig_port->base.post_disable(state, &intel_dig_port->base,
+ dig_port->base.post_disable(state, &dig_port->base,
old_crtc_state, NULL);
drm_dbg_kms(&dev_priv->drm, "active links %d\n",
@@ -449,11 +449,11 @@ static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
if (intel_dp->active_mst_links == 0)
- intel_dig_port->base.pre_pll_enable(state, &intel_dig_port->base,
+ dig_port->base.pre_pll_enable(state, &dig_port->base,
pipe_config, NULL);
}
@@ -463,8 +463,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
@@ -490,7 +490,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
if (first_mst_stream)
- intel_dig_port->base.pre_enable(state, &intel_dig_port->base,
+ dig_port->base.pre_enable(state, &dig_port->base,
pipe_config, NULL);
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
@@ -506,7 +506,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
/*
* Before Gen 12 this is not done as part of
- * intel_dig_port->base.pre_enable() and should be done here. For
+ * dig_port->base.pre_enable() and should be done here. For
* Gen 12+ the step in which this should be done is different for the
* first MST stream, so it's done on the DDI for the first stream and
* here for the following ones.
@@ -525,8 +525,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
@@ -572,9 +572,9 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_digital_port *dig_port = intel_mst->primary;
- intel_ddi_get_config(&intel_dig_port->base, pipe_config);
+ intel_ddi_get_config(&dig_port->base, pipe_config);
}
static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
@@ -639,39 +639,60 @@ static int intel_dp_mst_get_modes(struct drm_connector *connector)
return intel_dp_mst_get_ddc_modes(connector);
}
-static enum drm_mode_status
-intel_dp_mst_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static int
+intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx,
+ enum drm_mode_status *status)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
+ struct drm_dp_mst_port *port = intel_connector->port;
+ const int min_bpp = 18;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
+ int ret;
- if (drm_connector_is_unregistered(connector))
- return MODE_ERROR;
+ if (drm_connector_is_unregistered(connector)) {
+ *status = MODE_ERROR;
+ return 0;
+ }
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+ *status = MODE_NO_DBLESCAN;
+ return 0;
+ }
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
- mode_rate = intel_dp_link_required(mode->clock, 18);
+ mode_rate = intel_dp_link_required(mode->clock, min_bpp);
- /* TODO - validate mode against available PBN for link */
- if (mode->clock < 10000)
- return MODE_CLOCK_LOW;
+ ret = drm_modeset_lock(&mgr->base.lock, ctx);
+ if (ret)
+ return ret;
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
- return MODE_H_ILLEGAL;
+ if (mode_rate > max_rate || mode->clock > max_dotclk ||
+ drm_dp_calc_pbn_mode(mode->clock, min_bpp, false) > port->full_pbn) {
+ *status = MODE_CLOCK_HIGH;
+ return 0;
+ }
+
+ if (mode->clock < 10000) {
+ *status = MODE_CLOCK_LOW;
+ return 0;
+ }
- if (mode_rate > max_rate || mode->clock > max_dotclk)
- return MODE_CLOCK_HIGH;
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ *status = MODE_H_ILLEGAL;
+ return 0;
+ }
- return intel_mode_valid_max_plane_size(dev_priv, mode);
+ *status = intel_mode_valid_max_plane_size(dev_priv, mode);
+ return 0;
}
static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
@@ -700,7 +721,7 @@ intel_dp_mst_detect(struct drm_connector *connector,
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
- .mode_valid = intel_dp_mst_mode_valid,
+ .mode_valid_ctx = intel_dp_mst_mode_valid_ctx,
.atomic_best_encoder = intel_mst_atomic_best_encoder,
.atomic_check = intel_dp_mst_atomic_check,
.detect_ctx = intel_dp_mst_detect,
@@ -732,8 +753,8 @@ static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_connector *intel_connector;
struct drm_connector *connector;
@@ -808,11 +829,11 @@ static const struct drm_dp_mst_topology_cbs mst_cbs = {
};
static struct intel_dp_mst_encoder *
-intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe)
+intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe pipe)
{
struct intel_dp_mst_encoder *intel_mst;
struct intel_encoder *intel_encoder;
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_device *dev = dig_port->base.base.dev;
intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
@@ -821,14 +842,14 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_mst->pipe = pipe;
intel_encoder = &intel_mst->base;
- intel_mst->primary = intel_dig_port;
+ intel_mst->primary = dig_port;
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
intel_encoder->type = INTEL_OUTPUT_DP_MST;
- intel_encoder->power_domain = intel_dig_port->base.power_domain;
- intel_encoder->port = intel_dig_port->base.port;
+ intel_encoder->power_domain = dig_port->base.power_domain;
+ intel_encoder->port = dig_port->base.port;
intel_encoder->cloneable = 0;
/*
* This is wrong, but broken userspace uses the intersection
@@ -855,29 +876,29 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
}
static bool
-intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
+intel_dp_create_fake_mst_encoders(struct intel_digital_port *dig_port)
{
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe)
- intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(intel_dig_port, pipe);
+ intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(dig_port, pipe);
return true;
}
int
-intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
+intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
{
- return intel_dig_port->dp.active_mst_links;
+ return dig_port->dp.active_mst_links;
}
int
-intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
+intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- enum port port = intel_dig_port->base.port;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *intel_dp = &dig_port->dp;
+ enum port port = dig_port->base.port;
int ret;
if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
@@ -892,7 +913,7 @@ intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_ba
intel_dp->mst_mgr.cbs = &mst_cbs;
/* create encoders */
- intel_dp_create_fake_mst_encoders(intel_dig_port);
+ intel_dp_create_fake_mst_encoders(dig_port);
ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
&intel_dp->aux, 16, 3, conn_base_id);
if (ret)
@@ -904,9 +925,9 @@ intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_ba
}
void
-intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
+intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
{
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_dp *intel_dp = &dig_port->dp;
if (!intel_dp->can_mst)
return;
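The largest functional change in intel_dp_mst.c above is the switch from .mode_valid to .mode_valid_ctx: the hook now takes an acquire context so it can grab the MST manager's lock, returns a nonzero error (e.g. -EDEADLK from drm_modeset_lock()) only when the check could not run, and otherwise reports the verdict through *status; while holding the lock it also rejects modes whose PBN exceeds the port's full_pbn. Below is a standalone sketch of the PBN arithmetic, mirroring the non-DSC branch of drm_dp_calc_pbn_mode() (PBN units of 54/64 MB/s with a 0.6% margin); the clock, bpp and full_pbn numbers in main() are made-up examples:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* clock in kHz, bpp in bits per pixel */
static unsigned long long calc_pbn(unsigned long long clock, unsigned int bpp)
{
	return DIV_ROUND_UP(clock * bpp * 64 * 1006,
			    8ULL * 54 * 1000 * 1000);
}

int main(void)
{
	unsigned long long full_pbn = 2560;		/* made-up branch limit */
	unsigned long long pbn = calc_pbn(594000, 18);	/* 4k60-class clock at the 18 bpp floor */

	printf("mode needs %llu PBN, link offers %llu -> %s\n",
	       pbn, full_pbn, pbn > full_pbn ? "MODE_CLOCK_HIGH" : "ok");
	return 0;
}

With these numbers the mode needs 1594 PBN and fits; raising the clock or shrinking full_pbn flips the verdict to MODE_CLOCK_HIGH, which is exactly the new rejection path above.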
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 854724f68f09..6afda4e86b3c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -11,9 +11,9 @@
struct intel_digital_port;
struct intel_crtc_state;
-int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
-void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
-int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port);
+int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
+void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
+int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 399a7edb4568..7910522273b2 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -650,9 +650,9 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
bool uniq_trans_scale)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = intel_crtc->pipe;
u32 val;
int i;
@@ -746,7 +746,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
+ enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -789,10 +789,10 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = crtc->pipe;
unsigned int lane_mask =
intel_dp_unused_lane_mask(crtc_state->lane_count);
@@ -803,7 +803,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
* Otherwise we can't even access the PLL.
*/
if (ch == DPIO_CH0 && pipe == PIPE_B)
- dport->release_cl2_override =
+ dig_port->release_cl2_override =
!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
chv_phy_powergate_lanes(encoder, true, lane_mask);
@@ -870,10 +870,10 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = crtc->pipe;
int data, i, stagger;
u32 val;
@@ -948,12 +948,12 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void chv_phy_release_cl2_override(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (dport->release_cl2_override) {
+ if (dig_port->release_cl2_override) {
chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
- dport->release_cl2_override = false;
+ dig_port->release_cl2_override = false;
}
}
@@ -997,8 +997,8 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
- enum dpio_channel port = vlv_dport_to_channel(dport);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = intel_crtc->pipe;
vlv_dpio_get(dev_priv);
@@ -1022,10 +1022,10 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = crtc->pipe;
/* Program Tx lane resets to default */
@@ -1052,10 +1052,10 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -1081,10 +1081,10 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = crtc->pipe;
vlv_dpio_get(dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 5cd09034519b..307ed8ae9a19 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -324,6 +324,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->dev);
const struct drm_display_mode *fixed_mode =
to_intel_connector(connector)->panel.fixed_mode;
+ int num_modes;
/*
* We should probably have an i2c driver get_modes function for those
@@ -331,21 +332,22 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(connector,
- intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC));
- if (!list_empty(&connector->probed_modes))
- return 1;
+ num_modes = intel_ddc_get_modes(connector,
+ intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC));
+ if (num_modes)
+ return num_modes;
if (fixed_mode) {
struct drm_display_mode *mode;
+
mode = drm_mode_duplicate(connector->dev, fixed_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
- return 1;
+ num_modes++;
}
}
- return 0;
+ return num_modes;
}
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
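The intel_dvo_get_modes() change above stops reporting a hard-coded 0 or 1 and instead returns however many modes were actually added, falling back to a duplicate of the fixed mode only when DDC probing found nothing. A compilable sketch of that control flow, with made-up helpers standing in for intel_ddc_get_modes() and drm_mode_duplicate():

#include <stdio.h>

/* Stand-in for intel_ddc_get_modes(): returns how many modes DDC found. */
static int probe_ddc_modes(int found)
{
	return found;
}

static int fake_get_modes(int ddc_found, int have_fixed_mode)
{
	int num_modes;

	num_modes = probe_ddc_modes(ddc_found);
	if (num_modes)
		return num_modes;	/* report the real count, not 1 */

	if (have_fixed_mode)
		num_modes++;		/* the duplicated fixed mode */

	return num_modes;
}

int main(void)
{
	printf("DDC found 3: %d\n", fake_get_modes(3, 1));		/* 3 */
	printf("DDC empty, fixed mode: %d\n", fake_get_modes(0, 1));	/* 1 */
	printf("nothing at all: %d\n", fake_get_modes(0, 0));		/* 0 */
	return 0;
}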
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 69a0682ddb6a..85723fba6002 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -187,8 +187,30 @@ static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN;
}
+static void i8xx_fbc_recompress(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+ enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+ intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
+ spin_unlock_irq(&dev_priv->uncore.lock);
+}
+
+static void i965_fbc_recompress(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+ enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
+ spin_unlock_irq(&dev_priv->uncore.lock);
+}
+
/* This function forces a CFB recompression through the nuke operation. */
-static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
+static void snb_fbc_recompress(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
@@ -198,6 +220,16 @@ static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE);
}
+static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 6)
+ snb_fbc_recompress(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 4)
+ i965_fbc_recompress(dev_priv);
+ else
+ i8xx_fbc_recompress(dev_priv);
+}
+
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
@@ -315,21 +347,6 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
if (dev_priv->fbc.false_color)
dpfc_ctl |= FBC_CTL_FALSE_COLOR;
- if (IS_IVYBRIDGE(dev_priv)) {
- /* WaFbcAsynchFlipDisableFbcQueue:ivb */
- intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1,
- intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS);
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe),
- intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS);
- }
-
- if (INTEL_GEN(dev_priv) >= 11)
- /* Wa_1409120013:icl,ehl,tgl */
- intel_de_write(dev_priv, ILK_DPFC_CHICKEN,
- ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
intel_fbc_recompress(dev_priv);
@@ -695,9 +712,13 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
cache->fb.format = fb->format;
- cache->fb.stride = fb->pitches[0];
cache->fb.modifier = fb->modifier;
+ /* FIXME is this correct? */
+ cache->fb.stride = plane_state->color_plane[0].stride;
+ if (drm_rotation_90_or_270(plane_state->hw.rotation))
+ cache->fb.stride *= fb->format->cpp[0];
+
/* FBC1 compression interval: arbitrary choice of 1 second */
cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
@@ -721,6 +742,25 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
fbc->compressed_fb.size * fbc->threshold;
}
+static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+ if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
+ cache->fb.modifier != I915_FORMAT_MOD_X_TILED)
+ return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
+ else
+ return 0;
+}
+
+static bool intel_fbc_gen9_wa_cfb_stride_changed(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ return fbc->params.gen9_wa_cfb_stride != intel_fbc_gen9_wa_cfb_stride(dev_priv);
+}
+
static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
@@ -797,6 +837,11 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
+ if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
+ fbc->no_fbc_reason = "pixel format is invalid";
+ return false;
+ }
+
if (!rotation_is_valid(dev_priv, cache->fb.format->format,
cache->plane.rotation)) {
fbc->no_fbc_reason = "rotation unsupported";
@@ -813,11 +858,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
- if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
- fbc->no_fbc_reason = "pixel format is invalid";
- return false;
- }
-
if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
cache->fb.format->has_alpha) {
fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
@@ -881,6 +921,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
params->fb.format = cache->fb.format;
+ params->fb.modifier = cache->fb.modifier;
params->fb.stride = cache->fb.stride;
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
@@ -910,6 +951,9 @@ static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
if (params->fb.format != cache->fb.format)
return false;
+ if (params->fb.modifier != cache->fb.modifier)
+ return false;
+
if (params->fb.stride != cache->fb.stride)
return false;
@@ -1197,7 +1241,8 @@ void intel_fbc_enable(struct intel_atomic_state *state,
if (fbc->crtc) {
if (fbc->crtc != crtc ||
- !intel_fbc_cfb_size_changed(dev_priv))
+ (!intel_fbc_cfb_size_changed(dev_priv) &&
+ !intel_fbc_gen9_wa_cfb_stride_changed(dev_priv)))
goto out;
__intel_fbc_disable(dev_priv);
@@ -1219,12 +1264,7 @@ void intel_fbc_enable(struct intel_atomic_state *state,
goto out;
}
- if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
- plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED)
- cache->gen9_wa_cfb_stride =
- DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
- else
- cache->gen9_wa_cfb_stride = 0;
+ cache->gen9_wa_cfb_stride = intel_fbc_gen9_wa_cfb_stride(dev_priv);
drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
pipe_name(crtc->pipe));
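Among the intel_fbc.c changes above, the gen9 workaround CFB stride is factored out into intel_fbc_gen9_wa_cfb_stride(): on gen9/BXT with a non-X-tiled framebuffer it is DIV_ROUND_UP(plane_src_w, 32 * threshold) * 8, and FBC is now re-enabled whenever that value changes. A standalone arithmetic sketch; the widths and thresholds in main() are example values, not taken from the patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int gen9_wa_cfb_stride(unsigned int src_w, unsigned int threshold,
					int x_tiled)
{
	if (x_tiled)
		return 0;	/* workaround not needed for X-tiled framebuffers */
	return DIV_ROUND_UP(src_w, 32 * threshold) * 8;
}

int main(void)
{
	/* 3840-wide plane, compression threshold 1 -> 120 * 8 = 960 */
	printf("stride = %u\n", gen9_wa_cfb_stride(3840, 1, 0));
	/* threshold 2 halves the limit -> 60 * 8 = 480 */
	printf("stride = %u\n", gen9_wa_cfb_stride(3840, 2, 0));
	return 0;
}

Because the recomputed value depends on the plane width, tiling and compression threshold, a change in any of them now takes the re-enable path alongside the existing CFB size check.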
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 815b054bb167..89a4d294822d 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -40,15 +40,15 @@ bool intel_hdcp_is_ksv_valid(u8 *ksv)
}
static
-int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
+int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim, u8 *bksv)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret, i, tries = 2;
/* HDCP spec states that we must retry the bksv if it is invalid */
for (i = 0; i < tries; i++) {
- ret = shim->read_bksv(intel_dig_port, bksv);
+ ret = shim->read_bksv(dig_port, bksv);
if (ret)
return ret;
if (intel_hdcp_is_ksv_valid(bksv))
@@ -65,7 +65,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
/* Is HDCP1.4 capable on Platform and Sink */
bool intel_hdcp_capable(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
bool capable = false;
u8 bksv[5];
@@ -74,9 +74,9 @@ bool intel_hdcp_capable(struct intel_connector *connector)
return capable;
if (shim->hdcp_capable) {
- shim->hdcp_capable(intel_dig_port, &capable);
+ shim->hdcp_capable(dig_port, &capable);
} else {
- if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
+ if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
capable = true;
}
@@ -86,7 +86,7 @@ bool intel_hdcp_capable(struct intel_connector *connector)
/* Is HDCP2.2 capable on Platform and Sink */
bool intel_hdcp2_capable(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
bool capable = false;
@@ -104,7 +104,7 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
mutex_unlock(&dev_priv->hdcp_comp_mutex);
/* Sink's capability for HDCP2.2 */
- hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
+ hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
return capable;
}
@@ -125,14 +125,14 @@ static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
LINK_ENCRYPTION_STATUS;
}
-static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
+static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
int ret, read_ret;
bool ksv_ready;
/* Poll for ksv list ready (spec says max time allowed is 5s) */
- ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
+ ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
&ksv_ready),
read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
100 * 1000);
@@ -300,16 +300,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
const struct intel_hdcp_shim *shim,
u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
u32 vprime, sha_text, sha_leftovers, rep_ctl;
int ret, i, j, sha_idx;
/* Process V' values from the receiver */
for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
- ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
+ ret = shim->read_v_prime_part(dig_port, i, &vprime);
if (ret)
return ret;
intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
@@ -528,20 +528,20 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
u8 bstatus[2], num_downstream, *ksv_fifo;
int ret, i, tries = 3;
- ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
+ ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
if (ret) {
drm_dbg_kms(&dev_priv->drm,
"KSV list failed to become ready (%d)\n", ret);
return ret;
}
- ret = shim->read_bstatus(intel_dig_port, bstatus);
+ ret = shim->read_bstatus(dig_port, bstatus);
if (ret)
return ret;
@@ -571,12 +571,12 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
return -ENOMEM;
}
- ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
+ ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
if (ret)
goto err;
if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
- num_downstream)) {
+ num_downstream) > 0) {
drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
ret = -EPERM;
goto err;
@@ -611,12 +611,12 @@ err:
/* Implements Part 1 of the HDCP authorization procedure */
static int intel_hdcp_auth(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
const struct intel_hdcp_shim *shim = hdcp->shim;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
unsigned long r0_prime_gen_start;
int ret, i, tries = 2;
union {
@@ -640,7 +640,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
* displays, this is not necessary.
*/
if (shim->hdcp_capable) {
- ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
+ ret = shim->hdcp_capable(dig_port, &hdcp_capable);
if (ret)
return ret;
if (!hdcp_capable) {
@@ -670,7 +670,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
HDCP_ANLO(dev_priv, cpu_transcoder, port));
an.reg[1] = intel_de_read(dev_priv,
HDCP_ANHI(dev_priv, cpu_transcoder, port));
- ret = shim->write_an_aksv(intel_dig_port, an.shim);
+ ret = shim->write_an_aksv(dig_port, an.shim);
if (ret)
return ret;
@@ -678,11 +678,11 @@ static int intel_hdcp_auth(struct intel_connector *connector)
memset(&bksv, 0, sizeof(bksv));
- ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
+ ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
if (ret < 0)
return ret;
- if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1)) {
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
drm_err(&dev_priv->drm, "BKSV is revoked\n");
return -EPERM;
}
@@ -692,14 +692,14 @@ static int intel_hdcp_auth(struct intel_connector *connector)
intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
bksv.reg[1]);
- ret = shim->repeater_present(intel_dig_port, &repeater_present);
+ ret = shim->repeater_present(dig_port, &repeater_present);
if (ret)
return ret;
if (repeater_present)
intel_de_write(dev_priv, HDCP_REP_CTL,
intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
- ret = shim->toggle_signalling(intel_dig_port, true);
+ ret = shim->toggle_signalling(dig_port, true);
if (ret)
return ret;
@@ -732,7 +732,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
*/
for (i = 0; i < tries; i++) {
ri.reg = 0;
- ret = shim->read_ri_prime(intel_dig_port, ri.shim);
+ ret = shim->read_ri_prime(dig_port, ri.shim);
if (ret)
return ret;
intel_de_write(dev_priv,
@@ -776,10 +776,10 @@ static int intel_hdcp_auth(struct intel_connector *connector)
static int _intel_hdcp_disable(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
@@ -796,7 +796,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
return -ETIMEDOUT;
}
- ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
+ ret = hdcp->shim->toggle_signalling(dig_port, false);
if (ret) {
drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
return ret;
@@ -859,10 +859,10 @@ static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
enum transcoder cpu_transcoder;
int ret = 0;
@@ -888,7 +888,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- if (hdcp->shim->check_link(intel_dig_port)) {
+ if (hdcp->shim->check_link(dig_port)) {
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
schedule_work(&hdcp->prop_work);
@@ -1242,7 +1242,7 @@ static int hdcp2_deauthenticate_port(struct intel_connector *connector)
/* Authentication flow starts from here */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
@@ -1264,12 +1264,12 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
if (ret < 0)
return ret;
- ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
+ ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
sizeof(msgs.ake_init));
if (ret < 0)
return ret;
- ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
+ ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
&msgs.send_cert, sizeof(msgs.send_cert));
if (ret < 0)
return ret;
@@ -1283,7 +1283,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
msgs.send_cert.cert_rx.receiver_id,
- 1)) {
+ 1) > 0) {
drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
return -EPERM;
}
@@ -1298,11 +1298,11 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
if (ret < 0)
return ret;
- ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
+ ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
if (ret < 0)
return ret;
- ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
+ ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
&msgs.send_hprime, sizeof(msgs.send_hprime));
if (ret < 0)
return ret;
@@ -1313,7 +1313,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
if (!hdcp->is_paired) {
/* Pairing is required */
- ret = shim->read_2_2_msg(intel_dig_port,
+ ret = shim->read_2_2_msg(dig_port,
HDCP_2_2_AKE_SEND_PAIRING_INFO,
&msgs.pairing_info,
sizeof(msgs.pairing_info));
@@ -1331,7 +1331,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
static int hdcp2_locality_check(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_lc_init lc_init;
@@ -1345,12 +1345,12 @@ static int hdcp2_locality_check(struct intel_connector *connector)
if (ret < 0)
continue;
- ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
+ ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
sizeof(msgs.lc_init));
if (ret < 0)
continue;
- ret = shim->read_2_2_msg(intel_dig_port,
+ ret = shim->read_2_2_msg(dig_port,
HDCP_2_2_LC_SEND_LPRIME,
&msgs.send_lprime,
sizeof(msgs.send_lprime));
@@ -1367,7 +1367,7 @@ static int hdcp2_locality_check(struct intel_connector *connector)
static int hdcp2_session_key_exchange(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
struct hdcp2_ske_send_eks send_eks;
int ret;
@@ -1376,7 +1376,7 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector)
if (ret < 0)
return ret;
- ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
+ ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
sizeof(send_eks));
if (ret < 0)
return ret;
@@ -1387,7 +1387,7 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector)
static
int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
@@ -1409,12 +1409,12 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
/* Send it to Repeater */
- ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
+ ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
sizeof(msgs.stream_manage));
if (ret < 0)
return ret;
- ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
+ ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
&msgs.stream_ready, sizeof(msgs.stream_ready));
if (ret < 0)
return ret;
@@ -1439,7 +1439,7 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
@@ -1451,7 +1451,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
u8 *rx_info;
int ret;
- ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
+ ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
&msgs.recvid_list, sizeof(msgs.recvid_list));
if (ret < 0)
return ret;
@@ -1484,7 +1484,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
msgs.recvid_list.receiver_ids,
- device_cnt)) {
+ device_cnt) > 0) {
drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
return -EPERM;
}
@@ -1496,7 +1496,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
return ret;
hdcp->seq_num_v = seq_num_v;
- ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
+ ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
sizeof(msgs.rep_ack));
if (ret < 0)
return ret;
@@ -1517,7 +1517,7 @@ static int hdcp2_authenticate_repeater(struct intel_connector *connector)
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
const struct intel_hdcp_shim *shim = hdcp->shim;
@@ -1543,7 +1543,7 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
}
if (shim->config_stream_type) {
- ret = shim->config_stream_type(intel_dig_port,
+ ret = shim->config_stream_type(dig_port,
hdcp->is_repeater,
hdcp->content_type);
if (ret < 0)
@@ -1569,10 +1569,10 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
@@ -1580,7 +1580,7 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS);
if (hdcp->shim->toggle_signalling) {
- ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
+ ret = hdcp->shim->toggle_signalling(dig_port, true);
if (ret) {
drm_err(&dev_priv->drm,
"Failed to enable HDCP signalling. %d\n",
@@ -1608,10 +1608,10 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
@@ -1630,7 +1630,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");
if (hdcp->shim->toggle_signalling) {
- ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
+ ret = hdcp->shim->toggle_signalling(dig_port, false);
if (ret) {
drm_err(&dev_priv->drm,
"Failed to disable HDCP signalling. %d\n",
@@ -1723,10 +1723,10 @@ static int _intel_hdcp2_disable(struct intel_connector *connector)
/* Implements the Link Integrity Check for HDCP2.2 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
enum transcoder cpu_transcoder;
int ret = 0;
@@ -1751,7 +1751,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
- ret = hdcp->shim->check_2_2_link(intel_dig_port);
+ ret = hdcp->shim->check_2_2_link(dig_port);
if (ret == HDCP_LINK_PROTECTED) {
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
@@ -2086,6 +2086,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
(conn_state->hdcp_content_type != hdcp->content_type &&
conn_state->content_protection !=
DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+ bool desired_and_not_enabled = false;
/*
* During the HDCP encryption session if Type change is requested,
@@ -2108,8 +2109,15 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
}
if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_DESIRED ||
- content_protection_type_changed)
+ DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ mutex_lock(&hdcp->mutex);
+ /* Avoid enabling hdcp if it is already ENABLED */
+ desired_and_not_enabled =
+ hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ mutex_unlock(&hdcp->mutex);
+ }
+
+ if (desired_and_not_enabled || content_protection_type_changed)
intel_hdcp_enable(connector,
crtc_state->cpu_transcoder,
(u8)conn_state->hdcp_content_type);
@@ -2158,6 +2166,19 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
return;
}
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
+ /*
+ * Fix the HDCP uapi content protection state in case of modeset.
+ * FIXME: As per the HDCP content protection property uapi doc, a uevent()
+ * needs to be sent if there is a transition from ENABLED->DESIRED.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state) &&
+ (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
+ new_state->content_protection =
+ DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
/*
* Nothing to do if the state didn't change, or HDCP was activated since
* the last commit. And also no change in hdcp content type.
@@ -2170,8 +2191,6 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
return;
}
- crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
- new_state->crtc);
crtc_state->mode_changed = true;
}
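Scattered through the intel_hdcp.c hunks above, the drm_hdcp_check_ksvs_revoked() calls switch from if (ret) to if (ret > 0). That only makes sense if the helper follows the usual count-or-negative-errno convention (a positive count of revoked KSVs, 0 for none, a negative error when the check itself failed), so a failed lookup is no longer treated as a revoked device. A standalone sketch of that distinction, with a made-up stand-in for the helper:

#include <errno.h>
#include <stdio.h>

/* Stand-in for drm_hdcp_check_ksvs_revoked(): count on success, -errno on failure. */
static int check_revoked(int simulate_error, int revoked_count)
{
	if (simulate_error)
		return -ENOMEM;		/* e.g. the SRM could not be parsed */
	return revoked_count;		/* 0 means nothing in the list is revoked */
}

static const char *verdict(int ret)
{
	if (ret > 0)
		return "revoked -> reject (-EPERM)";
	if (ret < 0)
		return "check failed -> do not treat as revoked";
	return "clean -> continue authentication";
}

int main(void)
{
	printf("%s\n", verdict(check_revoked(0, 0)));
	printf("%s\n", verdict(check_revoked(0, 2)));
	printf("%s\n", verdict(check_revoked(1, 0)));
	return 0;
}

The other behavioural changes in this file, the desired_and_not_enabled guard in intel_hdcp_update_pipe() and forcing the property back to DESIRED across a modeset, keep the uapi state machine consistent without re-running authentication when HDCP is already ENABLED.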
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 864a1642e81c..de2ce5632b94 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -88,10 +88,10 @@ assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder)
{
- struct intel_digital_port *intel_dig_port =
+ struct intel_digital_port *dig_port =
container_of(&encoder->base, struct intel_digital_port,
base.base);
- return &intel_dig_port->hdmi;
+ return &dig_port->hdmi;
}
static struct intel_hdmi *intel_attached_hdmi(struct intel_connector *connector)
@@ -660,7 +660,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
enum hdmi_infoframe_type type,
const union hdmi_infoframe *frame)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u8 buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
@@ -681,7 +681,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
buffer[3] = 0;
len++;
- intel_dig_port->write_infoframe(encoder, crtc_state, type, buffer, len);
+ dig_port->write_infoframe(encoder, crtc_state, type, buffer, len);
}
void intel_read_infoframe(struct intel_encoder *encoder,
@@ -689,7 +689,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
enum hdmi_infoframe_type type,
union hdmi_infoframe *frame)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u8 buffer[VIDEO_DIP_DATA_SIZE];
int ret;
@@ -697,7 +697,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(type)) == 0)
return;
- intel_dig_port->read_infoframe(encoder, crtc_state,
+ dig_port->read_infoframe(encoder, crtc_state,
type, buffer, sizeof(buffer));
/* Fill the 'hole' (see big comment above) at position 3 */
@@ -872,8 +872,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
u32 val = intel_de_read(dev_priv, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
@@ -1057,8 +1057,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = intel_de_read(dev_priv, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
@@ -1275,11 +1275,11 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
adapter, enable);
}
-static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
+static int intel_hdmi_hdcp_read(struct intel_digital_port *dig_port,
unsigned int offset, void *buffer, size_t size)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_hdmi *hdmi = &dig_port->hdmi;
struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
hdmi->ddc_bus);
int ret;
@@ -1304,11 +1304,11 @@ static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
return ret >= 0 ? -EIO : ret;
}
-static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
+static int intel_hdmi_hdcp_write(struct intel_digital_port *dig_port,
unsigned int offset, void *buffer, size_t size)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_hdmi *hdmi = &dig_port->hdmi;
struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
hdmi->ddc_bus);
int ret;
@@ -1338,16 +1338,16 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
u8 *an)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_hdmi *hdmi = &dig_port->hdmi;
struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
hdmi->ddc_bus);
int ret;
- ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
+ ret = intel_hdmi_hdcp_write(dig_port, DRM_HDCP_DDC_AN, an,
DRM_HDCP_AN_LEN);
if (ret) {
drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n",
@@ -1363,13 +1363,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
return 0;
}
-static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
+static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *dig_port,
u8 *bksv)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret;
- ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret)
drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n",
@@ -1378,13 +1378,13 @@ static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *dig_port,
u8 *bstatus)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret;
- ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BSTATUS,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret)
drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n",
@@ -1393,14 +1393,14 @@ int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *dig_port,
bool *repeater_present)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret;
u8 val;
- ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
ret);
@@ -1411,13 +1411,13 @@ int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
u8 *ri_prime)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret;
- ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret)
drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n",
@@ -1426,14 +1426,14 @@ int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
bool *ksv_ready)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret;
u8 val;
- ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
ret);
@@ -1444,12 +1444,12 @@ int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
int num_downstream, u8 *ksv_fifo)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret;
- ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_KSV_FIFO,
ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
if (ret) {
drm_dbg_kms(&i915->drm,
@@ -1460,16 +1460,16 @@ int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
int i, u32 *part)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret;
if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
return -EINVAL;
- ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_V_PRIME(i),
part, DRM_HDCP_V_PRIME_PART_LEN);
if (ret)
drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n",
@@ -1480,7 +1480,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_crtc *crtc = connector->base.state->crtc;
struct intel_crtc *intel_crtc = container_of(crtc,
struct intel_crtc, base);
@@ -1494,13 +1494,13 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
usleep_range(25, 50);
}
- ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false);
+ ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, false);
if (ret) {
drm_err(&dev_priv->drm,
"Disable HDCP signalling failed (%d)\n", ret);
return ret;
}
- ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true);
+ ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, true);
if (ret) {
drm_err(&dev_priv->drm,
"Enable HDCP signalling failed (%d)\n", ret);
@@ -1511,10 +1511,10 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
}
static
-int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
bool enable)
{
- struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct intel_hdmi *hdmi = &dig_port->hdmi;
struct intel_connector *connector = hdmi->attached_connector;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int ret;
@@ -1522,7 +1522,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
if (!enable)
usleep_range(6, 60); /* Bspec says >= 6us */
- ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, enable);
+ ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, enable);
if (ret) {
drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
enable ? "Enable" : "Disable", ret);
@@ -1540,12 +1540,12 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
}
static
-bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *intel_dig_port)
+bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_connector *connector =
- intel_dig_port->hdmi.attached_connector;
- enum port port = intel_dig_port->base.port;
+ dig_port->hdmi.attached_connector;
+ enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
int ret;
union {
@@ -1553,7 +1553,7 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *intel_dig_port)
u8 shim[DRM_HDCP_RI_LEN];
} ri;
- ret = intel_hdmi_hdcp_read_ri_prime(intel_dig_port, ri.shim);
+ ret = intel_hdmi_hdcp_read_ri_prime(dig_port, ri.shim);
if (ret)
return false;
@@ -1572,13 +1572,13 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *intel_dig_port)
}
static
-bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
+bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int retry;
for (retry = 0; retry < 3; retry++)
- if (intel_hdmi_hdcp_check_link_once(intel_dig_port))
+ if (intel_hdmi_hdcp_check_link_once(dig_port))
return true;
drm_err(&i915->drm, "Link check failed\n");
@@ -1599,10 +1599,10 @@ static const struct hdcp2_hdmi_msg_timeout hdcp2_msg_timeout[] = {
};
static
-int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *dig_port,
u8 *rx_status)
{
- return intel_hdmi_hdcp_read(intel_dig_port,
+ return intel_hdmi_hdcp_read(dig_port,
HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET,
rx_status,
HDCP_2_2_HDMI_RXSTATUS_LEN);
@@ -1628,15 +1628,15 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
}
static int
-hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
u8 msg_id, bool *msg_ready,
ssize_t *msg_sz)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
int ret;
- ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
+ ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status);
if (ret < 0) {
drm_dbg_kms(&i915->drm, "rx_status read failed. Err %d\n",
ret);
@@ -1656,10 +1656,10 @@ hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
}
static ssize_t
-intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
u8 msg_id, bool paired)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
bool msg_ready = false;
int timeout, ret;
ssize_t msg_sz = 0;
@@ -1668,7 +1668,7 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
if (timeout < 0)
return timeout;
- ret = __wait_for(ret = hdcp2_detect_msg_availability(intel_dig_port,
+ ret = __wait_for(ret = hdcp2_detect_msg_availability(dig_port,
msg_id, &msg_ready,
&msg_sz),
!ret && msg_ready && msg_sz, timeout * 1000,
@@ -1681,26 +1681,26 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *dig_port,
void *buf, size_t size)
{
unsigned int offset;
offset = HDCP_2_2_HDMI_REG_WR_MSG_OFFSET;
- return intel_hdmi_hdcp_write(intel_dig_port, offset, buf, size);
+ return intel_hdmi_hdcp_write(dig_port, offset, buf, size);
}
static
-int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *dig_port,
u8 msg_id, void *buf, size_t size)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_hdmi *hdmi = &dig_port->hdmi;
struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
unsigned int offset;
ssize_t ret;
- ret = intel_hdmi_hdcp2_wait_for_msg(intel_dig_port, msg_id,
+ ret = intel_hdmi_hdcp2_wait_for_msg(dig_port, msg_id,
hdcp->is_paired);
if (ret < 0)
return ret;
@@ -1717,7 +1717,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
}
offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
- ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret);
+ ret = intel_hdmi_hdcp_read(dig_port, offset, buf, ret);
if (ret)
drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n",
msg_id, ret);
@@ -1726,12 +1726,12 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port)
{
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
int ret;
- ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
+ ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status);
if (ret)
return ret;
@@ -1748,14 +1748,14 @@ int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
}
static
-int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp2_capable(struct intel_digital_port *dig_port,
bool *capable)
{
u8 hdcp2_version;
int ret;
*capable = false;
- ret = intel_hdmi_hdcp_read(intel_dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET,
+ ret = intel_hdmi_hdcp_read(dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET,
&hdcp2_version, sizeof(hdcp2_version));
if (!ret && hdcp2_version & HDCP_2_2_HDMI_SUPPORT_MASK)
*capable = true;
@@ -2063,7 +2063,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- struct intel_digital_port *intel_dig_port =
+ struct intel_digital_port *dig_port =
hdmi_to_dig_port(intel_hdmi);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
@@ -2107,7 +2107,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
- intel_dig_port->set_infoframes(encoder,
+ dig_port->set_infoframes(encoder,
false,
old_crtc_state, old_conn_state);
@@ -2242,8 +2242,11 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
if (clock > max_dotclk)
return MODE_CLOCK_HIGH;
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ if (!has_hdmi_sink)
+ return MODE_CLOCK_LOW;
clock *= 2;
+ }
if (drm_mode_is_420_only(&connector->display_info, mode))
clock /= 2;
@@ -2428,8 +2431,8 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
return 0;
}
-static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
@@ -2722,12 +2725,12 @@ static void intel_hdmi_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *intel_dig_port =
+ struct intel_digital_port *dig_port =
enc_to_dig_port(encoder);
intel_hdmi_prepare(encoder, pipe_config);
- intel_dig_port->set_infoframes(encoder,
+ dig_port->set_infoframes(encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
}
@@ -2737,7 +2740,7 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
vlv_phy_pre_encoder_enable(encoder, pipe_config);
@@ -2746,13 +2749,13 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
0x2b247878);
- dport->set_infoframes(encoder,
+ dig_port->set_infoframes(encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
- vlv_wait_port_ready(dev_priv, dport, 0x0);
+ vlv_wait_port_ready(dev_priv, dig_port, 0x0);
}
static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
@@ -2813,7 +2816,7 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2823,13 +2826,13 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
/* Use 800mV-0dB */
chv_set_phy_signal_level(encoder, 128, 102, false);
- dport->set_infoframes(encoder,
+ dig_port->set_infoframes(encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
- vlv_wait_port_ready(dev_priv, dport, 0x0);
+ vlv_wait_port_ready(dev_priv, dig_port, 0x0);
/* Second common lane will stay alive on its own now */
chv_phy_release_cl2_override(encoder);
@@ -2880,19 +2883,13 @@ intel_hdmi_connector_register(struct drm_connector *connector)
return ret;
}
-static void intel_hdmi_destroy(struct drm_connector *connector)
+static void intel_hdmi_connector_unregister(struct drm_connector *connector)
{
struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier;
cec_notifier_conn_unregister(n);
- intel_connector_destroy(connector);
-}
-
-static void intel_hdmi_connector_unregister(struct drm_connector *connector)
-{
intel_hdmi_remove_i2c_symlink(connector);
-
intel_connector_unregister(connector);
}
@@ -2904,7 +2901,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_hdmi_connector_register,
.early_unregister = intel_hdmi_connector_unregister,
- .destroy = intel_hdmi_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
@@ -2923,7 +2920,7 @@ static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_digital_port *intel_dig_port =
+ struct intel_digital_port *dig_port =
hdmi_to_dig_port(intel_hdmi);
intel_attach_force_audio_property(connector);
@@ -2935,7 +2932,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
* ToDo: This needs to be extended for LSPCON implementation
* as well. Will be implemented separately.
*/
- if (!intel_dig_port->lspcon.active)
+ if (!dig_port->lspcon.active)
intel_attach_colorspace_property(connector);
drm_connector_attach_content_type_property(connector);
@@ -3172,52 +3169,52 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
return ddc_pin;
}
-void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
+void intel_infoframe_init(struct intel_digital_port *dig_port)
{
struct drm_i915_private *dev_priv =
- to_i915(intel_dig_port->base.base.dev);
+ to_i915(dig_port->base.base.dev);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- intel_dig_port->write_infoframe = vlv_write_infoframe;
- intel_dig_port->read_infoframe = vlv_read_infoframe;
- intel_dig_port->set_infoframes = vlv_set_infoframes;
- intel_dig_port->infoframes_enabled = vlv_infoframes_enabled;
+ dig_port->write_infoframe = vlv_write_infoframe;
+ dig_port->read_infoframe = vlv_read_infoframe;
+ dig_port->set_infoframes = vlv_set_infoframes;
+ dig_port->infoframes_enabled = vlv_infoframes_enabled;
} else if (IS_G4X(dev_priv)) {
- intel_dig_port->write_infoframe = g4x_write_infoframe;
- intel_dig_port->read_infoframe = g4x_read_infoframe;
- intel_dig_port->set_infoframes = g4x_set_infoframes;
- intel_dig_port->infoframes_enabled = g4x_infoframes_enabled;
+ dig_port->write_infoframe = g4x_write_infoframe;
+ dig_port->read_infoframe = g4x_read_infoframe;
+ dig_port->set_infoframes = g4x_set_infoframes;
+ dig_port->infoframes_enabled = g4x_infoframes_enabled;
} else if (HAS_DDI(dev_priv)) {
- if (intel_dig_port->lspcon.active) {
- intel_dig_port->write_infoframe = lspcon_write_infoframe;
- intel_dig_port->read_infoframe = lspcon_read_infoframe;
- intel_dig_port->set_infoframes = lspcon_set_infoframes;
- intel_dig_port->infoframes_enabled = lspcon_infoframes_enabled;
+ if (dig_port->lspcon.active) {
+ dig_port->write_infoframe = lspcon_write_infoframe;
+ dig_port->read_infoframe = lspcon_read_infoframe;
+ dig_port->set_infoframes = lspcon_set_infoframes;
+ dig_port->infoframes_enabled = lspcon_infoframes_enabled;
} else {
- intel_dig_port->write_infoframe = hsw_write_infoframe;
- intel_dig_port->read_infoframe = hsw_read_infoframe;
- intel_dig_port->set_infoframes = hsw_set_infoframes;
- intel_dig_port->infoframes_enabled = hsw_infoframes_enabled;
+ dig_port->write_infoframe = hsw_write_infoframe;
+ dig_port->read_infoframe = hsw_read_infoframe;
+ dig_port->set_infoframes = hsw_set_infoframes;
+ dig_port->infoframes_enabled = hsw_infoframes_enabled;
}
} else if (HAS_PCH_IBX(dev_priv)) {
- intel_dig_port->write_infoframe = ibx_write_infoframe;
- intel_dig_port->read_infoframe = ibx_read_infoframe;
- intel_dig_port->set_infoframes = ibx_set_infoframes;
- intel_dig_port->infoframes_enabled = ibx_infoframes_enabled;
+ dig_port->write_infoframe = ibx_write_infoframe;
+ dig_port->read_infoframe = ibx_read_infoframe;
+ dig_port->set_infoframes = ibx_set_infoframes;
+ dig_port->infoframes_enabled = ibx_infoframes_enabled;
} else {
- intel_dig_port->write_infoframe = cpt_write_infoframe;
- intel_dig_port->read_infoframe = cpt_read_infoframe;
- intel_dig_port->set_infoframes = cpt_set_infoframes;
- intel_dig_port->infoframes_enabled = cpt_infoframes_enabled;
+ dig_port->write_infoframe = cpt_write_infoframe;
+ dig_port->read_infoframe = cpt_read_infoframe;
+ dig_port->set_infoframes = cpt_set_infoframes;
+ dig_port->infoframes_enabled = cpt_infoframes_enabled;
}
}
-void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
- struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
+ struct intel_encoder *intel_encoder = &dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i2c_adapter *ddc;
@@ -3231,9 +3228,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
return;
- if (drm_WARN(dev, intel_dig_port->max_lanes < 4,
+ if (drm_WARN(dev, dig_port->max_lanes < 4,
"Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
- intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ dig_port->max_lanes, intel_encoder->base.base.id,
intel_encoder->base.name))
return;
@@ -3322,21 +3319,21 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
void intel_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
- struct intel_digital_port *intel_dig_port;
+ struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
- intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
- if (!intel_dig_port)
+ dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ if (!dig_port)
return;
intel_connector = intel_connector_alloc();
if (!intel_connector) {
- kfree(intel_dig_port);
+ kfree(dig_port);
return;
}
- intel_encoder = &intel_dig_port->base;
+ intel_encoder = &dig_port->base;
drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
@@ -3393,12 +3390,12 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
if (IS_G4X(dev_priv))
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
- intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
- intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
- intel_dig_port->max_lanes = 4;
+ dig_port->hdmi.hdmi_reg = hdmi_reg;
+ dig_port->dp.output_reg = INVALID_MMIO_REG;
+ dig_port->max_lanes = 4;
- intel_infoframe_init(intel_dig_port);
+ intel_infoframe_init(dig_port);
- intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
- intel_hdmi_init_connector(intel_dig_port, intel_connector);
+ dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ intel_hdmi_init_connector(dig_port, intel_connector);
}
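/*
 * Illustrative sketch, not part of the patch: the mode_valid change above
 * doubles the pixel clock for double-clocked (DBLCLK) modes and rejects them
 * when no real HDMI sink is attached, since only HDMI sinks accept such
 * modes. A minimal helper expressing that check could look like this; the
 * helper name is hypothetical.
 */
static bool dblclk_adjust_clock(const struct drm_display_mode *mode,
				bool has_hdmi_sink, int *clock)
{
	if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
		/* Double-clocked modes are only defined for HDMI sinks. */
		if (!has_hdmi_sink)
			return false;
		*clock *= 2;
	}
	return true;
}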
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 8ff1f76a63df..5b348dcab77a 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -25,7 +25,7 @@ enum port;
void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
enum port port);
-void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder);
int intel_hdmi_compute_config(struct intel_encoder *encoder,
@@ -36,7 +36,7 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
bool high_tmds_clock_ratio,
bool scrambling);
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
-void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
+void intel_infoframe_init(struct intel_digital_port *dig_port);
u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
u32 intel_hdmi_infoframe_enable(unsigned int type);
@@ -46,5 +46,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
union hdmi_infoframe *frame);
+bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
#endif /* __INTEL_HDMI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 6ff7b226f0a1..b781bf469644 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -550,11 +550,11 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON);
}
-bool lspcon_init(struct intel_digital_port *intel_dig_port)
+bool lspcon_init(struct intel_digital_port *dig_port)
{
- struct intel_dp *dp = &intel_dig_port->dp;
- struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_connector *connector = &dp->attached_connector->base;
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h
index 37cfddf8a9c5..1cffe8a42a08 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.h
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.h
@@ -15,7 +15,7 @@ struct intel_digital_port;
struct intel_encoder;
struct intel_lspcon;
-bool lspcon_init(struct intel_digital_port *intel_dig_port);
+bool lspcon_init(struct intel_digital_port *dig_port);
void lspcon_resume(struct intel_lspcon *lspcon);
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
void lspcon_write_infoframe(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 611cb8d74811..bf9e320c547d 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -905,8 +905,8 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = dev_priv->psr.dp;
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &intel_dig_port->base;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
u32 val;
drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 773523dcd107..2da4388e1540 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -94,6 +94,8 @@ struct intel_sdvo {
*/
struct intel_sdvo_caps caps;
+ u8 colorimetry_cap;
+
/* Pixel clock limitations reported by the SDVO device, in kHz */
int pixel_clock_min, pixel_clock_max;
@@ -942,6 +944,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
}
+static bool intel_sdvo_set_pixel_replication(struct intel_sdvo *intel_sdvo,
+ u8 pixel_repeat)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_PIXEL_REPLI,
+ &pixel_repeat, 1);
+}
+
static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
u8 audio_state)
{
@@ -1277,6 +1286,18 @@ static bool intel_has_hdmi_sink(struct intel_sdvo *sdvo,
READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
}
+static bool intel_sdvo_limited_color_range(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+
+ if ((intel_sdvo->colorimetry_cap & SDVO_COLORIMETRY_RGB220) == 0)
+ return false;
+
+ return intel_hdmi_limited_color_range(crtc_state, conn_state);
+}
+
static int intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -1342,21 +1363,9 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON;
}
- if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
- /*
- * See CEA-861-E - 5.1 Default Encoding Parameters
- *
- * FIXME: This bit is only valid when using TMDS encoding and 8
- * bit per color mode.
- */
- if (pipe_config->has_hdmi_sink &&
- drm_match_cea_mode(adjusted_mode) > 1)
- pipe_config->limited_color_range = true;
- } else {
- if (pipe_config->has_hdmi_sink &&
- intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED)
- pipe_config->limited_color_range = true;
- }
+ pipe_config->limited_color_range =
+ intel_sdvo_limited_color_range(encoder, pipe_config,
+ conn_state);
/* Clock computation needs to happen after pixel multiplier. */
if (IS_TV(intel_sdvo_connector))
@@ -1495,8 +1504,13 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
if (crtc_state->has_hdmi_sink) {
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
+ crtc_state->limited_color_range ?
+ SDVO_COLORIMETRY_RGB220 :
SDVO_COLORIMETRY_RGB256);
intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
+ intel_sdvo_set_pixel_replication(intel_sdvo,
+ !!(adjusted_mode->flags &
+ DRM_MODE_FLAG_DBLCLK));
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
@@ -1530,8 +1544,6 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
- sdvox |= HDMI_COLOR_RANGE_16_235;
if (INTEL_GEN(dev_priv) < 5)
sdvox |= SDVO_BORDER_ENABLE;
} else {
@@ -1689,8 +1701,11 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
"SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
pipe_config->pixel_multiplier, encoder_pixel_multiplier);
- if (sdvox & HDMI_COLOR_RANGE_16_235)
- pipe_config->limited_color_range = true;
+ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_COLORIMETRY,
+ &val, 1)) {
+ if (val == SDVO_COLORIMETRY_RGB220)
+ pipe_config->limited_color_range = true;
+ }
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT,
&val, 1)) {
@@ -1850,17 +1865,26 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, connector->state);
+ int clock = mode->clock;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- if (intel_sdvo->pixel_clock_min > mode->clock)
- return MODE_CLOCK_LOW;
- if (intel_sdvo->pixel_clock_max < mode->clock)
+ if (clock > max_dotclk)
return MODE_CLOCK_HIGH;
- if (mode->clock > max_dotclk)
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ if (!has_hdmi_sink)
+ return MODE_CLOCK_LOW;
+ clock *= 2;
+ }
+
+ if (intel_sdvo->pixel_clock_min > clock)
+ return MODE_CLOCK_LOW;
+
+ if (intel_sdvo->pixel_clock_max < clock)
return MODE_CLOCK_HIGH;
if (IS_LVDS(intel_sdvo_connector)) {
@@ -1914,6 +1938,17 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
return true;
}
+static u8 intel_sdvo_get_colorimetry_cap(struct intel_sdvo *intel_sdvo)
+{
+ u8 cap;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_COLORIMETRY_CAP,
+ &cap, sizeof(cap)))
+ return SDVO_COLORIMETRY_RGB256;
+
+ return cap;
+}
+
static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
@@ -2100,8 +2135,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
return ret;
}
-static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+static int intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
+ int num_modes = 0;
struct edid *edid;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2116,18 +2152,19 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* DDC fails, check to see if the analog output is disconnected, in
* which case we'll look there for the digital DDC data.
*/
- if (edid == NULL)
+ if (!edid)
edid = intel_sdvo_get_analog_edid(connector);
- if (edid != NULL) {
- if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
- edid)) {
- drm_connector_update_edid_property(connector, edid);
- drm_add_edid_modes(connector, edid);
- }
+ if (!edid)
+ return 0;
- kfree(edid);
- }
+ if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+ edid))
+ num_modes += intel_connector_update_modes(connector, edid);
+
+ kfree(edid);
+
+ return num_modes;
}
/*
@@ -2195,12 +2232,13 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
-static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
+static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
const struct drm_connector_state *conn_state = connector->state;
struct intel_sdvo_sdtv_resolution_request tv_res;
u32 reply = 0, format_map = 0;
+ int num_modes = 0;
int i;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2215,31 +2253,37 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
- return;
+ return 0;
BUILD_BUG_ON(sizeof(tv_res) != 3);
if (!intel_sdvo_write_cmd(intel_sdvo,
SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
&tv_res, sizeof(tv_res)))
- return;
+ return 0;
if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
- return;
+ return 0;
- for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
+ for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) {
if (reply & (1 << i)) {
struct drm_display_mode *nmode;
nmode = drm_mode_duplicate(connector->dev,
&sdvo_tv_modes[i]);
- if (nmode)
+ if (nmode) {
drm_mode_probed_add(connector, nmode);
+ num_modes++;
+ }
}
+ }
+
+ return num_modes;
}
-static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+static int intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct drm_display_mode *newmode;
+ int num_modes = 0;
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -2256,6 +2300,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
newmode->type = (DRM_MODE_TYPE_PREFERRED |
DRM_MODE_TYPE_DRIVER);
drm_mode_probed_add(connector, newmode);
+ num_modes++;
}
}
@@ -2264,7 +2309,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+ num_modes += intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+
+ return num_modes;
}
static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -2272,13 +2319,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
if (IS_TV(intel_sdvo_connector))
- intel_sdvo_get_tv_modes(connector);
+ return intel_sdvo_get_tv_modes(connector);
else if (IS_LVDS(intel_sdvo_connector))
- intel_sdvo_get_lvds_modes(connector);
+ return intel_sdvo_get_lvds_modes(connector);
else
- intel_sdvo_get_ddc_modes(connector);
-
- return !list_empty(&connector->probed_modes);
+ return intel_sdvo_get_ddc_modes(connector);
}
static int
@@ -2669,12 +2714,9 @@ static void
intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.base.dev);
-
intel_attach_force_audio_property(&connector->base.base);
- if (INTEL_GEN(dev_priv) >= 4 && IS_MOBILE(dev_priv)) {
+ if (intel_sdvo->colorimetry_cap & SDVO_COLORIMETRY_RGB220)
intel_attach_broadcast_rgb_property(&connector->base.base);
- }
intel_attach_aspect_ratio_property(&connector->base.base);
}
@@ -3315,6 +3357,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
goto err;
+ intel_sdvo->colorimetry_cap =
+ intel_sdvo_get_colorimetry_cap(intel_sdvo);
+
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
drm_dbg_kms(&dev_priv->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo_regs.h b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
index 13b9a8e257bb..74dc6c042b6e 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
@@ -705,10 +705,10 @@ struct intel_sdvo_enhancements_arg {
#define SDVO_CMD_GET_PIXEL_REPLI 0x8c
#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d
#define SDVO_CMD_SET_COLORIMETRY 0x8e
- #define SDVO_COLORIMETRY_RGB256 0x0
- #define SDVO_COLORIMETRY_RGB220 0x1
- #define SDVO_COLORIMETRY_YCrCb422 0x3
- #define SDVO_COLORIMETRY_YCrCb444 0x4
+ #define SDVO_COLORIMETRY_RGB256 (1 << 0)
+ #define SDVO_COLORIMETRY_RGB220 (1 << 1)
+ #define SDVO_COLORIMETRY_YCrCb422 (1 << 2)
+ #define SDVO_COLORIMETRY_YCrCb444 (1 << 3)
#define SDVO_CMD_GET_COLORIMETRY 0x8f
#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
#define SDVO_CMD_SET_AUDIO_STAT 0x91
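/*
 * Illustrative sketch, not part of the patch: with the colorimetry values
 * redefined as individual bits above, the capability byte returned for
 * SDVO_CMD_GET_COLORIMETRY_CAP can be tested as a bitmask, which is what the
 * new intel_sdvo_limited_color_range() relies on. For example (hypothetical
 * helper):
 */
static bool sdvo_supports_rgb220(u8 colorimetry_cap)
{
	/* Limited-range RGB (16-235) output is only valid if advertised. */
	return (colorimetry_cap & SDVO_COLORIMETRY_RGB220) != 0;
}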
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index aef7fe932d1a..6faabd4f6d49 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -820,6 +820,7 @@ struct bdb_lfp_power {
u16 adb;
u16 lace_enabled_status;
struct agressiveness_profile_entry aggressivenes[16];
+ u16 hobl; /* 232+ */
} __packed;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index d145fe2bed81..c5735c365659 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -1045,7 +1045,7 @@ static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
@@ -1055,9 +1055,9 @@ static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
/* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
drm_dsc_pps_payload_pack(&dp_dsc_pps_sdp.pps_payload, vdsc_cfg);
- intel_dig_port->write_infoframe(encoder, crtc_state,
- DP_SDP_PPS, &dp_dsc_pps_sdp,
- sizeof(dp_dsc_pps_sdp));
+ dig_port->write_infoframe(encoder, crtc_state,
+ DP_SDP_PPS, &dp_dsc_pps_sdp,
+ sizeof(dp_dsc_pps_sdp));
}
void intel_dsc_enable(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index d3a86a4d5c04..278664f831e7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -32,16 +32,17 @@ static void vma_clear_pages(struct i915_vma *vma)
vma->pages = NULL;
}
-static int vma_bind(struct i915_vma *vma,
+static int vma_bind(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- return vma->vm->vma_ops.bind_vma(vma, cache_level, flags);
+ return vm->vma_ops.bind_vma(vm, vma, cache_level, flags);
}
-static void vma_unbind(struct i915_vma *vma)
+static void vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
- vma->vm->vma_ops.unbind_vma(vma);
+ vm->vma_ops.unbind_vma(vm, vma);
}
static const struct i915_vma_ops proxy_vma_ops = {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 6675447a47b9..d0bdb6d447ed 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -101,8 +101,7 @@ static void lut_close(struct i915_gem_context *ctx)
struct radix_tree_iter iter;
void __rcu **slot;
- lockdep_assert_held(&ctx->mutex);
-
+ mutex_lock(&ctx->lut_mutex);
rcu_read_lock();
radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
struct i915_vma *vma = rcu_dereference_raw(*slot);
@@ -135,6 +134,7 @@ static void lut_close(struct i915_gem_context *ctx)
i915_gem_object_put(obj);
}
rcu_read_unlock();
+ mutex_unlock(&ctx->lut_mutex);
}
static struct intel_context *
@@ -342,6 +342,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
spin_unlock(&ctx->i915->gem.contexts.lock);
mutex_destroy(&ctx->engines_mutex);
+ mutex_destroy(&ctx->lut_mutex);
if (ctx->timeline)
intel_timeline_put(ctx->timeline);
@@ -725,6 +726,7 @@ __create_context(struct drm_i915_private *i915)
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
+ mutex_init(&ctx->lut_mutex);
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
@@ -1312,11 +1314,11 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
if (vm == rcu_access_pointer(ctx->vm))
goto unlock;
+ old = __set_ppgtt(ctx, vm);
+
/* Tear down the existing obj:vma cache; it will have to be rebuilt. */
lut_close(ctx);
- old = __set_ppgtt(ctx, vm);
-
/*
* We need to flush any requests using the current ppgtt before
* we release it as the requests do not hold a reference themselves,
@@ -1330,6 +1332,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
if (err) {
i915_vm_close(__set_ppgtt(ctx, old));
i915_vm_close(old);
+ lut_close(ctx); /* force a rebuild of the old obj:vma cache */
}
unlock:
@@ -1397,11 +1400,12 @@ static int get_ringsize(struct i915_gem_context *ctx,
}
int
-i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+i915_gem_user_to_context_sseu(struct intel_gt *gt,
const struct drm_i915_gem_context_param_sseu *user,
struct intel_sseu *context)
{
- const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
+ const struct sseu_dev_info *device = &gt->info.sseu;
+ struct drm_i915_private *i915 = gt->i915;
/* No zeros in any field. */
if (!user->slice_mask || !user->subslice_mask ||
@@ -1534,7 +1538,7 @@ static int set_sseu(struct i915_gem_context *ctx,
goto out_ce;
}
- ret = i915_gem_user_to_context_sseu(i915, &user_sseu, &sseu);
+ ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
if (ret)
goto out_ce;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 3702b2fb27ab..a133f92bbedb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -225,7 +225,7 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);
-int i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+int i915_gem_user_to_context_sseu(struct intel_gt *gt,
const struct drm_i915_gem_context_param_sseu *user,
struct intel_sseu *context);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 28760bd03265..ae14ca24a11f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -170,6 +170,7 @@ struct i915_gem_context {
* per vm, which may be one per context or shared with the global GTT)
*/
struct radix_tree_root handles_vma;
+ struct mutex lut_mutex;
/**
* @name: arbitrary name, used for user debug
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b4862afaaf28..6b4ec66cb558 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -782,10 +782,15 @@ static int __eb_add_lut(struct i915_execbuffer *eb,
/* Check that the context hasn't been closed in the meantime */
err = -EINTR;
- if (!mutex_lock_interruptible(&ctx->mutex)) {
- err = -ENOENT;
- if (likely(!i915_gem_context_is_closed(ctx)))
+ if (!mutex_lock_interruptible(&ctx->lut_mutex)) {
+ struct i915_address_space *vm = rcu_access_pointer(ctx->vm);
+
+ if (unlikely(vm && vma->vm != vm))
+ err = -EAGAIN; /* user racing with ctx set-vm */
+ else if (likely(!i915_gem_context_is_closed(ctx)))
err = radix_tree_insert(&ctx->handles_vma, handle, vma);
+ else
+ err = -ENOENT;
if (err == 0) { /* And nor has this handle */
struct drm_i915_gem_object *obj = vma->obj;
@@ -798,7 +803,7 @@ static int __eb_add_lut(struct i915_execbuffer *eb,
}
spin_unlock(&obj->lut_lock);
}
- mutex_unlock(&ctx->mutex);
+ mutex_unlock(&ctx->lut_mutex);
}
if (unlikely(err))
goto err;
@@ -814,6 +819,8 @@ err:
static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
{
+ struct i915_address_space *vm = eb->context->vm;
+
do {
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -821,7 +828,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
rcu_read_lock();
vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
- if (likely(vma))
+ if (likely(vma && vma->vm == vm))
vma = i915_vma_tryget(vma);
rcu_read_unlock();
if (likely(vma))
@@ -831,7 +838,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
if (unlikely(!obj))
return ERR_PTR(-ENOENT);
- vma = i915_vma_instance(obj, eb->context->vm, NULL);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return vma;
@@ -1973,8 +1980,7 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
static int num_vcs_engines(const struct drm_i915_private *i915)
{
- return hweight64(INTEL_INFO(i915)->engine_mask &
- GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
+ return hweight64(VDBOX_MASK(&i915->gt));
}
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index fe27c5b344e3..b23368529a40 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -448,7 +448,7 @@ void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
* mapping will then trigger a page fault on the next user access, allowing
* fixup by vm_fault_gtt().
*/
-static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
+void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
intel_wakeref_t wakeref;
@@ -507,19 +507,6 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
spin_unlock(&obj->mmo.lock);
}
-/**
- * i915_gem_object_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- */
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
-{
- i915_gem_object_release_mmap_gtt(obj);
- i915_gem_object_release_mmap_offset(obj);
-}
-
static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index 862e01b7cb69..efee9e0d2508 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -25,7 +25,8 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
u32 handle, u64 *offset);
void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 6b69191c5543..c8421fd9d2dc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -143,14 +143,14 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
* vma, in the same fd namespace, by virtue of flink/open.
*/
- mutex_lock(&ctx->mutex);
+ mutex_lock(&ctx->lut_mutex);
vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
if (vma) {
GEM_BUG_ON(vma->obj != obj);
GEM_BUG_ON(!atomic_read(&vma->open_count));
i915_vma_close(vma);
}
- mutex_unlock(&ctx->mutex);
+ mutex_unlock(&ctx->lut_mutex);
i915_gem_context_put(lut->ctx);
i915_lut_handle_free(lut);
@@ -171,14 +171,35 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
atomic_dec(&i915->mm.free_count);
}
+static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
+{
+ /* Skip serialisation and waking the device if known to be unused. */
+
+ if (obj->userfault_count)
+ i915_gem_object_release_mmap_gtt(obj);
+
+ if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
+ struct i915_mmap_offset *mmo, *mn;
+
+ i915_gem_object_release_mmap_offset(obj);
+
+ rbtree_postorder_for_each_entry_safe(mmo, mn,
+ &obj->mmo.offsets,
+ offset) {
+ drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node);
+ kfree(mmo);
+ }
+ obj->mmo.offsets = RB_ROOT;
+ }
+}
+
static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
struct drm_i915_gem_object *obj, *on;
llist_for_each_entry_safe(obj, on, freed, freed) {
- struct i915_mmap_offset *mmo, *mn;
-
trace_i915_gem_object_destroy(obj);
if (!list_empty(&obj->vma.list)) {
@@ -204,18 +225,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
spin_unlock(&obj->vma.lock);
}
- i915_gem_object_release_mmap(obj);
-
- rbtree_postorder_for_each_entry_safe(mmo, mn,
- &obj->mmo.offsets,
- offset) {
- drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
- &mmo->vma_node);
- kfree(mmo);
- }
- obj->mmo.offsets = RB_ROOT;
+ __i915_gem_object_free_mmaps(obj);
- GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(!list_empty(&obj->lut_list));
atomic_set(&obj->mm.pages_pin_count, 0);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 2faa481cc18f..e5b9276d254c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -258,10 +258,6 @@ struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
unsigned int n);
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n);
-
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,
@@ -394,6 +390,8 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
+void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
+
void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
unsigned int flush_domains);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index af9e48ee4a33..7050519c87a4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -408,6 +408,21 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
}
}
+void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
+{
+ GEM_BUG_ON(!obj->mm.mapping);
+
+ /*
+ * We allow removing the mapping from underneath pinned pages!
+ *
+ * Furthermore, since this is an unsafe operation reserved only
+ * for construction time manipulation, we ignore locking prudence.
+ */
+ unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));
+
+ i915_gem_object_unpin_map(obj);
+}
+
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
unsigned int n,
@@ -533,20 +548,6 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
return nth_page(sg_page(sg), offset);
}
-/* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n)
-{
- struct page *page;
-
- page = i915_gem_object_get_page(obj, n);
- if (!obj->mm.dirty)
- set_page_dirty(page);
-
- return page;
-}
-
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 5b65ce738b16..dc8f052a0ffe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -13,6 +13,8 @@
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
+#include "gt/intel_gt_requests.h"
+
#include "i915_trace.h"
static bool swap_available(void)
@@ -111,15 +113,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
unsigned long count = 0;
unsigned long scanned = 0;
- /*
- * When shrinking the active list, we should also consider active
- * contexts. Active contexts are pinned until they are retired, and
- * so can not be simply unbound to retire and unpin their pages. To
- * shrink the contexts, we must wait until the gpu is idle and
- * completed its switch to the kernel context. In short, we do
- * not have a good mechanism for idling a specific context.
- */
-
trace_i915_gem_shrink(i915, target, shrink);
/*
@@ -134,6 +127,20 @@ i915_gem_shrink(struct drm_i915_private *i915,
}
/*
+ * When shrinking the active list, we should also consider active
+ * contexts. Active contexts are pinned until they are retired, and
+ * so can not be simply unbound to retire and unpin their pages. To
+ * shrink the contexts, we must wait until the gpu is idle and
+ * completed its switch to the kernel context. In short, we do
+ * not have a good mechanism for idling a specific context, but
+ * what we can do is give them a kick so that we do not keep idle
+ * contexts around longer than is necessary.
+ */
+ if (shrink & I915_SHRINK_ACTIVE)
+ /* Retire requests to unpin all idle contexts */
+ intel_gt_retire_requests(&i915->gt);
+
+ /*
* As we may completely rewrite the (un)bound list whilst unbinding
* (due to retiring requests) we have to strictly process only
* one element of the list at the time, and recheck the list
@@ -408,26 +415,15 @@ void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
struct mutex *mutex)
{
- bool unlock = false;
-
if (!IS_ENABLED(CONFIG_LOCKDEP))
return;
- if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
- mutex_acquire(&i915->drm.struct_mutex.dep_map,
- I915_MM_NORMAL, 0, _RET_IP_);
- unlock = true;
- }
-
fs_reclaim_acquire(GFP_KERNEL);
mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
mutex_release(&mutex->dep_map, _RET_IP_);
fs_reclaim_release(GFP_KERNEL);
-
- if (unlock)
- mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_);
}
#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 0158e49bf9bb..ff72ee2fd9cd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -299,7 +299,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
i915_gem_object_unlock(obj);
/* Force the fence to be reacquired for GTT access */
- i915_gem_object_release_mmap(obj);
+ i915_gem_object_release_mmap_gtt(obj);
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index b81978890641..7ffc3c751432 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1229,7 +1229,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
int inst = 0;
int ret = 0;
- if (INTEL_GEN(i915) < 9 || !RUNTIME_INFO(i915)->sseu.has_slice_pg)
+ if (INTEL_GEN(i915) < 9)
return 0;
if (flags & TEST_RESET)
@@ -1255,6 +1255,9 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
if (hweight32(engine->sseu.slice_mask) < 2)
continue;
+ if (!engine->gt->info.sseu.has_slice_pg)
+ continue;
+
/*
* Gen11 VME friendly power-gated configuration with
* half enabled sub-slices.
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index aa0d06cf1903..51b5a3421b40 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -23,6 +23,8 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ mutex_init(&ctx->mutex);
+
spin_lock_init(&ctx->stale.lock);
INIT_LIST_HEAD(&ctx->stale.engines);
@@ -35,7 +37,7 @@ mock_context(struct drm_i915_private *i915,
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
- mutex_init(&ctx->mutex);
+ mutex_init(&ctx->lut_mutex);
if (name) {
struct i915_ppgtt *ppgtt;
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c
index 1de5fbaa1cf9..3a21cf63b3f0 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.c
@@ -9,6 +9,7 @@
#include "debugfs_engines.h"
#include "debugfs_gt.h"
#include "debugfs_gt_pm.h"
+#include "intel_sseu_debugfs.h"
#include "uc/intel_uc_debugfs.h"
#include "i915_drv.h"
@@ -25,6 +26,7 @@ void debugfs_gt_register(struct intel_gt *gt)
debugfs_engines_register(gt, root);
debugfs_gt_pm_register(gt, root);
+ intel_sseu_debugfs_register(gt, root);
intel_uc_debugfs_register(&gt->uc, root);
}
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index f4fec7eb4064..cdc0b9c54305 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -183,13 +183,11 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
struct i915_page_directory * const pd = ppgtt->base.pd;
struct i915_page_table *pt, *alloc = NULL;
- intel_wakeref_t wakeref;
+ bool flush = false;
u64 from = start;
unsigned int pde;
int ret = 0;
- wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
-
spin_lock(&pd->lock);
gen6_for_each_pde(pt, pd, start, length, pde) {
const unsigned int count = gen6_pte_count(start, length);
@@ -214,14 +212,20 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
alloc = pt;
pt = pd->entry[pde];
}
+
+ flush = true;
}
atomic_add(count, &pt->used);
}
spin_unlock(&pd->lock);
- if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND))
- gen6_flush_pd(ppgtt, from, start);
+ if (flush && i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
+ gen6_flush_pd(ppgtt, from, start);
+ }
goto out;
@@ -230,7 +234,6 @@ unwind_out:
out:
if (alloc)
free_px(vm, alloc);
- intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
return ret;
}
@@ -299,11 +302,12 @@ static void pd_vma_clear_pages(struct i915_vma *vma)
vma->pages = NULL;
}
-static int pd_vma_bind(struct i915_vma *vma,
+static int pd_vma_bind(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct gen6_ppgtt *ppgtt = vma->private;
u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
@@ -314,7 +318,7 @@ static int pd_vma_bind(struct i915_vma *vma,
return 0;
}
-static void pd_vma_unbind(struct i915_vma *vma)
+static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
struct gen6_ppgtt *ppgtt = vma->private;
struct i915_page_directory * const pd = ppgtt->base.pd;
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index de595b66a746..d93d85cd3027 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -396,7 +396,7 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
i915_gem_object_flush_map(vma->obj);
- i915_gem_object_unpin_map(vma->obj);
+ __i915_gem_object_release_map(vma->obj);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index d907d538176e..91786310c114 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -314,13 +314,18 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
lockdep_assert_held(&rq->lock);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ return true;
+
if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
struct intel_context *ce = rq->context;
struct list_head *pos;
spin_lock(&b->irq_lock);
- GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+
+ if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+ goto unlock;
if (!__intel_breadcrumbs_arm_irq(b))
goto unlock;
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index e4aece20bc80..52db2bde44a3 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -204,25 +204,25 @@ static int __ring_active(struct intel_ring *ring)
{
int err;
- err = i915_active_acquire(&ring->vma->active);
+ err = intel_ring_pin(ring);
if (err)
return err;
- err = intel_ring_pin(ring);
+ err = i915_active_acquire(&ring->vma->active);
if (err)
- goto err_active;
+ goto err_pin;
return 0;
-err_active:
- i915_active_release(&ring->vma->active);
+err_pin:
+ intel_ring_unpin(ring);
return err;
}
static void __ring_retire(struct intel_ring *ring)
{
- intel_ring_unpin(ring);
i915_active_release(&ring->vma->active);
+ intel_ring_unpin(ring);
}
__i915_active_call
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index 27ae48049239..b9c8163978a3 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -30,7 +30,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
- *cs++ = intel_sseu_make_rpcs(rq->engine->i915, &sseu);
+ *cs++ = intel_sseu_make_rpcs(rq->engine->gt, &sseu);
intel_ring_advance(rq, cs);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 7bf2f76212f0..dd1a42c4d344 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -370,7 +370,7 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
* instances.
*/
if ((INTEL_GEN(i915) >= 11 &&
- RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) ||
+ engine->gt->info.vdbox_sfc_access & engine->mask) ||
(INTEL_GEN(i915) >= 9 && engine->instance == 0))
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
@@ -450,6 +450,80 @@ void intel_engines_free(struct intel_gt *gt)
}
}
+/*
+ * Determine which engines are fused off in our particular hardware.
+ * Note that we have a catch-22 situation where we need to be able to access
+ * the blitter forcewake domain to read the engine fuses, but at the same time
+ * we need to know which engines are available on the system to know which
+ * forcewake domains are present. We solve this by initializing the forcewake
+ * domains based on the full engine mask in the platform capabilities before
+ * calling this function and pruning the domains for fused-off engines
+ * afterwards.
+ */
+static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_gt_info *info = &gt->info;
+ struct intel_uncore *uncore = gt->uncore;
+ unsigned int logical_vdbox = 0;
+ unsigned int i;
+ u32 media_fuse;
+ u16 vdbox_mask;
+ u16 vebox_mask;
+
+ info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;
+
+ if (INTEL_GEN(i915) < 11)
+ return info->engine_mask;
+
+ media_fuse = ~intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
+
+ vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+ vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+ GEN11_GT_VEBOX_DISABLE_SHIFT;
+
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ if (!HAS_ENGINE(gt, _VCS(i))) {
+ vdbox_mask &= ~BIT(i);
+ continue;
+ }
+
+ if (!(BIT(i) & vdbox_mask)) {
+ info->engine_mask &= ~BIT(_VCS(i));
+ drm_dbg(&i915->drm, "vcs%u fused off\n", i);
+ continue;
+ }
+
+ /*
+ * In Gen11, only even numbered logical VDBOXes are
+ * hooked up to an SFC (Scaler & Format Converter) unit.
+ * In TGL each VDBOX has access to an SFC.
+ */
+ if (INTEL_GEN(i915) >= 12 || logical_vdbox++ % 2 == 0)
+ gt->info.vdbox_sfc_access |= BIT(i);
+ }
+ drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
+ vdbox_mask, VDBOX_MASK(gt));
+ GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
+
+ for (i = 0; i < I915_MAX_VECS; i++) {
+ if (!HAS_ENGINE(gt, _VECS(i))) {
+ vebox_mask &= ~BIT(i);
+ continue;
+ }
+
+ if (!(BIT(i) & vebox_mask)) {
+ info->engine_mask &= ~BIT(_VECS(i));
+ drm_dbg(&i915->drm, "vecs%u fused off\n", i);
+ }
+ }
+ drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
+ vebox_mask, VEBOX_MASK(gt));
+ GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
+
+ return info->engine_mask;
+}
+
/**
* intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
* @gt: pointer to struct intel_gt
@@ -459,8 +533,7 @@ void intel_engines_free(struct intel_gt *gt)
int intel_engines_init_mmio(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
- struct intel_device_info *device_info = mkwrite_device_info(i915);
- const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
+ const unsigned int engine_mask = init_engine_mask(gt);
unsigned int mask = 0;
unsigned int i;
int err;
@@ -473,7 +546,7 @@ int intel_engines_init_mmio(struct intel_gt *gt)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
- if (!HAS_ENGINE(i915, i))
+ if (!HAS_ENGINE(gt, i))
continue;
err = intel_engine_setup(gt, i);
@@ -489,14 +562,16 @@ int intel_engines_init_mmio(struct intel_gt *gt)
* engines.
*/
if (drm_WARN_ON(&i915->drm, mask != engine_mask))
- device_info->engine_mask = mask;
+ gt->info.engine_mask = mask;
- RUNTIME_INFO(i915)->num_engines = hweight32(mask);
+ gt->info.num_engines = hweight32(mask);
intel_gt_check_and_clear_faults(gt);
intel_setup_engine_capabilities(gt);
+ intel_uncore_prune_engine_fw_domains(gt->uncore, gt);
+
return 0;
cleanup:
@@ -634,7 +709,7 @@ static int engine_setup_common(struct intel_engine_cs *engine)
/* Use the whole device by default */
engine->sseu =
- intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
+ intel_sseu_from_device_info(&engine->gt->info.sseu);
intel_engine_init_workarounds(engine);
intel_engine_init_whitelist(engine);
@@ -1000,7 +1075,7 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *i915 = engine->i915;
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+ const struct sseu_dev_info *sseu = &engine->gt->info.sseu;
struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
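For reference, the fuse decoding in init_engine_mask() above reduces to inverting a per-engine disable fuse into an enable mask and pruning any instance the platform never exposed. A minimal standalone sketch of that arithmetic (the field width and the sample fuse value are made up for illustration and do not reflect the real GEN11 register layout):

#include <stdint.h>
#include <stdio.h>

#define MAX_VCS            8
#define VDBOX_DISABLE_MASK 0x00ffu      /* illustrative field width only */

int main(void)
{
	/* Pretend the HW disable fuse reports VCS0 and VCS2 as fused off. */
	uint32_t media_fuse = ~0x00000005u;     /* invert: set bit == enabled */
	uint16_t vdbox_mask = media_fuse & VDBOX_DISABLE_MASK;
	uint32_t engine_mask = 0x000000ffu;     /* all VCS the platform could have */
	unsigned int i;

	for (i = 0; i < MAX_VCS; i++) {
		if (!(vdbox_mask & (1u << i))) {
			engine_mask &= ~(1u << i);  /* prune the fused-off instance */
			printf("vcs%u fused off\n", i);
		}
	}
	printf("enabled vdbox mask: %#x\n", engine_mask);
	return 0;
}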
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index d0a1078ef632..8ec3eecf3e39 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -142,6 +142,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
return true;
GEM_BUG_ON(!intel_context_is_barrier(ce));
+ GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);
/* Already inside the kernel context, safe to power down. */
if (engine->wakeref_serial == engine->serial)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 490af81bd6f3..8de92fd7d392 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -177,8 +177,12 @@ struct intel_engine_execlists {
* the first error interrupt, record the EIR and schedule the tasklet.
* In the tasklet, we process the pending CS events to ensure we have
* the guilty request, and then reset the engine.
+ *
+ * Low 16b are used by HW, with the upper 16b used as the enabling mask.
+ * Reserve the upper 16b for tracking internal errors.
*/
u32 error_interrupt;
+#define ERROR_CSB BIT(31)
/**
* @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 848decee9066..34e6096f196e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -201,7 +201,7 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
uabi_node);
char old[sizeof(engine->name)];
- if (intel_gt_has_init_error(engine->gt))
+ if (intel_gt_has_unrecoverable_error(engine->gt))
continue; /* ignore incomplete engines */
GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 323c328d444a..62979ea591f0 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -436,7 +436,8 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
-static int ggtt_bind_vma(struct i915_vma *vma,
+static int ggtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
@@ -451,15 +452,15 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ vm->insert_entries(vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
return 0;
}
-static void ggtt_unbind_vma(struct i915_vma *vma)
+static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+ vm->clear_range(vm, vma->node.start, vma->size);
}
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
@@ -567,7 +568,8 @@ err:
return ret;
}
-static int aliasing_gtt_bind_vma(struct i915_vma *vma,
+static int aliasing_gtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
@@ -580,44 +582,27 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
+ struct i915_ppgtt *alias = i915_vm_to_ggtt(vm)->alias;
- if (flags & I915_VMA_ALLOC) {
- ret = alias->vm.allocate_va_range(&alias->vm,
- vma->node.start,
- vma->size);
- if (ret)
- return ret;
-
- set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
- }
-
- GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
- __i915_vma_flags(vma)));
- alias->vm.insert_entries(&alias->vm, vma,
- cache_level, pte_flags);
+ ret = ppgtt_bind_vma(&alias->vm, vma, cache_level, flags);
+ if (ret)
+ return ret;
}
if (flags & I915_VMA_GLOBAL_BIND)
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ vm->insert_entries(vm, vma, cache_level, pte_flags);
return 0;
}
-static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
+static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma)
{
- if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
- struct i915_address_space *vm = vma->vm;
-
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
vm->clear_range(vm, vma->node.start, vma->size);
- }
-
- if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
- struct i915_address_space *vm =
- &i915_vm_to_ggtt(vma->vm)->alias->vm;
- vm->clear_range(vm, vma->node.start, vma->size);
- }
+ if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
+ ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
}
static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index ebc29b6ee86c..e0755f1a904b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -44,6 +44,14 @@ void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
gt->ggtt = ggtt;
}
+int intel_gt_init_mmio(struct intel_gt *gt)
+{
+ intel_uc_init_mmio(&gt->uc);
+ intel_sseu_info_init(gt);
+
+ return intel_engines_init_mmio(gt);
+}
+
static void init_unused_ring(struct intel_gt *gt, u32 base)
{
struct intel_uncore *uncore = gt->uncore;
@@ -510,7 +518,7 @@ static int __engines_verify_workarounds(struct intel_gt *gt)
static void __intel_gt_disable(struct intel_gt *gt)
{
- intel_gt_set_wedged_on_init(gt);
+ intel_gt_set_wedged_on_fini(gt);
intel_gt_suspend_prepare(gt);
intel_gt_suspend_late(gt);
@@ -642,3 +650,11 @@ void intel_gt_driver_late_release(struct intel_gt *gt)
intel_gt_fini_timelines(gt);
intel_engines_free(gt);
}
+
+void intel_gt_info_print(const struct intel_gt_info *info,
+ struct drm_printer *p)
+{
+ drm_printf(p, "available engines: %x\n", info->engine_mask);
+
+ intel_sseu_dump(&info->sseu, p);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 4fac043750aa..9157c7411f60 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -11,6 +11,7 @@
#include "intel_reset.h"
struct drm_i915_private;
+struct drm_printer;
#define GT_TRACE(gt, fmt, ...) do { \
const struct intel_gt *gt__ __maybe_unused = (gt); \
@@ -35,6 +36,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
+int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
int intel_gt_init(struct intel_gt *gt);
void intel_gt_driver_register(struct intel_gt *gt);
@@ -58,14 +60,21 @@ static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
return i915_ggtt_offset(gt->scratch) + field;
}
-static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
+static inline bool intel_gt_has_unrecoverable_error(const struct intel_gt *gt)
{
- return __intel_reset_failed(&gt->reset);
+ return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags) ||
+ test_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
}
-static inline bool intel_gt_has_init_error(const struct intel_gt *gt)
+static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
{
- return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+ GEM_BUG_ON(intel_gt_has_unrecoverable_error(gt) &&
+ !test_bit(I915_WEDGED, &gt->reset.flags));
+
+ return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
}
+void intel_gt_info_print(const struct intel_gt_info *info,
+ struct drm_printer *p);
+
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 0cc7dd54f4f9..b05da68e52f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -27,7 +27,8 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
u32 eir;
- eir = ENGINE_READ(engine, RING_EIR);
+ /* Upper 16b are the enabling mask, rsvd for internal errors */
+ eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
ENGINE_TRACE(engine, "CS error: %x\n", eir);
/* Disable the error interrupt until after the reset */
@@ -457,7 +458,7 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt)
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
- if (HAS_ENGINE(gt->i915, VECS0)) {
+ if (HAS_ENGINE(gt, VECS0)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index f1d5333f9456..274aa0dd7050 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -188,7 +188,7 @@ int intel_gt_resume(struct intel_gt *gt)
enum intel_engine_id id;
int err;
- err = intel_gt_has_init_error(gt);
+ err = intel_gt_has_unrecoverable_error(gt);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 16ff47c83bd5..66fcbf9d0fdd 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -31,12 +31,15 @@ static bool engine_active(const struct intel_engine_cs *engine)
return !list_empty(&engine->kernel_context->timeline->requests);
}
-static bool flush_submission(struct intel_gt *gt)
+static bool flush_submission(struct intel_gt *gt, long timeout)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
bool active = false;
+ if (!timeout)
+ return false;
+
if (!intel_gt_pm_is_awake(gt))
return false;
@@ -139,7 +142,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
if (unlikely(timeout < 0))
timeout = -timeout, interruptible = false;
- flush_submission(gt); /* kick the ksoftirqd tasklets */
+ flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
if (!mutex_trylock(&tl->mutex)) {
@@ -194,7 +197,7 @@ out_active: spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &free, link)
__intel_timeline_free(&tl->kref);
- if (flush_submission(gt)) /* Wait, there's more! */
+ if (flush_submission(gt, timeout)) /* Wait, there's more! */
active_count++;
return active_count ? timeout : 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 0cc1d6b185dc..6d39a4a11bf3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -109,6 +109,17 @@ struct intel_gt {
struct intel_gt_buffer_pool buffer_pool;
struct i915_vma *scratch;
+
+ struct intel_gt_info {
+ intel_engine_mask_t engine_mask;
+ u8 num_engines;
+
+ /* Media engine access to SFC per instance */
+ u8 vdbox_sfc_access;
+
+ /* Slice/subslice/EU info */
+ struct sseu_dev_info sseu;
+ } info;
};
enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index d93ebdf3fa0e..f2b75078e05f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -198,14 +198,16 @@ struct intel_gt;
struct i915_vma_ops {
/* Map an object into an address space with the given cache flags. */
- int (*bind_vma)(struct i915_vma *vma,
+ int (*bind_vma)(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
/*
* Unmap an object from an address space. This usually consists of
* setting the valid PTE entries to a reserved scratch page.
*/
- void (*unbind_vma)(struct i915_vma *vma);
+ void (*unbind_vma)(struct i915_address_space *vm,
+ struct i915_vma *vma);
int (*set_pages)(struct i915_vma *vma);
void (*clear_pages)(struct i915_vma *vma);
@@ -566,6 +568,13 @@ int ggtt_set_pages(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma);
+int ppgtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+void ppgtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma);
+
void gtt_write_workarounds(struct intel_gt *gt);
void setup_private_pat(struct intel_uncore *uncore);
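The i915_vma_ops change above passes the address space to each callback explicitly instead of having the callback reach for vma->vm, which is what lets the aliasing-GGTT path above reuse ppgtt_bind_vma()/ppgtt_unbind_vma() against the alias vm. A toy standalone sketch of that calling convention (all types, names and flag values here are invented for illustration):

#include <stdio.h>

struct toy_vm { const char *name; };
struct toy_vma { struct toy_vm *vm; unsigned long start, size; };

/* Callbacks receive the address space to operate on explicitly... */
struct toy_vma_ops {
	int  (*bind_vma)(struct toy_vm *vm, struct toy_vma *vma, unsigned int flags);
	void (*unbind_vma)(struct toy_vm *vm, struct toy_vma *vma);
};

static int toy_bind(struct toy_vm *vm, struct toy_vma *vma, unsigned int flags)
{
	/* ...so the same helper can be pointed at vma->vm or at an alias vm. */
	printf("bind [%lx + %lx) into %s (flags %x)\n",
	       vma->start, vma->size, vm->name, flags);
	return 0;
}

static void toy_unbind(struct toy_vm *vm, struct toy_vma *vma)
{
	printf("clear [%lx + %lx) in %s\n", vma->start, vma->size, vm->name);
}

int main(void)
{
	struct toy_vm ggtt = { "ggtt" }, alias = { "aliasing-ppgtt" };
	struct toy_vma vma = { &ggtt, 0x1000, 0x2000 };
	struct toy_vma_ops ops = { toy_bind, toy_unbind };

	ops.bind_vma(vma.vm, &vma, 0x1);    /* global bind into the vma's own vm */
	ops.bind_vma(&alias, &vma, 0x2);    /* same helper reused against the alias vm */
	ops.unbind_vma(&alias, &vma);
	return 0;
}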
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index e866b8d721ed..e0280a672f1d 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2569,6 +2569,25 @@ static void process_csb(struct intel_engine_cs *engine)
return;
/*
+ * We will consume all events from HW, or at least pretend to.
+ *
+ * The sequence of events from the HW is deterministic, and derived
+ * from our writes to the ELSP, with a smidgen of variability for
+ * the arrival of the asynchronous requests wrt the inflight
+ * execution. If the HW sends an event that does not correspond with
+ * the one we are expecting, we have to abandon all hope as we lose
+ * all tracking of what the engine is actually executing. We will
+ * only detect we are out of sequence with the HW when we get an
+ * 'impossible' event because we have already drained our own
+ * preemption/promotion queue. If this occurs, we know that we likely
+ * lost track of execution earlier and must unwind and restart, the
+ * simplest way is by stop processing the event queue and force the
+ * engine to reset.
+ */
+ execlists->csb_head = tail;
+ ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
+
+ /*
* Hopefully paired with a wmb() in HW!
*
* We must complete the read of the write pointer before any reads
@@ -2577,8 +2596,6 @@ static void process_csb(struct intel_engine_cs *engine)
* we perform the READ_ONCE(*csb_write).
*/
rmb();
-
- ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
do {
bool promote;
@@ -2613,6 +2630,11 @@ static void process_csb(struct intel_engine_cs *engine)
if (promote) {
struct i915_request * const *old = execlists->active;
+ if (GEM_WARN_ON(!*execlists->pending)) {
+ execlists->error_interrupt |= ERROR_CSB;
+ break;
+ }
+
ring_set_paused(engine, 0);
/* Point active to the new ELSP; prevent overwriting */
@@ -2635,7 +2657,10 @@ static void process_csb(struct intel_engine_cs *engine)
WRITE_ONCE(execlists->pending[0], NULL);
} else {
- GEM_BUG_ON(!*execlists->active);
+ if (GEM_WARN_ON(!*execlists->active)) {
+ execlists->error_interrupt |= ERROR_CSB;
+ break;
+ }
/* port0 completed, advanced to port1 */
trace_ports(execlists, "completed", execlists->active);
@@ -2686,7 +2711,6 @@ static void process_csb(struct intel_engine_cs *engine)
}
} while (head != tail);
- execlists->csb_head = head;
set_timeslice(engine);
/*
@@ -3005,12 +3029,12 @@ static u32 active_ccid(struct intel_engine_cs *engine)
return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
}
-static bool execlists_capture(struct intel_engine_cs *engine)
+static void execlists_capture(struct intel_engine_cs *engine)
{
struct execlists_capture *cap;
if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
- return true;
+ return;
/*
* We need to _quickly_ capture the engine state before we reset.
@@ -3019,7 +3043,7 @@ static bool execlists_capture(struct intel_engine_cs *engine)
*/
cap = capture_regs(engine);
if (!cap)
- return true;
+ return;
spin_lock_irq(&engine->active.lock);
cap->rq = active_context(engine, active_ccid(engine));
@@ -3056,14 +3080,13 @@ static bool execlists_capture(struct intel_engine_cs *engine)
INIT_WORK(&cap->work, execlists_capture_work);
schedule_work(&cap->work);
- return true;
+ return;
err_rq:
i915_request_put(cap->rq);
err_free:
i915_gpu_coredump_put(cap->error);
kfree(cap);
- return false;
}
static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
@@ -3083,10 +3106,8 @@ static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
tasklet_disable_nosync(&engine->execlists.tasklet);
ring_set_paused(engine, 1); /* Freeze the current request in place */
- if (execlists_capture(engine))
- intel_engine_reset(engine, msg);
- else
- ring_set_paused(engine, 0);
+ execlists_capture(engine);
+ intel_engine_reset(engine, msg);
tasklet_enable(&engine->execlists.tasklet);
clear_and_wake_up_bit(bit, lock);
@@ -3117,9 +3138,18 @@ static void execlists_submission_tasklet(unsigned long data)
process_csb(engine);
if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
+ const char *msg;
+
+ /* Generate the error message in priority wrt the user! */
+ if (engine->execlists.error_interrupt & GENMASK(15, 0))
+ msg = "CS error"; /* thrown by a user payload */
+ else if (engine->execlists.error_interrupt & ERROR_CSB)
+ msg = "invalid CSB event";
+ else
+ msg = "internal error";
+
engine->execlists.error_interrupt = 0;
- if (ENGINE_READ(engine, RING_ESR)) /* confirm the error */
- execlists_reset(engine, "CS error");
+ execlists_reset(engine, msg);
}
if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
@@ -3422,7 +3452,7 @@ __execlists_update_reg_state(const struct intel_context *ce,
/* RPCS */
if (engine->class == RENDER_CLASS) {
regs[CTX_R_PWR_CLK_STATE] =
- intel_sseu_make_rpcs(engine->i915, &ce->sseu);
+ intel_sseu_make_rpcs(engine->gt, &ce->sseu);
i915_oa_init_reg_state(ce, engine);
}
@@ -3880,7 +3910,6 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
&wa_ctx->per_ctx };
wa_bb_func_t wa_bb_fn[2];
- struct page *page;
void *batch, *batch_ptr;
unsigned int i;
int ret;
@@ -3916,14 +3945,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return ret;
}
- page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
- batch = batch_ptr = kmap_atomic(page);
+ batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
/*
* Emit the two workaround batch buffers, recording the offset from the
* start of the workaround batch buffer object for each and their
* respective sizes.
*/
+ batch_ptr = batch;
for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
wa_bb[i]->offset = batch_ptr - batch;
if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
@@ -3935,10 +3964,10 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
batch_ptr = wa_bb_fn[i](engine, batch_ptr);
wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
}
+ GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
- BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
-
- kunmap_atomic(batch);
+ __i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
+ __i915_gem_object_release_map(wa_ctx->vma->obj);
if (ret)
lrc_destroy_wa_ctx(engine);
@@ -5403,13 +5432,8 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve)
* typically be the first we inspect for submission.
*/
swp = prandom_u32_max(ve->num_siblings);
- if (!swp)
- return;
-
- swap(ve->siblings[swp], ve->siblings[0]);
- if (!intel_engine_has_relative_mmio(ve->siblings[0]))
- virtual_update_register_offsets(ve->context.lrc_reg_state,
- ve->siblings[0]);
+ if (swp)
+ swap(ve->siblings[swp], ve->siblings[0]);
}
static int virtual_context_alloc(struct intel_context *ce)
@@ -5422,15 +5446,9 @@ static int virtual_context_alloc(struct intel_context *ce)
static int virtual_context_pin(struct intel_context *ce)
{
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- int err;
/* Note: we must use a real engine class for setting up reg state */
- err = __execlists_context_pin(ce, ve->siblings[0]);
- if (err)
- return err;
-
- virtual_engine_initial_hint(ve);
- return 0;
+ return __execlists_context_pin(ce, ve->siblings[0]);
}
static void virtual_context_enter(struct intel_context *ce)
@@ -5695,6 +5713,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
intel_engine_init_breadcrumbs(&ve->base);
intel_engine_init_execlists(&ve->base);
+ ve->base.breadcrumbs.irq_armed = true; /* fake HW, used for irq_work */
ve->base.cops = &virtual_context_ops;
ve->base.request_alloc = execlists_request_alloc;
@@ -5776,6 +5795,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
+ virtual_engine_initial_hint(ve);
return &ve->context;
err_put:
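The tasklet change above keys off how error_interrupt is partitioned: the low 16 bits carry the hardware EIR bits (a CS error raised by a user payload), while ERROR_CSB in the software-reserved upper half marks a driver-detected CSB desync. A self-contained sketch of that message selection (the values fed in from main() are invented samples):

#include <stdint.h>
#include <stdio.h>

#define ERROR_CSB     (1u << 31)        /* reserved upper-half bit, as in this series */
#define HW_ERROR_MASK 0x0000ffffu       /* low 16b mirror the bits read from RING_EIR */

/* Mirrors the priority order used in execlists_submission_tasklet() above. */
static const char *reset_reason(uint32_t error_interrupt)
{
	if (error_interrupt & HW_ERROR_MASK)
		return "CS error";              /* thrown by a user payload */
	else if (error_interrupt & ERROR_CSB)
		return "invalid CSB event";
	else
		return "internal error";
}

int main(void)
{
	printf("%s\n", reset_reason(0x00000008));       /* a HW EIR bit */
	printf("%s\n", reset_reason(ERROR_CSB));        /* driver-flagged CSB desync */
	printf("%s\n", reset_reason(0));                /* neither: generic fallback */
	return 0;
}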
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index f86f7e68ce5e..f0862e924d11 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -155,16 +155,16 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
return ppgtt;
}
-static int ppgtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+int ppgtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
u32 pte_flags;
int err;
- if (flags & I915_VMA_ALLOC) {
- err = vma->vm->allocate_va_range(vma->vm,
- vma->node.start, vma->size);
+ if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
+ err = vm->allocate_va_range(vm, vma->node.start, vma->size);
if (err)
return err;
@@ -176,17 +176,16 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
- GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ vm->insert_entries(vm, vma, cache_level, pte_flags);
wmb();
return 0;
}
-static void ppgtt_unbind_vma(struct i915_vma *vma)
+void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+ vm->clear_range(vm, vma->node.start, vma->size);
}
int ppgtt_set_pages(struct i915_vma *vma)
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 6db23389e427..1bfad589c63b 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -150,7 +150,7 @@ static int render_state_setup(struct intel_renderstate *so,
ret = 0;
out:
__i915_gem_object_flush_map(so->vma->obj, 0, i * sizeof(u32));
- i915_gem_object_unpin_map(so->vma->obj);
+ __i915_gem_object_release_map(so->vma->obj);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 0156f1f5c736..46a5ceffc22f 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -342,7 +342,7 @@ static int gen6_reset_engines(struct intel_gt *gt,
static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
{
struct intel_uncore *uncore = engine->uncore;
- u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
+ u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
i915_reg_t sfc_usage;
@@ -417,7 +417,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = engine->uncore;
- u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
+ u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
i915_reg_t sfc_forced_lock;
u32 sfc_forced_lock_bit;
@@ -880,7 +880,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
return true;
/* Never fully initialised, recovery impossible */
- if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
+ if (intel_gt_has_unrecoverable_error(gt))
return false;
GT_TRACE(gt, "start\n");
@@ -930,7 +930,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
* Warn CI about the unrecoverable wedged condition.
* Time for a reboot.
*/
- add_taint_for_CI(TAINT_WARN);
+ add_taint_for_CI(gt->i915, TAINT_WARN);
return false;
}
@@ -1097,7 +1097,7 @@ taint:
* rather than continue on into oblivion. For everyone else,
* the system should still plod along, but they have been warned!
*/
- add_taint_for_CI(TAINT_WARN);
+ add_taint_for_CI(gt->i915, TAINT_WARN);
error:
__intel_gt_set_wedged(gt);
goto finish;
@@ -1246,7 +1246,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
*/
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
- engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
+ engine_mask &= gt->info.engine_mask;
if (flags & I915_ERROR_CAPTURE) {
i915_capture_error_state(gt->i915);
@@ -1342,7 +1342,7 @@ int intel_gt_terminally_wedged(struct intel_gt *gt)
if (!intel_gt_is_wedged(gt))
return 0;
- if (intel_gt_has_init_error(gt))
+ if (intel_gt_has_unrecoverable_error(gt))
return -EIO;
/* Reset still in progress? Maybe we will recover? */
@@ -1360,6 +1360,15 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
I915_WEDGED_ON_INIT);
intel_gt_set_wedged(gt);
set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+
+ /* Wedged on init is non-recoverable */
+ add_taint_for_CI(gt->i915, TAINT_WARN);
+}
+
+void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
+{
+ intel_gt_set_wedged(gt);
+ set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
}
void intel_gt_init_reset(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index 8e8d5f761166..a0eec7c11c0c 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -47,8 +47,10 @@ int intel_gt_terminally_wedged(struct intel_gt *gt);
/*
* There's no unset_wedged_on_init paired with this one.
* Once we're wedged on init, there's no going back.
+ * Same thing for unset_wedged_on_fini.
*/
void intel_gt_set_wedged_on_init(struct intel_gt *gt);
+void intel_gt_set_wedged_on_fini(struct intel_gt *gt);
int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);
@@ -71,14 +73,6 @@ void __intel_fini_wedge(struct intel_wedge_me *w);
(W)->gt; \
__intel_fini_wedge((W)))
-static inline bool __intel_reset_failed(const struct intel_reset *reset)
-{
- GEM_BUG_ON(test_bit(I915_WEDGED_ON_INIT, &reset->flags) ?
- !test_bit(I915_WEDGED, &reset->flags) : false);
-
- return unlikely(test_bit(I915_WEDGED, &reset->flags));
-}
-
bool intel_has_gpu_reset(const struct intel_gt *gt);
bool intel_has_reset_engine(const struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index f43bc3a0fe4f..add6b86d9d03 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -34,12 +34,17 @@ struct intel_reset {
* longer use the GPU - similar to #I915_WEDGED bit. The difference is
* in the way we're handling "forced" unwedged (e.g. through debugfs),
* which is not allowed in case we failed to initialize.
+ *
+ * #I915_WEDGED_ON_FINI - Similar to #I915_WEDGED_ON_INIT, except we
+ * use it to mark that the GPU is no longer available (and prevent
+ * users from using it).
*/
unsigned long flags;
#define I915_RESET_BACKOFF 0
#define I915_RESET_MODESET 1
#define I915_RESET_ENGINE 2
-#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 2)
+#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 3)
+#define I915_WEDGED_ON_FINI (BITS_PER_LONG - 2)
#define I915_WEDGED (BITS_PER_LONG - 1)
struct mutex mutex; /* serialises wedging/unwedging */
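The flag layout above keeps the terminal wedged states packed at the top of the flags word, and intel_gt_has_unrecoverable_error() in intel_gt.h simply tests the two on-init/on-fini bits. A standalone sketch of that bit allocation and check (plain bit tests stand in for the kernel's test_bit()):

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG       ((int)(8 * sizeof(unsigned long)))
#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 3)
#define I915_WEDGED_ON_FINI (BITS_PER_LONG - 2)
#define I915_WEDGED         (BITS_PER_LONG - 1)

/* Either terminal bit means the device cannot be unwedged again. */
static bool has_unrecoverable_error(unsigned long flags)
{
	return (flags & (1UL << I915_WEDGED_ON_INIT)) ||
	       (flags & (1UL << I915_WEDGED_ON_FINI));
}

int main(void)
{
	unsigned long flags = 1UL << I915_WEDGED;       /* wedged, possibly recoverable */

	printf("unrecoverable: %d\n", has_unrecoverable_error(flags));

	flags |= 1UL << I915_WEDGED_ON_FINI;            /* device torn down for good */
	printf("unrecoverable: %d\n", has_unrecoverable_error(flags));
	return 0;
}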
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 68a08486fc87..94915f668715 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -543,7 +543,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
vaddr, engine->context_size);
i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
+ __i915_gem_object_release_map(obj);
}
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
@@ -649,7 +649,7 @@ static inline int mi_set_context(struct i915_request *rq,
struct drm_i915_private *i915 = engine->i915;
enum intel_engine_id id;
const int num_engines =
- IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
+ IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
bool force_restore = false;
int len;
u32 *cs;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 296391deeb94..97ba14ad52e4 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1062,11 +1062,12 @@ static bool gen6_rps_enable(struct intel_rps *rps)
static int chv_rps_max_freq(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_gt *gt = rps_to_gt(rps);
u32 val;
val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
- switch (RUNTIME_INFO(i915)->sseu.eu_total) {
+ switch (gt->info.sseu.eu_total) {
case 8:
/* (2 * 4) config */
val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index d173271c7397..f1c039e1b5ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -60,10 +60,552 @@ intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
return hweight32(intel_sseu_get_subslices(sseu, slice));
}
-u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
+static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ int slice_stride = sseu->max_subslices * sseu->eu_stride;
+
+ return slice * slice_stride + subslice * sseu->eu_stride;
+}
+
+static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ int i, offset = sseu_eu_idx(sseu, slice, subslice);
+ u16 eu_mask = 0;
+
+ for (i = 0; i < sseu->eu_stride; i++)
+ eu_mask |=
+ ((u16)sseu->eu_mask[offset + i]) << (i * BITS_PER_BYTE);
+
+ return eu_mask;
+}
+
+static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
+ u16 eu_mask)
+{
+ int i, offset = sseu_eu_idx(sseu, slice, subslice);
+
+ for (i = 0; i < sseu->eu_stride; i++)
+ sseu->eu_mask[offset + i] =
+ (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
+}
+
+static u16 compute_eu_total(const struct sseu_dev_info *sseu)
+{
+ u16 i, total = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
+ total += hweight8(sseu->eu_mask[i]);
+
+ return total;
+}
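/*
 * Sketch only: the sseu_set_eus()/sseu_get_eus() helpers above flatten a
 * per-(slice, subslice) 16-bit EU mask into a byte array, eu_stride bytes
 * per subslice and max_subslices * eu_stride bytes per slice.  The sizes
 * used below are illustrative and not tied to any particular platform.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_SUBSLICES 6
#define EU_STRIDE     2                 /* bytes needed to hold a 16-bit EU mask */

static int eu_idx(int slice, int subslice)
{
	return slice * MAX_SUBSLICES * EU_STRIDE + subslice * EU_STRIDE;
}

int main(void)
{
	uint8_t eu_mask[1 * MAX_SUBSLICES * EU_STRIDE];
	uint16_t in = 0xabcd, out = 0;
	int off = eu_idx(0, 3), i;

	memset(eu_mask, 0, sizeof(eu_mask));
	for (i = 0; i < EU_STRIDE; i++)                 /* as in sseu_set_eus() */
		eu_mask[off + i] = (in >> (8 * i)) & 0xff;
	for (i = 0; i < EU_STRIDE; i++)                 /* as in sseu_get_eus() */
		out |= (uint16_t)eu_mask[off + i] << (8 * i);

	printf("stored %#x, read back %#x\n", (unsigned)in, (unsigned)out); /* round-trips to 0xabcd */
	return 0;
}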
+
+static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
+ u8 s_en, u32 ss_en, u16 eu_en)
+{
+ int s, ss;
+
+ /* ss_en represents the entire subslice mask across all slices */
+ GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
+ sizeof(ss_en) * BITS_PER_BYTE);
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ if ((s_en & BIT(s)) == 0)
+ continue;
+
+ sseu->slice_mask |= BIT(s);
+
+ intel_sseu_set_subslices(sseu, s, ss_en);
+
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ if (intel_sseu_has_subslice(sseu, s, ss))
+ sseu_set_eus(sseu, s, ss, eu_en);
+ }
+ sseu->eu_per_subslice = hweight16(eu_en);
+ sseu->eu_total = compute_eu_total(sseu);
+}
+
+static void gen12_sseu_info_init(struct intel_gt *gt)
+{
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 dss_en;
+ u16 eu_en = 0;
+ u8 eu_en_fuse;
+ u8 s_en;
+ int eu;
+
+ /*
+ * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
+ * Instead of splitting these, provide userspace with an array
+ * of DSS to more closely represent the hardware resource.
+ */
+ intel_sseu_set_info(sseu, 1, 6, 16);
+
+ s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
+ GEN11_GT_S_ENA_MASK;
+
+ dss_en = intel_uncore_read(uncore, GEN12_GT_DSS_ENABLE);
+
+ /* one bit per pair of EUs */
+ eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
+ GEN11_EU_DIS_MASK);
+ for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
+ if (eu_en_fuse & BIT(eu))
+ eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
+
+ gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);
+
+ /* TGL only supports slice-level power gating */
+ sseu->has_slice_pg = 1;
+}
+
+static void gen11_sseu_info_init(struct intel_gt *gt)
+{
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 ss_en;
+ u8 eu_en;
+ u8 s_en;
+
+ if (IS_ELKHARTLAKE(gt->i915))
+ intel_sseu_set_info(sseu, 1, 4, 8);
+ else
+ intel_sseu_set_info(sseu, 1, 8, 8);
+
+ s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
+ GEN11_GT_S_ENA_MASK;
+ ss_en = ~intel_uncore_read(uncore, GEN11_GT_SUBSLICE_DISABLE);
+
+ eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
+ GEN11_EU_DIS_MASK);
+
+ gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);
+
+ /* ICL has no power gating restrictions. */
+ sseu->has_slice_pg = 1;
+ sseu->has_subslice_pg = 1;
+ sseu->has_eu_pg = 1;
+}
+
+static void gen10_sseu_info_init(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ const u32 fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
+ const int eu_mask = 0xff;
+ u32 subslice_mask, eu_en;
+ int s, ss;
+
+ intel_sseu_set_info(sseu, 6, 4, 8);
+
+ sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
+ GEN10_F2_S_ENA_SHIFT;
+
+ /* Slice0 */
+ eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE0);
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
+ /* Slice1 */
+ sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
+ eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE1);
+ sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
+ /* Slice2 */
+ sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
+ sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
+ /* Slice3 */
+ sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
+ eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE2);
+ sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
+ /* Slice4 */
+ sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
+ sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
+ /* Slice5 */
+ sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
+ eu_en = ~intel_uncore_read(uncore, GEN10_EU_DISABLE3);
+ sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
+
+ subslice_mask = (1 << 4) - 1;
+ subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
+ GEN10_F2_SS_DIS_SHIFT);
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ u32 subslice_mask_with_eus = subslice_mask;
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ if (sseu_get_eus(sseu, s, ss) == 0)
+ subslice_mask_with_eus &= ~BIT(ss);
+ }
+
+ /*
+ * Slice0 can have up to 3 subslices, but there are only 2 in
+ * slice1/2.
+ */
+ intel_sseu_set_subslices(sseu, s, s == 0 ?
+ subslice_mask_with_eus :
+ subslice_mask_with_eus & 0x3);
+ }
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /*
+ * CNL is expected to always have a uniform distribution
+ * of EU across subslices with the exception that any one
+ * EU in any one subslice may be fused off for die
+ * recovery.
+ */
+ sseu->eu_per_subslice =
+ intel_sseu_subslice_total(sseu) ?
+ DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
+ 0;
+
+ /* No restrictions on Power Gating */
+ sseu->has_slice_pg = 1;
+ sseu->has_subslice_pg = 1;
+ sseu->has_eu_pg = 1;
+}
+
+static void cherryview_sseu_info_init(struct intel_gt *gt)
+{
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ u32 fuse;
+ u8 subslice_mask = 0;
+
+ fuse = intel_uncore_read(gt->uncore, CHV_FUSE_GT);
+
+ sseu->slice_mask = BIT(0);
+ intel_sseu_set_info(sseu, 1, 2, 8);
+
+ if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+ u8 disabled_mask =
+ ((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
+ CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
+ (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
+ CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
+
+ subslice_mask |= BIT(0);
+ sseu_set_eus(sseu, 0, 0, ~disabled_mask);
+ }
+
+ if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+ u8 disabled_mask =
+ ((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
+ CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
+ (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
+ CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
+
+ subslice_mask |= BIT(1);
+ sseu_set_eus(sseu, 0, 1, ~disabled_mask);
+ }
+
+ intel_sseu_set_subslices(sseu, 0, subslice_mask);
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /*
+ * CHV is expected to always have a uniform distribution of EU
+ * across subslices.
+ */
+ sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
+ sseu->eu_total /
+ intel_sseu_subslice_total(sseu) :
+ 0;
+ /*
+ * CHV supports subslice power gating on devices with more than
+ * one subslice, and supports EU power gating on devices with
+ * more than one EU pair per subslice.
+ */
+ sseu->has_slice_pg = 0;
+ sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
+ sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
+}
+
+static void gen9_sseu_info_init(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_device_info *info = mkwrite_device_info(i915);
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 fuse2, eu_disable, subslice_mask;
+ const u8 eu_mask = 0xff;
+ int s, ss;
+
+ fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
+ sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+
+ /* BXT has a single slice and at most 3 subslices. */
+ intel_sseu_set_info(sseu, IS_GEN9_LP(i915) ? 1 : 3,
+ IS_GEN9_LP(i915) ? 3 : 4, 8);
+
+ /*
+ * The subslice disable field is global, i.e. it applies
+ * to each of the enabled slices.
+ */
+ subslice_mask = (1 << sseu->max_subslices) - 1;
+ subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
+ GEN9_F2_SS_DIS_SHIFT);
+
+ /*
+ * Iterate through enabled slices and subslices to
+ * count the total enabled EU.
+ */
+ for (s = 0; s < sseu->max_slices; s++) {
+ if (!(sseu->slice_mask & BIT(s)))
+ /* skip disabled slice */
+ continue;
+
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
+
+ eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s));
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ int eu_per_ss;
+ u8 eu_disabled_mask;
+
+ if (!intel_sseu_has_subslice(sseu, s, ss))
+ /* skip disabled subslice */
+ continue;
+
+ eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;
+
+ sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+
+ eu_per_ss = sseu->max_eus_per_subslice -
+ hweight8(eu_disabled_mask);
+
+ /*
+ * Record which subslice(s) has(have) 7 EUs. We
+ * can tune the hash used to spread work among
+ * subslices if they are unbalanced.
+ */
+ if (eu_per_ss == 7)
+ sseu->subslice_7eu[s] |= BIT(ss);
+ }
+ }
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /*
+ * SKL is expected to always have a uniform distribution
+ * of EU across subslices with the exception that any one
+ * EU in any one subslice may be fused off for die
+ * recovery. BXT is expected to be perfectly uniform in EU
+ * distribution.
+ */
+ sseu->eu_per_subslice =
+ intel_sseu_subslice_total(sseu) ?
+ DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
+ 0;
+
+ /*
+ * SKL+ supports slice power gating on devices with more than
+ * one slice, and supports EU power gating on devices with
+ * more than one EU pair per subslice. BXT+ supports subslice
+ * power gating on devices with more than one subslice, and
+ * supports EU power gating on devices with more than one EU
+ * pair per subslice.
+ */
+ sseu->has_slice_pg =
+ !IS_GEN9_LP(i915) && hweight8(sseu->slice_mask) > 1;
+ sseu->has_subslice_pg =
+ IS_GEN9_LP(i915) && intel_sseu_subslice_total(sseu) > 1;
+ sseu->has_eu_pg = sseu->eu_per_subslice > 2;
+
+ if (IS_GEN9_LP(i915)) {
+#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask[0] & BIT(ss)))
+ info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;
+
+ sseu->min_eu_in_pool = 0;
+ if (info->has_pooled_eu) {
+ if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
+ sseu->min_eu_in_pool = 3;
+ else if (IS_SS_DISABLED(1))
+ sseu->min_eu_in_pool = 6;
+ else
+ sseu->min_eu_in_pool = 9;
+ }
+#undef IS_SS_DISABLED
+ }
+}
+
+static void bdw_sseu_info_init(struct intel_gt *gt)
+{
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ struct intel_uncore *uncore = gt->uncore;
+ int s, ss;
+ u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
+ u32 eu_disable0, eu_disable1, eu_disable2;
+
+ fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
+ sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ intel_sseu_set_info(sseu, 3, 3, 8);
+
+ /*
+ * The subslice disable field is global, i.e. it applies
+ * to each of the enabled slices.
+ */
+ subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
+ subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
+ GEN8_F2_SS_DIS_SHIFT);
+ eu_disable0 = intel_uncore_read(uncore, GEN8_EU_DISABLE0);
+ eu_disable1 = intel_uncore_read(uncore, GEN8_EU_DISABLE1);
+ eu_disable2 = intel_uncore_read(uncore, GEN8_EU_DISABLE2);
+ eu_disable[0] = eu_disable0 & GEN8_EU_DIS0_S0_MASK;
+ eu_disable[1] = (eu_disable0 >> GEN8_EU_DIS0_S1_SHIFT) |
+ ((eu_disable1 & GEN8_EU_DIS1_S1_MASK) <<
+ (32 - GEN8_EU_DIS0_S1_SHIFT));
+ eu_disable[2] = (eu_disable1 >> GEN8_EU_DIS1_S2_SHIFT) |
+ ((eu_disable2 & GEN8_EU_DIS2_S2_MASK) <<
+ (32 - GEN8_EU_DIS1_S2_SHIFT));
+
+ /*
+ * Iterate through enabled slices and subslices to
+ * count the total enabled EU.
+ */
+ for (s = 0; s < sseu->max_slices; s++) {
+ if (!(sseu->slice_mask & BIT(s)))
+ /* skip disabled slice */
+ continue;
+
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ u8 eu_disabled_mask;
+ u32 n_disabled;
+
+ if (!intel_sseu_has_subslice(sseu, s, ss))
+ /* skip disabled subslice */
+ continue;
+
+ eu_disabled_mask =
+ eu_disable[s] >> (ss * sseu->max_eus_per_subslice);
+
+ sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+
+ n_disabled = hweight8(eu_disabled_mask);
+
+ /*
+ * Record which subslices have 7 EUs.
+ */
+ if (sseu->max_eus_per_subslice - n_disabled == 7)
+ sseu->subslice_7eu[s] |= 1 << ss;
+ }
+ }
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /*
+ * BDW is expected to always have a uniform distribution of EU across
+ * subslices with the exception that any one EU in any one subslice may
+ * be fused off for die recovery.
+ */
+ sseu->eu_per_subslice =
+ intel_sseu_subslice_total(sseu) ?
+ DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
+ 0;
+
+ /*
+ * BDW supports slice power gating on devices with more than
+ * one slice.
+ */
+ sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
+ sseu->has_subslice_pg = 0;
+ sseu->has_eu_pg = 0;
+}
+
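
The eu_disable[] stitching above splices one logical per-slice field out of adjacent 32-bit registers. Below is a small sketch of the same shift-and-mask pattern; the field layout, shift and mask values are invented for illustration and are not the real GEN8 register layout.

#include <stdio.h>
#include <stdint.h>

/* Illustrative field layout only -- not the real BDW register layout. */
#define S1_SHIFT 24			/* slice 1 field starts here in reg0   */
#define S1_MASK  0xffffu		/* low bits of reg1 hold the remainder */

int main(void)
{
	/* Invented register contents: slice 1's 24-bit field spans reg0/reg1. */
	uint32_t reg0 = 0xabc00000u;	/* top 8 bits belong to slice 1  */
	uint32_t reg1 = 0x0000dcbau;	/* low 16 bits belong to slice 1 */

	/*
	 * Same stitching pattern as bdw_sseu_info_init() above: take the
	 * high bits from reg0, then splice in reg1's contribution shifted
	 * up past them.
	 */
	uint32_t slice1_disable = (reg0 >> S1_SHIFT) |
				  ((reg1 & S1_MASK) << (32 - S1_SHIFT));

	printf("slice1 EU-disable word: 0x%08x\n", slice1_disable);
	return 0;
}
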
+static void hsw_sseu_info_init(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ u32 fuse1;
+ u8 subslice_mask = 0;
+ int s, ss;
+
+ /*
+ * There isn't a register to tell us how many slices/subslices. We
+ * work off the PCI-ids here.
+ */
+ switch (INTEL_INFO(i915)->gt) {
+ default:
+ MISSING_CASE(INTEL_INFO(i915)->gt);
+ fallthrough;
+ case 1:
+ sseu->slice_mask = BIT(0);
+ subslice_mask = BIT(0);
+ break;
+ case 2:
+ sseu->slice_mask = BIT(0);
+ subslice_mask = BIT(0) | BIT(1);
+ break;
+ case 3:
+ sseu->slice_mask = BIT(0) | BIT(1);
+ subslice_mask = BIT(0) | BIT(1);
+ break;
+ }
+
+ fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
+ switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
+ default:
+ MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
+ HSW_F1_EU_DIS_SHIFT);
+ fallthrough;
+ case HSW_F1_EU_DIS_10EUS:
+ sseu->eu_per_subslice = 10;
+ break;
+ case HSW_F1_EU_DIS_8EUS:
+ sseu->eu_per_subslice = 8;
+ break;
+ case HSW_F1_EU_DIS_6EUS:
+ sseu->eu_per_subslice = 6;
+ break;
+ }
+
+ intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
+ hweight8(subslice_mask),
+ sseu->eu_per_subslice);
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ sseu_set_eus(sseu, s, ss,
+ (1UL << sseu->eu_per_subslice) - 1);
+ }
+ }
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /* No powergating for you. */
+ sseu->has_slice_pg = 0;
+ sseu->has_subslice_pg = 0;
+ sseu->has_eu_pg = 0;
+}
+
+void intel_sseu_info_init(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (IS_HASWELL(i915))
+ hsw_sseu_info_init(gt);
+ else if (IS_CHERRYVIEW(i915))
+ cherryview_sseu_info_init(gt);
+ else if (IS_BROADWELL(i915))
+ bdw_sseu_info_init(gt);
+ else if (IS_GEN(i915, 9))
+ gen9_sseu_info_init(gt);
+ else if (IS_GEN(i915, 10))
+ gen10_sseu_info_init(gt);
+ else if (IS_GEN(i915, 11))
+ gen11_sseu_info_init(gt);
+ else if (INTEL_GEN(i915) >= 12)
+ gen12_sseu_info_init(gt);
+}
+
+u32 intel_sseu_make_rpcs(struct intel_gt *gt,
const struct intel_sseu *req_sseu)
{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+ struct drm_i915_private *i915 = gt->i915;
+ const struct sseu_dev_info *sseu = &gt->info.sseu;
bool subslice_pg = sseu->has_subslice_pg;
u8 slices, subslices;
u32 rpcs = 0;
@@ -173,3 +715,48 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
return rpcs;
}
+
+void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
+{
+ int s;
+
+ drm_printf(p, "slice total: %u, mask=%04x\n",
+ hweight8(sseu->slice_mask), sseu->slice_mask);
+ drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
+ for (s = 0; s < sseu->max_slices; s++) {
+ drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
+ s, intel_sseu_subslices_per_slice(sseu, s),
+ intel_sseu_get_subslices(sseu, s));
+ }
+ drm_printf(p, "EU total: %u\n", sseu->eu_total);
+ drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
+ drm_printf(p, "has slice power gating: %s\n",
+ yesno(sseu->has_slice_pg));
+ drm_printf(p, "has subslice power gating: %s\n",
+ yesno(sseu->has_subslice_pg));
+ drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
+}
+
+void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
+{
+ int s, ss;
+
+ if (sseu->max_slices == 0) {
+ drm_printf(p, "Unavailable\n");
+ return;
+ }
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
+ s, intel_sseu_subslices_per_slice(sseu, s),
+ intel_sseu_get_subslices(sseu, s));
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ u16 enabled_eus = sseu_get_eus(sseu, s, ss);
+
+ drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
+ ss, hweight16(enabled_eus), enabled_eus);
+ }
+ }
+}
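
For reference, the topology printer added above boils down to walking the masks and popcounting them. A standalone sketch with an invented one-slice topology, using plain arrays in place of the driver's strided mask storage:

#include <stdio.h>
#include <stdint.h>

#define MAX_SLICES    2
#define MAX_SUBSLICES 4

struct topo {
	uint8_t  slice_mask;
	uint8_t  subslice_mask[MAX_SLICES];
	uint16_t eu_mask[MAX_SLICES][MAX_SUBSLICES];
};

int main(void)
{
	/* Invented topology: one slice, three subslices, 8 EUs each. */
	struct topo t = {
		.slice_mask    = 0x1,
		.subslice_mask = { 0x7, 0x0 },
		.eu_mask       = { { 0x00ff, 0x00ff, 0x00ff, 0x0000 } },
	};
	int s, ss;

	for (s = 0; s < MAX_SLICES; s++) {
		if (!(t.slice_mask & (1u << s)))
			continue;	/* skip disabled slice */

		printf("slice%d: %d subslice(s) (0x%02x):\n",
		       s, __builtin_popcount(t.subslice_mask[s]),
		       (unsigned int)t.subslice_mask[s]);

		for (ss = 0; ss < MAX_SUBSLICES; ss++)
			printf("\tsubslice%d: %d EUs (0x%04x)\n",
			       ss, __builtin_popcount(t.eu_mask[s][ss]),
			       (unsigned int)t.eu_mask[s][ss]);
	}
	return 0;
}
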
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index d1d225204f09..23ba6c2ebe70 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -13,6 +13,8 @@
#include "i915_gem.h"
struct drm_i915_private;
+struct intel_gt;
+struct drm_printer;
#define GEN_MAX_SLICES (6) /* CNL upper bound */
#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
@@ -94,7 +96,13 @@ u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice);
void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
u32 ss_mask);
-u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
+void intel_sseu_info_init(struct intel_gt *gt);
+
+u32 intel_sseu_make_rpcs(struct intel_gt *gt,
const struct intel_sseu *req_sseu);
+void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p);
+void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p);
+
#endif /* __INTEL_SSEU_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
new file mode 100644
index 000000000000..51780282d872
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "debugfs_gt.h"
+#include "intel_sseu_debugfs.h"
+#include "i915_drv.h"
+
+static void sseu_copy_subslices(const struct sseu_dev_info *sseu,
+ int slice, u8 *to_mask)
+{
+ int offset = slice * sseu->ss_stride;
+
+ memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
+}
+
+static void cherryview_sseu_device_status(struct intel_gt *gt,
+ struct sseu_dev_info *sseu)
+{
+#define SS_MAX 2
+ struct intel_uncore *uncore = gt->uncore;
+ const int ss_max = SS_MAX;
+ u32 sig1[SS_MAX], sig2[SS_MAX];
+ int ss;
+
+ sig1[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG1);
+ sig1[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG1);
+ sig2[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG2);
+ sig2[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG2);
+
+ for (ss = 0; ss < ss_max; ss++) {
+ unsigned int eu_cnt;
+
+ if (sig1[ss] & CHV_SS_PG_ENABLE)
+ /* skip disabled subslice */
+ continue;
+
+ sseu->slice_mask = BIT(0);
+ sseu->subslice_mask[0] |= BIT(ss);
+ eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
+ ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
+ sseu->eu_total += eu_cnt;
+ sseu->eu_per_subslice = max_t(unsigned int,
+ sseu->eu_per_subslice, eu_cnt);
+ }
+#undef SS_MAX
+}
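
cherryview_sseu_device_status() above derives the live EU count from four power-gate bits, each covering a pair of EUs. A userspace sketch of that accounting, with invented bit positions standing in for the CHV_EU*_PG_ENABLE flags:

#include <stdio.h>
#include <stdint.h>

/* Invented bit positions for the four EU-pair power-gate flags. */
#define EU08_PG  (1u << 0)
#define EU19_PG  (1u << 1)
#define EU210_PG (1u << 2)
#define EU311_PG (1u << 3)

static unsigned int eus_from_sig(uint32_t sig)
{
	/* Each pair bit that is NOT power-gated contributes two EUs. */
	return ((sig & EU08_PG)  ? 0 : 2) +
	       ((sig & EU19_PG)  ? 0 : 2) +
	       ((sig & EU210_PG) ? 0 : 2) +
	       ((sig & EU311_PG) ? 0 : 2);
}

int main(void)
{
	/* Example: pairs 2-10 and 3-11 are power-gated, the rest are awake. */
	uint32_t sig = EU210_PG | EU311_PG;

	printf("active EUs in subslice: %u\n", eus_from_sig(sig));	/* 4 */
	return 0;
}
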
+
+static void gen10_sseu_device_status(struct intel_gt *gt,
+ struct sseu_dev_info *sseu)
+{
+#define SS_MAX 6
+ struct intel_uncore *uncore = gt->uncore;
+ const struct intel_gt_info *info = &gt->info;
+ u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
+ int s, ss;
+
+ for (s = 0; s < info->sseu.max_slices; s++) {
+ /*
+		 * FIXME: Valid SS Mask respects the spec and reads
+		 * only valid bits for those registers, excluding reserved,
+		 * although this seems wrong because it would leave many
+		 * subslices without ACK.
+ */
+ s_reg[s] = intel_uncore_read(uncore, GEN10_SLICE_PGCTL_ACK(s)) &
+ GEN10_PGCTL_VALID_SS_MASK(s);
+ eu_reg[2 * s] = intel_uncore_read(uncore,
+ GEN10_SS01_EU_PGCTL_ACK(s));
+ eu_reg[2 * s + 1] = intel_uncore_read(uncore,
+ GEN10_SS23_EU_PGCTL_ACK(s));
+ }
+
+ eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+ GEN9_PGCTL_SSA_EU19_ACK |
+ GEN9_PGCTL_SSA_EU210_ACK |
+ GEN9_PGCTL_SSA_EU311_ACK;
+ eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+ GEN9_PGCTL_SSB_EU19_ACK |
+ GEN9_PGCTL_SSB_EU210_ACK |
+ GEN9_PGCTL_SSB_EU311_ACK;
+
+ for (s = 0; s < info->sseu.max_slices; s++) {
+ if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+ /* skip disabled slice */
+ continue;
+
+ sseu->slice_mask |= BIT(s);
+ sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
+
+ for (ss = 0; ss < info->sseu.max_subslices; ss++) {
+ unsigned int eu_cnt;
+
+ if (info->sseu.has_subslice_pg &&
+ !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+ /* skip disabled subslice */
+ continue;
+
+ eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
+ eu_mask[ss % 2]);
+ sseu->eu_total += eu_cnt;
+ sseu->eu_per_subslice = max_t(unsigned int,
+ sseu->eu_per_subslice,
+ eu_cnt);
+ }
+ }
+#undef SS_MAX
+}
+
+static void gen9_sseu_device_status(struct intel_gt *gt,
+ struct sseu_dev_info *sseu)
+{
+#define SS_MAX 3
+ struct intel_uncore *uncore = gt->uncore;
+ const struct intel_gt_info *info = &gt->info;
+ u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
+ int s, ss;
+
+ for (s = 0; s < info->sseu.max_slices; s++) {
+ s_reg[s] = intel_uncore_read(uncore, GEN9_SLICE_PGCTL_ACK(s));
+ eu_reg[2 * s] =
+ intel_uncore_read(uncore, GEN9_SS01_EU_PGCTL_ACK(s));
+ eu_reg[2 * s + 1] =
+ intel_uncore_read(uncore, GEN9_SS23_EU_PGCTL_ACK(s));
+ }
+
+ eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+ GEN9_PGCTL_SSA_EU19_ACK |
+ GEN9_PGCTL_SSA_EU210_ACK |
+ GEN9_PGCTL_SSA_EU311_ACK;
+ eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+ GEN9_PGCTL_SSB_EU19_ACK |
+ GEN9_PGCTL_SSB_EU210_ACK |
+ GEN9_PGCTL_SSB_EU311_ACK;
+
+ for (s = 0; s < info->sseu.max_slices; s++) {
+ if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+ /* skip disabled slice */
+ continue;
+
+ sseu->slice_mask |= BIT(s);
+
+ if (IS_GEN9_BC(gt->i915))
+ sseu_copy_subslices(&info->sseu, s,
+ sseu->subslice_mask);
+
+ for (ss = 0; ss < info->sseu.max_subslices; ss++) {
+ unsigned int eu_cnt;
+ u8 ss_idx = s * info->sseu.ss_stride +
+ ss / BITS_PER_BYTE;
+
+ if (IS_GEN9_LP(gt->i915)) {
+ if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+ /* skip disabled subslice */
+ continue;
+
+ sseu->subslice_mask[ss_idx] |=
+ BIT(ss % BITS_PER_BYTE);
+ }
+
+ eu_cnt = eu_reg[2 * s + ss / 2] & eu_mask[ss % 2];
+ eu_cnt = 2 * hweight32(eu_cnt);
+
+ sseu->eu_total += eu_cnt;
+ sseu->eu_per_subslice = max_t(unsigned int,
+ sseu->eu_per_subslice,
+ eu_cnt);
+ }
+ }
+#undef SS_MAX
+}
+
+static void bdw_sseu_device_status(struct intel_gt *gt,
+ struct sseu_dev_info *sseu)
+{
+ const struct intel_gt_info *info = &gt->info;
+ u32 slice_info = intel_uncore_read(gt->uncore, GEN8_GT_SLICE_INFO);
+ int s;
+
+ sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
+
+ if (sseu->slice_mask) {
+ sseu->eu_per_subslice = info->sseu.eu_per_subslice;
+ for (s = 0; s < fls(sseu->slice_mask); s++)
+ sseu_copy_subslices(&info->sseu, s,
+ sseu->subslice_mask);
+ sseu->eu_total = sseu->eu_per_subslice *
+ intel_sseu_subslice_total(sseu);
+
+ /* subtract fused off EU(s) from enabled slice(s) */
+ for (s = 0; s < fls(sseu->slice_mask); s++) {
+ u8 subslice_7eu = info->sseu.subslice_7eu[s];
+
+ sseu->eu_total -= hweight8(subslice_7eu);
+ }
+ }
+}
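
bdw_sseu_device_status() above assumes the uniform EU count first and then corrects for subslices fused down to 7 EUs. The arithmetic as a standalone sketch, with invented counts and masks:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Invented example mirroring bdw_sseu_device_status() above. */
	unsigned int eu_per_subslice = 8;
	unsigned int subslice_total  = 6;		/* across enabled slices */
	uint8_t subslice_7eu[2]      = { 0x2, 0x0 };	/* one 7-EU subslice */
	unsigned int eu_total, s;

	/* Assume the uniform count first... */
	eu_total = eu_per_subslice * subslice_total;

	/* ...then subtract one EU for every subslice fused down to 7. */
	for (s = 0; s < 2; s++)
		eu_total -= (unsigned int)__builtin_popcount(subslice_7eu[s]);

	printf("EU total: %u\n", eu_total);	/* 47 */
	return 0;
}
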
+
+static void i915_print_sseu_info(struct seq_file *m,
+ bool is_available_info,
+ bool has_pooled_eu,
+ const struct sseu_dev_info *sseu)
+{
+ const char *type = is_available_info ? "Available" : "Enabled";
+ int s;
+
+ seq_printf(m, " %s Slice Mask: %04x\n", type,
+ sseu->slice_mask);
+ seq_printf(m, " %s Slice Total: %u\n", type,
+ hweight8(sseu->slice_mask));
+ seq_printf(m, " %s Subslice Total: %u\n", type,
+ intel_sseu_subslice_total(sseu));
+ for (s = 0; s < fls(sseu->slice_mask); s++) {
+ seq_printf(m, " %s Slice%i subslices: %u\n", type,
+ s, intel_sseu_subslices_per_slice(sseu, s));
+ }
+ seq_printf(m, " %s EU Total: %u\n", type,
+ sseu->eu_total);
+ seq_printf(m, " %s EU Per Subslice: %u\n", type,
+ sseu->eu_per_subslice);
+
+ if (!is_available_info)
+ return;
+
+ seq_printf(m, " Has Pooled EU: %s\n", yesno(has_pooled_eu));
+ if (has_pooled_eu)
+ seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
+
+ seq_printf(m, " Has Slice Power Gating: %s\n",
+ yesno(sseu->has_slice_pg));
+ seq_printf(m, " Has Subslice Power Gating: %s\n",
+ yesno(sseu->has_subslice_pg));
+ seq_printf(m, " Has EU Power Gating: %s\n",
+ yesno(sseu->has_eu_pg));
+}
+
+/*
+ * this is called from top-level debugfs as well, so we can't get the gt from
+ * the seq_file.
+ */
+int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ const struct intel_gt_info *info = &gt->info;
+ struct sseu_dev_info sseu;
+ intel_wakeref_t wakeref;
+
+ if (INTEL_GEN(i915) < 8)
+ return -ENODEV;
+
+ seq_puts(m, "SSEU Device Info\n");
+ i915_print_sseu_info(m, true, HAS_POOLED_EU(i915), &info->sseu);
+
+ seq_puts(m, "SSEU Device Status\n");
+ memset(&sseu, 0, sizeof(sseu));
+ intel_sseu_set_info(&sseu, info->sseu.max_slices,
+ info->sseu.max_subslices,
+ info->sseu.max_eus_per_subslice);
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ if (IS_CHERRYVIEW(i915))
+ cherryview_sseu_device_status(gt, &sseu);
+ else if (IS_BROADWELL(i915))
+ bdw_sseu_device_status(gt, &sseu);
+ else if (IS_GEN(i915, 9))
+ gen9_sseu_device_status(gt, &sseu);
+ else if (INTEL_GEN(i915) >= 10)
+ gen10_sseu_device_status(gt, &sseu);
+ }
+
+ i915_print_sseu_info(m, false, HAS_POOLED_EU(i915), &sseu);
+
+ return 0;
+}
+
+static int sseu_status_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+
+ return intel_sseu_status(m, gt);
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(sseu_status);
+
+static int rcs_topology_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ intel_sseu_print_topology(&gt->info.sseu, &p);
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(rcs_topology);
+
+void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "sseu_status", &sseu_status_fops, NULL },
+ { "rcs_topology", &rcs_topology_fops, NULL },
+ };
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h
new file mode 100644
index 000000000000..73f001589e90
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef INTEL_SSEU_DEBUGFS_H
+#define INTEL_SSEU_DEBUGFS_H
+
+struct intel_gt;
+struct dentry;
+struct seq_file;
+
+int intel_sseu_status(struct seq_file *m, struct intel_gt *gt);
+void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root);
+
+#endif /* INTEL_SSEU_DEBUGFS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 4546284fede1..46d20f5f3ddc 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -73,6 +73,8 @@ hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
return vma;
}
+ GT_TRACE(timeline->gt, "new HWSP allocated\n");
+
vma->private = hwsp;
hwsp->gt = timeline->gt;
hwsp->vma = vma;
@@ -327,6 +329,8 @@ int intel_timeline_pin(struct intel_timeline *tl)
tl->hwsp_offset =
i915_ggtt_offset(tl->hwsp_ggtt) +
offset_in_page(tl->hwsp_offset);
+ GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
+ tl->fence_context, tl->hwsp_offset);
cacheline_acquire(tl->hwsp_cacheline);
if (atomic_fetch_inc(&tl->pin_count)) {
@@ -434,6 +438,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
int err;
might_lock(&tl->gt->ggtt->vm.mutex);
+ GT_TRACE(tl->gt, "timeline:%llx wrapped\n", tl->fence_context);
/*
* If there is an outstanding GPU reference to this cacheline,
@@ -497,6 +502,8 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);
tl->hwsp_offset += i915_ggtt_offset(vma);
+ GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
+ tl->fence_context, tl->hwsp_offset);
cacheline_acquire(cl);
tl->hwsp_cacheline = cl;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 2da366821dda..5726cd0a37e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -404,7 +404,7 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- struct drm_i915_private *i915 = engine->i915;
+ struct intel_gt *gt = engine->gt;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
@@ -415,7 +415,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
- if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
+ if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
continue;
/*
@@ -424,7 +424,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
*
* -> 0 <= ss <= 3;
*/
- ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
+ ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
@@ -1036,7 +1036,7 @@ cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
static void
wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+ const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
unsigned int slice, subslice;
u32 l3_en, mcr, mcr_mask;
@@ -1649,11 +1649,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_SARCHKMD,
GEN7_DISABLE_SAMPLER_PREFETCH);
- /* Wa_1407928979:tgl */
- wa_write_or(wal,
- GEN7_FF_THREAD_MODE,
- GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
-
/* Wa_1408615072:tgl */
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
VSUNIT_CLKGATE_DIS_TGL);
@@ -1677,6 +1672,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* Wa_14010229206:tgl
*/
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
+
+ /*
+ * Wa_1407928979:tgl A*
+ * Wa_18011464164:tgl B0+
+ * Wa_22010931296:tgl B0+
+ */
+ wa_write_or(wal, GEN7_FF_THREAD_MODE,
+ GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
}
if (IS_GEN(i915, 11)) {
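
The skl_tune_iz_hashing() hunk earlier in this file keys off slices whose subslice_7eu mask has exactly one bit set, then turns that bit's index into a hashing value. A sketch of the is_power_of_2()/ffs() selection, with invented per-slice masks:

#include <stdio.h>

static int is_power_of_2(unsigned int v)
{
	return v && !(v & (v - 1));
}

int main(void)
{
	/* Invented per-slice masks of subslices that kept all 7 EUs. */
	unsigned int subslice_7eu[3] = { 0x4, 0x3, 0x0 };
	unsigned int vals[3] = { 0, 0, 0 };
	int i;

	for (i = 0; i < 3; i++) {
		int ss;

		/* Only slices with exactly one 7-EU subslice are considered. */
		if (!is_power_of_2(subslice_7eu[i]))
			continue;

		/* ffs()-style: index of the single set bit. */
		ss = __builtin_ffs((int)subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	printf("hashing vals: %u %u %u\n", vals[0], vals[1], vals[2]);
	return 0;
}
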
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index daa4aabab9a7..3fc5de961280 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -963,7 +963,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer,
goto out;
if (i915_request_wait(head, 0,
- 2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
+ 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
count, n);
GEM_TRACE_DUMP();
@@ -3569,8 +3569,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
}
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
- count, flags,
- RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
+ count, flags, smoke->gt->info.num_engines, smoke->ncontext);
return 0;
}
@@ -3597,8 +3596,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
} while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
- count, flags,
- RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
+ count, flags, smoke->gt->info.num_engines, smoke->ncontext);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 3c8434846fa1..64ef5ee5decf 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -233,7 +233,7 @@ int live_rc6_ctx_wa(void *arg)
i915_reset_engine_count(error, engine)) {
pr_err("%s: GPU reset required\n",
engine->name);
- add_taint_for_CI(TAINT_WARN);
+ add_taint_for_CI(gt->i915, TAINT_WARN);
err = -EIO;
goto out;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index bb753f0c12eb..8624f5d2a1f3 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -29,9 +29,9 @@ static int cmp_u64(const void *A, const void *B)
{
const u64 *a = A, *b = B;
- if (a < b)
+ if (*a < *b)
return -1;
- else if (a > b)
+ else if (*a > *b)
return 1;
else
return 0;
@@ -41,9 +41,9 @@ static int cmp_u32(const void *A, const void *B)
{
const u32 *a = A, *b = B;
- if (a < b)
+ if (*a < *b)
return -1;
- else if (a > b)
+ else if (*a > *b)
return 1;
else
return 0;
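
The selftest_rps fix above is the classic qsort() comparator mistake: the callback receives pointers to the elements, so the values must be compared through *a and *b rather than comparing the pointers themselves. A standalone example of the corrected comparator and how it is used:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/*
 * Corrected comparator: qsort() hands us pointers to the elements, so we
 * must dereference before comparing.
 */
static int cmp_u64(const void *A, const void *B)
{
	const uint64_t *a = A, *b = B;

	if (*a < *b)
		return -1;
	else if (*a > *b)
		return 1;
	else
		return 0;
}

int main(void)
{
	uint64_t samples[] = { 42, 7, 1000000000ull, 3 };
	size_t i, n = sizeof(samples) / sizeof(samples[0]);

	qsort(samples, n, sizeof(samples[0]), cmp_u64);

	for (i = 0; i < n; i++)
		printf("%llu ", (unsigned long long)samples[i]);
	printf("\n");
	return 0;
}

With the old pointer comparison the sort order depended on where the array happened to live in memory, which is why the selftest's median/percentile math was unreliable before this change.
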
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index fcdee951579b..fb5b7d3498a6 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -562,8 +562,9 @@ static int live_hwsp_engine(void *arg)
struct intel_timeline *tl = timelines[n];
if (!err && *tl->hwsp_seqno != n) {
- pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
- n, *tl->hwsp_seqno);
+ pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
+ n, tl->hwsp_offset, *tl->hwsp_seqno);
+ GEM_TRACE_DUMP();
err = -EINVAL;
}
intel_timeline_put(tl);
@@ -633,8 +634,9 @@ out:
struct intel_timeline *tl = timelines[n];
if (!err && *tl->hwsp_seqno != n) {
- pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
- n, *tl->hwsp_seqno);
+ pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
+ n, tl->hwsp_offset, *tl->hwsp_seqno);
+ GEM_TRACE_DUMP();
err = -EINVAL;
}
intel_timeline_put(tl);
@@ -965,8 +967,9 @@ static int live_hwsp_recycle(void *arg)
}
if (*tl->hwsp_seqno != count) {
-			pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
-				count, *tl->hwsp_seqno);
+			pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
+				count, tl->hwsp_offset, *tl->hwsp_seqno);
+ GEM_TRACE_DUMP();
err = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 101728006ae9..d44061033f23 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -67,7 +67,7 @@ struct __guc_ads_blob {
static void __guc_ads_init(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct intel_gt *gt = guc_to_gt(guc);
struct __guc_ads_blob *blob = guc->ads_blob;
const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
u32 base;
@@ -99,13 +99,13 @@ static void __guc_ads_init(struct intel_guc *guc)
}
/* System info */
- blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask);
+ blob->system_info.slice_enabled = hweight8(gt->info.sseu.slice_mask);
blob->system_info.rcs_enabled = 1;
blob->system_info.bcs_enabled = 1;
- blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv);
- blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv);
- blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+ blob->system_info.vdbox_enable_mask = VDBOX_MASK(gt);
+ blob->system_info.vebox_enable_mask = VEBOX_MASK(gt);
+ blob->system_info.vdbox_sfc_support_mask = gt->info.vdbox_sfc_access;
base = intel_guc_ggtt_offset(guc, guc->ads_vma);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 1c2d6358826c..d6f55f70889d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -267,8 +267,17 @@ static void __uc_fetch_firmwares(struct intel_uc *uc)
GEM_BUG_ON(!intel_uc_wants_guc(uc));
err = intel_uc_fw_fetch(&uc->guc.fw);
- if (err)
+ if (err) {
+ /* Make sure we transition out of transient "SELECTED" state */
+ if (intel_uc_wants_huc(uc)) {
+ drm_dbg(&uc_to_gt(uc)->i915->drm,
+				"Failed to fetch GuC: %d, disabling HuC\n", err);
+ intel_uc_fw_change_status(&uc->huc.fw,
+ INTEL_UC_FIRMWARE_ERROR);
+ }
+
return;
+ }
if (intel_uc_wants_huc(uc))
intel_uc_fw_fetch(&uc->huc.fw);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
index 9d16b784aa0d..089d98662f46 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
@@ -4,14 +4,41 @@
*/
#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+#include "gt/debugfs_gt.h"
#include "intel_guc_debugfs.h"
#include "intel_huc_debugfs.h"
#include "intel_uc.h"
#include "intel_uc_debugfs.h"
+static int uc_usage_show(struct seq_file *m, void *data)
+{
+ struct intel_uc *uc = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ drm_printf(&p, "[guc] supported:%s wanted:%s used:%s\n",
+ yesno(intel_uc_supports_guc(uc)),
+ yesno(intel_uc_wants_guc(uc)),
+ yesno(intel_uc_uses_guc(uc)));
+ drm_printf(&p, "[huc] supported:%s wanted:%s used:%s\n",
+ yesno(intel_uc_supports_huc(uc)),
+ yesno(intel_uc_wants_huc(uc)),
+ yesno(intel_uc_uses_huc(uc)));
+ drm_printf(&p, "[submission] supported:%s wanted:%s used:%s\n",
+ yesno(intel_uc_supports_guc_submission(uc)),
+ yesno(intel_uc_wants_guc_submission(uc)),
+ yesno(intel_uc_uses_guc_submission(uc)));
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(uc_usage);
+
void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
{
+ static const struct debugfs_gt_file files[] = {
+ { "usage", &uc_usage_fops, NULL },
+ };
struct dentry *root;
if (!gt_root)
@@ -25,6 +52,8 @@ void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
if (IS_ERR(root))
return;
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), uc);
+
intel_guc_debugfs_register(&uc->guc, root);
intel_huc_debugfs_register(&uc->huc, root);
}
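
The uc debugfs change above registers its file from a small {name, fops} table, the same shape the gt debugfs code uses. A toy table-driven sketch of that pattern; the names, callbacks and printed output are invented, and a plain function pointer stands in for the fops/attribute machinery:

#include <stdio.h>

struct dbg_file {
	const char *name;
	int (*show)(const char *data);
	const char *data;
};

static int usage_show(const char *data)
{
	printf("[%s] supported:yes wanted:yes used:yes\n", data);
	return 0;
}

static void register_files(const struct dbg_file *files, unsigned int count)
{
	unsigned int i;

	/* The kernel loop hands each entry to debugfs; here we just call it. */
	for (i = 0; i < count; i++) {
		printf("registering %s\n", files[i].name);
		files[i].show(files[i].data);
	}
}

int main(void)
{
	static const struct dbg_file files[] = {
		{ "usage", usage_show, "guc" },
	};

	register_files(files, sizeof(files) / sizeof(files[0]));
	return 0;
}
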
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index ec47d4114554..62e6a14ad58e 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -66,7 +66,7 @@ static inline int mmio_diff_handler(struct intel_gvt *gvt,
vreg = vgpu_vreg(param->vgpu, offset);
if (preg != vreg) {
- node = kmalloc(sizeof(*node), GFP_KERNEL);
+ node = kmalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 26cae4846c82..63bba7b4bb2f 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -347,7 +347,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
}
- engine_mask &= INTEL_INFO(vgpu->gvt->gt->i915)->engine_mask;
+ engine_mask &= vgpu->gvt->gt->info.engine_mask;
}
/* vgpu_lock already hold by emulate mmio r/w */
@@ -1729,14 +1729,14 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
- if (data & _MASKED_BIT_ENABLE(1)) {
+ if (IS_MASKED_BITS_ENABLED(data, 1)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
}
if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
IS_COMETLAKE(vgpu->gvt->gt->i915)) &&
- data & _MASKED_BIT_ENABLE(2)) {
+ IS_MASKED_BITS_ENABLED(data, 2)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
}
@@ -1745,14 +1745,14 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
* pvinfo, if not, we will treat this guest as non-gvtg-aware
* guest, and stop emulating its cfg space, mmio, gtt, etc.
*/
- if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
- (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
- && !vgpu->pv_notified) {
+ if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
+ IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
+ !vgpu->pv_notified) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
}
- if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
- || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+ if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
+ IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
gvt_dbg_core("EXECLIST %s on ring %s\n",
@@ -1813,7 +1813,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+ if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
data |= RESET_CTL_READY_TO_RESET;
else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
data &= ~RESET_CTL_READY_TO_RESET;
@@ -1831,7 +1831,8 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
write_vreg(vgpu, offset, p_data, bytes);
- if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+ if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
+ IS_MASKED_BITS_ENABLED(data, 0x8))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
@@ -1867,7 +1868,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
- if (HAS_ENGINE(dev_priv, VCS1)) \
+ if (HAS_ENGINE(gvt->gt, VCS1)) \
MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)
@@ -3059,6 +3060,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
@@ -3135,8 +3137,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
- MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
+ MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL);
+ MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 540017fed908..7498878e6289 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -540,7 +540,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
- if (HAS_ENGINE(gvt->gt->i915, VCS1)) {
+ if (HAS_ENGINE(gvt->gt, VCS1)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 2ccaf78f96e8..86a60bdf0818 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -171,7 +171,7 @@ static void load_render_mocs(const struct intel_engine_cs *engine)
return;
for (ring_id = 0; ring_id < cnt; ring_id++) {
- if (!HAS_ENGINE(engine->i915, ring_id))
+ if (!HAS_ENGINE(engine->gt, ring_id))
continue;
offset.reg = regs[ring_id];
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 970704b18f23..3b25e7fe32f6 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -54,8 +54,8 @@ bool is_inhibit_context(struct intel_context *ce);
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
struct i915_request *req);
-#define IS_RESTORE_INHIBIT(a) \
- (_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
- ((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
+
+#define IS_RESTORE_INHIBIT(a) \
+ IS_MASKED_BITS_ENABLED(a, CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)
#endif
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 5b66e14c5b7b..b88e033cbed4 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -94,6 +94,11 @@
#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
+#define IS_MASKED_BITS_ENABLED(_val, _b) \
+ (((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b))
+#define IS_MASKED_BITS_DISABLED(_val, _b) \
+ ((_val) & _MASKED_BIT_DISABLE(_b))
+
#define FORCEWAKE_RENDER_GEN9_REG 0xa278
#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
#define FORCEWAKE_BLITTER_GEN9_REG 0xa188
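
The IS_MASKED_BITS_ENABLED()/IS_MASKED_BITS_DISABLED() helpers added to reg.h above test i915-style masked writes, where the upper 16 bits of the value say which of the low 16 bits the write actually touches. A sketch using local copies of that convention (the bit position is invented):

#include <stdio.h>
#include <stdint.h>

/* Local copies of the masked-write convention mirrored by the new helpers. */
#define MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a)  ((a) << 16)

#define IS_MASKED_BITS_ENABLED(val, b) \
	(((val) & MASKED_BIT_ENABLE(b)) == MASKED_BIT_ENABLE(b))

int main(void)
{
	uint32_t bit           = 1u << 3;
	uint32_t enable_write  = MASKED_BIT_ENABLE(bit);	/* mask + value */
	uint32_t disable_write = MASKED_BIT_DISABLE(bit);	/* mask only    */
	uint32_t no_mask       = bit;				/* value only   */

	printf("enable write:  %d\n", !!IS_MASKED_BITS_ENABLED(enable_write, bit));	/* 1 */
	printf("disable write: %d\n", !!IS_MASKED_BITS_ENABLED(disable_write, bit));	/* 0 */
	printf("mask missing:  %d\n", !!IS_MASKED_BITS_ENABLED(no_mask, bit));		/* 0 */
	return 0;
}

The last case is the point of the predicate: a value bit on its own does not count as an enable unless its mask bit is set in the same write.
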
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9ca94a435b75..784219962193 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -34,11 +34,13 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_clock_utils.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
+#include "gt/intel_sseu_debugfs.h"
#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
@@ -61,6 +63,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
intel_device_info_print_static(INTEL_INFO(i915), &p);
intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
+ intel_gt_info_print(&i915->gt.info, &p);
intel_driver_caps_print(&i915->caps, &p);
kernel_param_lock(THIS_MODULE);
@@ -492,6 +495,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "PCU interrupt enable:\t%08x\n",
I915_READ(GEN8_PCU_IER));
} else if (INTEL_GEN(dev_priv) >= 11) {
+ if (HAS_MASTER_UNIT_IRQ(dev_priv))
+ seq_printf(m, "Master Unit Interrupt Control: %08x\n",
+ I915_READ(DG1_MSTR_UNIT_INTR));
+
seq_printf(m, "Master Interrupt Control: %08x\n",
I915_READ(GEN11_GFX_MSTR_IRQ));
@@ -1138,13 +1145,20 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
struct intel_uncore *uncore = &dev_priv->uncore;
intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
+ if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ seq_puts(m, "L-shaped memory detected\n");
+
+ /* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
+ if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
+ return 0;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
if (IS_GEN_RANGE(dev_priv, 3, 4)) {
seq_printf(m, "DDC = 0x%08x\n",
intel_uncore_read(uncore, DCC));
@@ -1173,9 +1187,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
intel_uncore_read(uncore, DISP_ARB_CTL));
}
- if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
- seq_puts(m, "L-shaped memory detected\n");
-
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
@@ -1316,16 +1327,6 @@ static int i915_engine_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_rcs_topology(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
-
- return 0;
-}
-
static int i915_shrinker_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -1572,264 +1573,16 @@ i915_cache_sharing_set(void *data, u64 val)
return 0;
}
-static void
-intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
- u8 *to_mask)
-{
- int offset = slice * sseu->ss_stride;
-
- memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
-}
-
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
i915_cache_sharing_get, i915_cache_sharing_set,
"%llu\n");
-static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
- struct sseu_dev_info *sseu)
-{
-#define SS_MAX 2
- const int ss_max = SS_MAX;
- u32 sig1[SS_MAX], sig2[SS_MAX];
- int ss;
-
- sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
- sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
- sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
- sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
-
- for (ss = 0; ss < ss_max; ss++) {
- unsigned int eu_cnt;
-
- if (sig1[ss] & CHV_SS_PG_ENABLE)
- /* skip disabled subslice */
- continue;
-
- sseu->slice_mask = BIT(0);
- sseu->subslice_mask[0] |= BIT(ss);
- eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
- ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
- ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
- ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
- sseu->eu_total += eu_cnt;
- sseu->eu_per_subslice = max_t(unsigned int,
- sseu->eu_per_subslice, eu_cnt);
- }
-#undef SS_MAX
-}
-
-static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
- struct sseu_dev_info *sseu)
-{
-#define SS_MAX 6
- const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
- u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
- int s, ss;
-
- for (s = 0; s < info->sseu.max_slices; s++) {
- /*
- * FIXME: Valid SS Mask respects the spec and read
- * only valid bits for those registers, excluding reserved
- * although this seems wrong because it would leave many
- * subslices without ACK.
- */
- s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
- GEN10_PGCTL_VALID_SS_MASK(s);
- eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
- eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
- }
-
- eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
- GEN9_PGCTL_SSA_EU19_ACK |
- GEN9_PGCTL_SSA_EU210_ACK |
- GEN9_PGCTL_SSA_EU311_ACK;
- eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
- GEN9_PGCTL_SSB_EU19_ACK |
- GEN9_PGCTL_SSB_EU210_ACK |
- GEN9_PGCTL_SSB_EU311_ACK;
-
- for (s = 0; s < info->sseu.max_slices; s++) {
- if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
- /* skip disabled slice */
- continue;
-
- sseu->slice_mask |= BIT(s);
- intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
-
- for (ss = 0; ss < info->sseu.max_subslices; ss++) {
- unsigned int eu_cnt;
-
- if (info->sseu.has_subslice_pg &&
- !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
- /* skip disabled subslice */
- continue;
-
- eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
- eu_mask[ss % 2]);
- sseu->eu_total += eu_cnt;
- sseu->eu_per_subslice = max_t(unsigned int,
- sseu->eu_per_subslice,
- eu_cnt);
- }
- }
-#undef SS_MAX
-}
-
-static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
- struct sseu_dev_info *sseu)
-{
-#define SS_MAX 3
- const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
- u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
- int s, ss;
-
- for (s = 0; s < info->sseu.max_slices; s++) {
- s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
- eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
- eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
- }
-
- eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
- GEN9_PGCTL_SSA_EU19_ACK |
- GEN9_PGCTL_SSA_EU210_ACK |
- GEN9_PGCTL_SSA_EU311_ACK;
- eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
- GEN9_PGCTL_SSB_EU19_ACK |
- GEN9_PGCTL_SSB_EU210_ACK |
- GEN9_PGCTL_SSB_EU311_ACK;
-
- for (s = 0; s < info->sseu.max_slices; s++) {
- if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
- /* skip disabled slice */
- continue;
-
- sseu->slice_mask |= BIT(s);
-
- if (IS_GEN9_BC(dev_priv))
- intel_sseu_copy_subslices(&info->sseu, s,
- sseu->subslice_mask);
-
- for (ss = 0; ss < info->sseu.max_subslices; ss++) {
- unsigned int eu_cnt;
- u8 ss_idx = s * info->sseu.ss_stride +
- ss / BITS_PER_BYTE;
-
- if (IS_GEN9_LP(dev_priv)) {
- if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
- /* skip disabled subslice */
- continue;
-
- sseu->subslice_mask[ss_idx] |=
- BIT(ss % BITS_PER_BYTE);
- }
-
- eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
- eu_mask[ss%2]);
- sseu->eu_total += eu_cnt;
- sseu->eu_per_subslice = max_t(unsigned int,
- sseu->eu_per_subslice,
- eu_cnt);
- }
- }
-#undef SS_MAX
-}
-
-static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
- struct sseu_dev_info *sseu)
-{
- const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
- u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
- int s;
-
- sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
-
- if (sseu->slice_mask) {
- sseu->eu_per_subslice = info->sseu.eu_per_subslice;
- for (s = 0; s < fls(sseu->slice_mask); s++)
- intel_sseu_copy_subslices(&info->sseu, s,
- sseu->subslice_mask);
- sseu->eu_total = sseu->eu_per_subslice *
- intel_sseu_subslice_total(sseu);
-
- /* subtract fused off EU(s) from enabled slice(s) */
- for (s = 0; s < fls(sseu->slice_mask); s++) {
- u8 subslice_7eu = info->sseu.subslice_7eu[s];
-
- sseu->eu_total -= hweight8(subslice_7eu);
- }
- }
-}
-
-static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
- const struct sseu_dev_info *sseu)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const char *type = is_available_info ? "Available" : "Enabled";
- int s;
-
- seq_printf(m, " %s Slice Mask: %04x\n", type,
- sseu->slice_mask);
- seq_printf(m, " %s Slice Total: %u\n", type,
- hweight8(sseu->slice_mask));
- seq_printf(m, " %s Subslice Total: %u\n", type,
- intel_sseu_subslice_total(sseu));
- for (s = 0; s < fls(sseu->slice_mask); s++) {
- seq_printf(m, " %s Slice%i subslices: %u\n", type,
- s, intel_sseu_subslices_per_slice(sseu, s));
- }
- seq_printf(m, " %s EU Total: %u\n", type,
- sseu->eu_total);
- seq_printf(m, " %s EU Per Subslice: %u\n", type,
- sseu->eu_per_subslice);
-
- if (!is_available_info)
- return;
-
- seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
- if (HAS_POOLED_EU(dev_priv))
- seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
-
- seq_printf(m, " Has Slice Power Gating: %s\n",
- yesno(sseu->has_slice_pg));
- seq_printf(m, " Has Subslice Power Gating: %s\n",
- yesno(sseu->has_subslice_pg));
- seq_printf(m, " Has EU Power Gating: %s\n",
- yesno(sseu->has_eu_pg));
-}
-
static int i915_sseu_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
- struct sseu_dev_info sseu;
- intel_wakeref_t wakeref;
-
- if (INTEL_GEN(dev_priv) < 8)
- return -ENODEV;
-
- seq_puts(m, "SSEU Device Info\n");
- i915_print_sseu_info(m, true, &info->sseu);
-
- seq_puts(m, "SSEU Device Status\n");
- memset(&sseu, 0, sizeof(sseu));
- intel_sseu_set_info(&sseu, info->sseu.max_slices,
- info->sseu.max_subslices,
- info->sseu.max_eus_per_subslice);
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_sseu_device_status(dev_priv, &sseu);
- else if (IS_BROADWELL(dev_priv))
- bdw_sseu_device_status(dev_priv, &sseu);
- else if (IS_GEN(dev_priv, 9))
- gen9_sseu_device_status(dev_priv, &sseu);
- else if (INTEL_GEN(dev_priv) >= 10)
- gen10_sseu_device_status(dev_priv, &sseu);
- }
-
- i915_print_sseu_info(m, false, &sseu);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_gt *gt = &i915->gt;
- return 0;
+ return intel_sseu_status(m, gt);
}
static int i915_forcewake_open(struct inode *inode, struct file *file)
@@ -1876,7 +1629,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_llc", i915_llc, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
{"i915_engine_info", i915_engine_info, 0},
- {"i915_rcs_topology", i915_rcs_topology, 0},
{"i915_shrinker_info", i915_shrinker_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
{"i915_sseu_status", i915_sseu_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 67102dc26fce..5fd5af4bc855 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -531,13 +531,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev_priv);
- intel_device_info_init_mmio(dev_priv);
-
- intel_uncore_prune_mmio_domains(&dev_priv->uncore);
-
- intel_uc_init_mmio(&dev_priv->gt.uc);
-
- ret = intel_engines_init_mmio(&dev_priv->gt);
+ ret = intel_gt_init_mmio(&dev_priv->gt);
if (ret)
goto err_uncore;
@@ -890,6 +884,7 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
+ intel_gt_info_print(&dev_priv->gt.info, &p);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c2e88d49f3e..e4f7f6518945 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -108,8 +108,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20200702"
-#define DRIVER_TIMESTAMP 1593714328
+#define DRIVER_DATE "20200715"
+#define DRIVER_TIMESTAMP 1594811881
struct drm_i915_gem_object;
@@ -442,6 +442,7 @@ struct intel_fbc {
struct {
const struct drm_format_info *format;
unsigned int stride;
+ u64 modifier;
} fb;
int cfb_size;
@@ -692,6 +693,7 @@ struct intel_vbt_data {
bool initialized;
int bpp;
struct edp_power_seq pps;
+ bool hobl;
} edp;
struct {
@@ -1256,7 +1258,7 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
- for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \
+ for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
(tmp__) ? \
((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
0;)
@@ -1430,6 +1432,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
+#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -1558,22 +1561,29 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_RKL_REVID(p, since, until) \
(IS_ROCKETLAKE(p) && IS_REVID(p, since, until))
+#define DG1_REVID_A0 0x0
+#define DG1_REVID_B0 0x1
+
+#define IS_DG1_REVID(p, since, until) \
+ (IS_DG1(p) && IS_REVID(p, since, until))
+
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
-#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
+#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
+#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
-#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \
+#define ENGINE_INSTANCES_MASK(gt, first, count) ({ \
unsigned int first__ = (first); \
unsigned int count__ = (count); \
- (INTEL_INFO(dev_priv)->engine_mask & \
+ ((gt)->info.engine_mask & \
GENMASK(first__ + count__ - 1, first__)) >> first__; \
})
-#define VDBOX_MASK(dev_priv) \
- ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS)
-#define VEBOX_MASK(dev_priv) \
- ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
+#define VDBOX_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
+#define VEBOX_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
/*
* The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
@@ -1597,6 +1607,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
(INTEL_INFO(dev_priv)->has_logical_ring_preemption)
+#define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)
+
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
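
The reworked HAS_ENGINE()/ENGINE_INSTANCES_MASK() macros above extract a contiguous run of engine-instance bits from gt->info.engine_mask and right-justify it, and for_each_engine_masked() then walks the remaining set bits. A userspace sketch of both steps with an invented engine mask and a 32-bit GENMASK() stand-in:

#include <stdio.h>

#define BIT(n)        (1u << (n))
#define GENMASK(h, l) ((~0u >> (31 - (h))) & ~(BIT(l) - 1))

/* Same shape as ENGINE_INSTANCES_MASK(): pull a contiguous run of
 * engine-instance bits out of the engine mask and right-justify it. */
static unsigned int instances_mask(unsigned int engine_mask,
				   unsigned int first, unsigned int count)
{
	return (engine_mask & GENMASK(first + count - 1, first)) >> first;
}

int main(void)
{
	/* Invented engine mask: three fixed engines plus two video engines. */
	unsigned int engine_mask = BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(6);
	unsigned int vcs_first = 4, vcs_count = 4;
	unsigned int tmp;

	tmp = instances_mask(engine_mask, vcs_first, vcs_count);
	printf("VCS instance mask: 0x%x\n", tmp);	/* 0x5 */

	/* Walk the set bits, like for_each_engine_masked() does. */
	while (tmp) {
		int inst = __builtin_ctz(tmp);	/* lowest set bit = instance */

		printf("  VCS instance %d present\n", inst);
		tmp &= tmp - 1;			/* clear it and continue */
	}
	return 0;
}
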
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 1753c84d6c0d..f333e88a2b6e 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -72,7 +72,7 @@ struct drm_i915_private;
trace_printk(__VA_ARGS__); \
} while (0)
#define GEM_TRACE_DUMP() \
- do { ftrace_dump(DUMP_ALL); add_taint_for_CI(TAINT_WARN); } while (0)
+ do { ftrace_dump(DUMP_ALL); __add_taint_for_CI(TAINT_WARN); } while (0)
#define GEM_TRACE_DUMP_ON(expr) \
do { if (expr) GEM_TRACE_DUMP(); } while (0)
#else
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index cb43381b0d37..c5ee1567f3d1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -31,6 +31,8 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
pages->sgl, pages->nents,
PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_KERNEL_MAPPING |
DMA_ATTR_NO_WARN))
return 0;
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 40390b2352b1..421613219ae9 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -12,7 +12,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *i915 = to_i915(dev);
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+ const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
drm_i915_getparam_t *param = data;
int value;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 866166ada10e..6a3a2ce0b394 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -42,6 +42,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "i915_drv.h"
@@ -425,7 +426,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
static void error_print_instdone(struct drm_i915_error_state_buf *m,
const struct intel_engine_coredump *ee)
{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu;
+ const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu;
int slice;
int subslice;
@@ -619,16 +620,13 @@ static void print_error_vma(struct drm_i915_error_state_buf *m,
}
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
- const struct intel_device_info *info,
- const struct intel_runtime_info *runtime,
- const struct intel_driver_caps *caps)
+ struct i915_gpu_coredump *error)
{
struct drm_printer p = i915_error_printer(m);
- intel_device_info_print_static(info, &p);
- intel_device_info_print_runtime(runtime, &p);
- intel_device_info_print_topology(&runtime->sseu, &p);
- intel_driver_caps_print(caps, &p);
+ intel_device_info_print_static(&error->device_info, &p);
+ intel_device_info_print_runtime(&error->runtime_info, &p);
+ intel_driver_caps_print(&error->driver_caps, &p);
}
static void err_print_params(struct drm_i915_error_state_buf *m,
@@ -678,6 +676,15 @@ static void err_free_sgl(struct scatterlist *sgl)
}
}
+static void err_print_gt_info(struct drm_i915_error_state_buf *m,
+ struct intel_gt_coredump *gt)
+{
+ struct drm_printer p = i915_error_printer(m);
+
+ intel_gt_info_print(&gt->info, &p);
+ intel_sseu_print_topology(&gt->info.sseu, &p);
+}
+
static void err_print_gt(struct drm_i915_error_state_buf *m,
struct intel_gt_coredump *gt)
{
@@ -734,6 +741,8 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
if (gt->uc)
err_print_uc(m, gt->uc);
+
+ err_print_gt_info(m, gt);
}
static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
@@ -798,8 +807,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (error->display)
intel_display_print_error_state(m, error->display);
- err_print_capabilities(m, &error->device_info, &error->runtime_info,
- &error->driver_caps);
+ err_print_capabilities(m, error);
err_print_params(m, &error->params);
}
@@ -1630,6 +1638,11 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}
+static void gt_record_info(struct intel_gt_coredump *gt)
+{
+ memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
+}
+
/*
* Generate a semi-unique error code. The code is not meant to have meaning, The
* code's only purpose is to try to prevent false duplicated bug reports by
@@ -1808,6 +1821,7 @@ struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915)
return ERR_PTR(-ENOMEM);
}
+ gt_record_info(error->gt);
gt_record_engines(error->gt, compress);
if (INTEL_INFO(i915)->has_gt_uc)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 76b80fbfb7e9..0220b0992808 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -15,6 +15,7 @@
#include <drm/drm_mm.h>
#include "gt/intel_engine.h"
+#include "gt/intel_gt_types.h"
#include "gt/uc/intel_uc_fw.h"
#include "intel_device_info.h"
@@ -118,6 +119,8 @@ struct intel_gt_coredump {
bool awake;
bool simulated;
+ struct intel_gt_info info;
+
/* Generic register state */
u32 eir;
u32 pgtbl_er;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 562b43ed077f..1fa67700d8f4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2584,6 +2584,46 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
gen11_master_intr_enable);
}
+static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
+{
+ u32 val;
+
+ /* First disable interrupts */
+ raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);
+
+ /* Get the indication levels and ack the master unit */
+ val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
+ if (unlikely(!val))
+ return 0;
+
+ raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);
+
+ /*
+ * Now with master disabled, get a sample of level indications
+ * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ
+ * out as this bit doesn't exist anymore for DG1
+ */
+ val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
+ if (unlikely(!val))
+ return 0;
+
+ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);
+
+ return val;
+}
+
+static inline void dg1_master_intr_enable(void __iomem * const regs)
+{
+ raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
+}
+
+static irqreturn_t dg1_irq_handler(int irq, void *arg)
+{
+ return __gen11_irq_handler(arg,
+ dg1_master_intr_disable_and_ack,
+ dg1_master_intr_enable);
+}
+
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@@ -2920,7 +2960,10 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- gen11_master_intr_disable(dev_priv->uncore.regs);
+ if (HAS_MASTER_UNIT_IRQ(dev_priv))
+ dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
+ else
+ gen11_master_intr_disable(dev_priv->uncore.regs);
gen11_gt_irq_reset(&dev_priv->gt);
gen11_display_irq_reset(dev_priv);
@@ -3071,7 +3114,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
hotplug_irqs = sde_ddi_mask | sde_tc_mask;
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
- I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
+ I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3517,8 +3561,13 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- gen11_master_intr_enable(uncore->regs);
- POSTING_READ(GEN11_GFX_MSTR_IRQ);
+ if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
+ dg1_master_intr_enable(uncore->regs);
+ POSTING_READ(DG1_MSTR_UNIT_INTR);
+ } else {
+ gen11_master_intr_enable(uncore->regs);
+ POSTING_READ(GEN11_GFX_MSTR_IRQ);
+ }
}
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -4043,6 +4092,8 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
else
return i8xx_irq_handler;
} else {
+ if (HAS_MASTER_UNIT_IRQ(dev_priv))
+ return dg1_irq_handler;
if (INTEL_GEN(dev_priv) >= 11)
return gen11_irq_handler;
else if (INTEL_GEN(dev_priv) >= 8)
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index e5fdf17cd9cd..2338f92ce490 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -168,7 +168,7 @@
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
- .engine_mask = BIT(RCS0), \
+ .platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.dma_mask_size = 32, \
@@ -188,7 +188,7 @@
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
- .engine_mask = BIT(RCS0), \
+ .platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.dma_mask_size = 32, \
@@ -217,6 +217,7 @@ static const struct intel_device_info i85x_info = {
static const struct intel_device_info i865g_info = {
I845_FEATURES,
PLATFORM(INTEL_I865G),
+ .display.has_fbc = 1,
};
#define GEN3_FEATURES \
@@ -225,7 +226,7 @@ static const struct intel_device_info i865g_info = {
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .engine_mask = BIT(RCS0), \
+ .platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
.dma_mask_size = 32, \
@@ -316,7 +317,7 @@ static const struct intel_device_info pnv_m_info = {
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .engine_mask = BIT(RCS0), \
+ .platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
.dma_mask_size = 36, \
@@ -348,7 +349,7 @@ static const struct intel_device_info i965gm_info = {
static const struct intel_device_info g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
- .engine_mask = BIT(RCS0) | BIT(VCS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@@ -358,7 +359,7 @@ static const struct intel_device_info gm45_info = {
.is_mobile = 1,
.display.has_fbc = 1,
.display.supports_tv = 1,
- .engine_mask = BIT(RCS0) | BIT(VCS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@@ -367,7 +368,7 @@ static const struct intel_device_info gm45_info = {
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
- .engine_mask = BIT(RCS0) | BIT(VCS0), \
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
/* ilk does support rc6, but we do not implement [power] contexts */ \
@@ -397,7 +398,7 @@ static const struct intel_device_info ilk_m_info = {
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
- .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
@@ -448,7 +449,7 @@ static const struct intel_device_info snb_m_gt2_info = {
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
- .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
@@ -519,7 +520,7 @@ static const struct intel_device_info vlv_info = {
.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
- .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
.display_mmio_offset = VLV_DISPLAY_BASE,
I9XX_PIPE_OFFSETS,
I9XX_CURSOR_OFFSETS,
@@ -530,7 +531,7 @@ static const struct intel_device_info vlv_info = {
#define G75_FEATURES \
GEN7_FEATURES, \
- .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
.display.has_ddi = 1, \
@@ -597,7 +598,7 @@ static const struct intel_device_info bdw_rsvd_info = {
static const struct intel_device_info bdw_gt3_info = {
BDW_PLATFORM,
.gt = 3,
- .engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
@@ -608,7 +609,7 @@ static const struct intel_device_info chv_info = {
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
.display.has_hotplug = 1,
.is_lp = 1,
- .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
@@ -661,7 +662,7 @@ static const struct intel_device_info skl_gt2_info = {
#define SKL_GT3_PLUS_PLATFORM \
SKL_PLATFORM, \
- .engine_mask = \
+ .platform_engine_mask = \
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
@@ -680,7 +681,7 @@ static const struct intel_device_info skl_gt4_info = {
.is_lp = 1, \
.num_supported_dbuf_slices = 1, \
.display.has_hotplug = 1, \
- .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
@@ -743,7 +744,7 @@ static const struct intel_device_info kbl_gt2_info = {
static const struct intel_device_info kbl_gt3_info = {
KBL_PLATFORM,
.gt = 3,
- .engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
@@ -764,7 +765,7 @@ static const struct intel_device_info cfl_gt2_info = {
static const struct intel_device_info cfl_gt3_info = {
CFL_PLATFORM,
.gt = 3,
- .engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
@@ -833,7 +834,7 @@ static const struct intel_device_info cnl_info = {
static const struct intel_device_info icl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ICELAKE),
- .engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
@@ -841,7 +842,7 @@ static const struct intel_device_info ehl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
.require_force_probe = 1,
- .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
.ppgtt_size = 36,
};
@@ -877,7 +878,7 @@ static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
.display.has_modular_fia = 1,
- .engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
@@ -890,14 +891,26 @@ static const struct intel_device_info rkl_info = {
BIT(TRANSCODER_C),
.require_force_probe = 1,
.display.has_psr_hw_tracking = 0,
- .engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0),
};
#define GEN12_DGFX_FEATURES \
GEN12_FEATURES, \
+ .memory_regions = REGION_SMEM | REGION_LMEM, \
+ .has_master_unit_irq = 1, \
.is_dgfx = 1
+static const struct intel_device_info dg1_info __maybe_unused = {
+ GEN12_DGFX_FEATURES,
+ PLATFORM(INTEL_DG1),
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .require_force_probe = 1,
+ .platform_engine_mask =
+ BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
+ BIT(VCS0) | BIT(VCS2),
+};
+
#undef GEN
#undef PLATFORM
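To illustrate the intent of the engine_mask to platform_engine_mask rename above (the static device info now lists what the platform can have, while the per-gt runtime mask is pruned by fuse readout), here is a hedged stand-alone sketch; the engine indices and the fuse value are invented for the example and are not the i915 enum values.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

/* Invented engine indices for the example only. */
enum { RCS0, BCS0, VCS0, VCS1, VCS2, VECS0 };

int main(void)
{
	/* What the platform could have (compile-time intel_device_info). */
	uint32_t platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
					BIT(VCS0) | BIT(VCS2);

	/* Pretend the media fuse register reports VCS2 as fused off. */
	uint32_t fused_off = BIT(VCS2);

	/* What the per-gt runtime engine mask would hold after fuse readout. */
	uint32_t engine_mask = platform_engine_mask & ~fused_off;

	printf("platform %#x -> runtime %#x\n", platform_engine_mask, engine_mask);
	return 0;
}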
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 25329b7600c9..c6f6370283cf 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1592,6 +1592,7 @@ static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
u32 d;
cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
+ cmd |= MI_SRM_LRM_GLOBAL_GTT;
if (INTEL_GEN(stream->perf->i915) >= 8)
cmd++;
@@ -1772,7 +1773,7 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
i915_gem_object_flush_map(bo);
- i915_gem_object_unpin_map(bo);
+ __i915_gem_object_release_map(bo);
stream->noa_wait = vma;
return 0;
@@ -1867,7 +1868,7 @@ alloc_oa_config_buffer(struct i915_perf_stream *stream,
*cs++ = 0;
i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
+ __i915_gem_object_release_map(obj);
oa_bo->vma = i915_vma_instance(obj,
&stream->engine->gt->ggtt->vm,
@@ -2196,7 +2197,7 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
if (!intel_context_pin_if_active(ce))
continue;
- flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu);
+ flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
err = gen8_modify_context(ce, flex, count);
intel_context_unpin(ce);
@@ -2340,7 +2341,7 @@ oa_configure_all_contexts(struct i915_perf_stream *stream,
if (engine->class != RENDER_CLASS)
continue;
- regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
+ regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
err = gen8_modify_self(ce, regs, num_regs, active);
if (err)
@@ -2740,8 +2741,7 @@ static void
get_default_sseu_config(struct intel_sseu *out_sseu,
struct intel_engine_cs *engine)
{
- const struct sseu_dev_info *devinfo_sseu =
- &RUNTIME_INFO(engine->i915)->sseu;
+ const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
*out_sseu = intel_sseu_from_device_info(devinfo_sseu);
@@ -2766,7 +2766,7 @@ get_sseu_config(struct intel_sseu *out_sseu,
drm_sseu->engine.engine_instance != engine->uabi_instance)
return -EINVAL;
- return i915_gem_user_to_context_sseu(engine->i915, drm_sseu, out_sseu);
+ return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index c1ebda9b5627..fed337ad7b68 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -31,7 +31,7 @@ static int copy_query_item(void *query_hdr, size_t query_sz,
static int query_topology_info(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item)
{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+ const struct sseu_dev_info *sseu = &dev_priv->gt.info.sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
int ret;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9d6536afc94b..4e796ff4d7d0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -868,7 +868,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG1 _MMIO(0x2740)
#define OAREPORTTRIG1_THRESHOLD_MASK 0xffff
-#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
+#define OAREPORTTRIG1_EDGE_LEVEL_TRIGGER_SELECT_MASK 0xffff0000 /* 0=level */
#define OAREPORTTRIG2 _MMIO(0x2744)
#define OAREPORTTRIG2_INVERT_A_0 (1 << 0)
@@ -921,7 +921,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG5 _MMIO(0x2750)
#define OAREPORTTRIG5_THRESHOLD_MASK 0xffff
-#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
+#define OAREPORTTRIG5_EDGE_LEVEL_TRIGGER_SELECT_MASK 0xffff0000 /* 0=level */
#define OAREPORTTRIG6 _MMIO(0x2754)
#define OAREPORTTRIG6_INVERT_A_0 (1 << 0)
@@ -1974,6 +1974,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy))
#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy))
#define ICL_PORT_PCS_DW1_LN0(phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, phy))
+#define DCC_MODE_SELECT_MASK (0x3 << 20)
+#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20)
#define COMMON_KEEPER_EN (1 << 26)
#define LATENCY_OPTIM_MASK (0x3 << 2)
#define LATENCY_OPTIM_VAL(x) ((x) << 2)
@@ -2072,6 +2074,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
+#define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy))
+#define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy))
+#define ICL_PORT_TX_DW8_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(8, 0, phy))
+#define ICL_PORT_TX_DW8_ODCC_CLK_SEL REG_BIT(31)
+#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK REG_GENMASK(30, 29)
+#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2 REG_FIELD_PREP(ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, 0x1)
+
#define _ICL_DPHY_CHKN_REG 0x194
#define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG)
#define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7)
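For readers unfamiliar with the REG_BIT/REG_GENMASK/REG_FIELD_PREP helpers used by the new ICL_PORT_TX_DW8 definitions, the field arithmetic reduces to plain mask-and-shift. The sketch below reimplements just enough of that behaviour under my own MY_* names (an assumption about the helpers' intent, not the kernel headers) to show how the DIV2 value lands in bits 30:29.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's REG_BIT/REG_GENMASK/REG_FIELD_PREP. */
#define MY_BIT(b)		(1u << (b))
#define MY_GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
/* __builtin_ctz is a GCC/Clang builtin: index of the lowest set bit. */
#define MY_FIELD_PREP(mask, v)	(((v) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	uint32_t clk_sel      = MY_BIT(31);		/* ...ODCC_CLK_SEL */
	uint32_t clk_div_mask = MY_GENMASK(30, 29);	/* ...ODCC_CLK_DIV_SEL_MASK */
	uint32_t clk_div_div2 = MY_FIELD_PREP(clk_div_mask, 0x1); /* ..._DIV2 */

	printf("sel=%#x mask=%#x div2=%#x\n", clk_sel, clk_div_mask, clk_div_div2);
	/* expected: sel=0x80000000 mask=0x60000000 div2=0x20000000 */
	return 0;
}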
@@ -2827,6 +2836,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
+#define SCPD_FBC_IGNORE_3D (1 << 6)
#define CSTATE_RENDER_CLOCK_GATE_DISABLE (1 << 5)
#define GEN2_IER _MMIO(0x20a0)
#define GEN2_IIR _MMIO(0x20a4)
@@ -7723,6 +7733,10 @@ enum {
#define GEN11_GT_DW1_IRQ (1 << 1)
#define GEN11_GT_DW0_IRQ (1 << 0)
+#define DG1_MSTR_UNIT_INTR _MMIO(0x190008)
+#define DG1_MSTR_IRQ REG_BIT(31)
+#define DG1_MSTR_UNIT(u) REG_BIT(u)
+
#define GEN11_DISPLAY_INT_CTL _MMIO(0x44200)
#define GEN11_DISPLAY_IRQ_ENABLE (1 << 31)
#define GEN11_AUDIO_CODEC_IRQ (1 << 24)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 3bb7320249ae..0b2fe55e6194 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -560,22 +560,25 @@ bool __i915_request_submit(struct i915_request *request)
engine->serial++;
result = true;
-xfer: /* We may be recursing from the signal callback of another i915 fence */
- spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
-
+xfer:
if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
list_move_tail(&request->sched.link, &engine->active.requests);
clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
- __notify_execute_cb(request);
}
- GEM_BUG_ON(!llist_empty(&request->execute_cb));
- if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
- !i915_request_enable_breadcrumb(request))
- intel_engine_signal_breadcrumbs(engine);
+ /* We may be recursing from the signal callback of another i915 fence */
+ if (!i915_request_signaled(request)) {
+ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+
+ __notify_execute_cb(request);
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &request->fence.flags) &&
+ !i915_request_enable_breadcrumb(request))
+ intel_engine_signal_breadcrumbs(engine);
- spin_unlock(&request->lock);
+ spin_unlock(&request->lock);
+ GEM_BUG_ON(!llist_empty(&request->execute_cb));
+ }
return result;
}
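The __i915_request_submit() hunk above narrows the locked section to requests whose fence has not already signaled; once signaled there is nothing left to notify, so the nested lock can be skipped entirely. A minimal stand-alone sketch of that shape, using plain pthreads and made-up types rather than the i915 structures:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_request {
	pthread_mutex_t lock;
	bool signaled;
	bool breadcrumb_armed;
};

static void submit(struct fake_request *rq)
{
	/* Skip the whole locked section once the fence has signaled:
	 * no callbacks or breadcrumbs are listening any more. */
	if (rq->signaled)
		return;

	pthread_mutex_lock(&rq->lock);
	/* notify execute callbacks, arm the breadcrumb, etc. */
	rq->breadcrumb_armed = true;
	pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
	struct fake_request rq = { .signaled = false, .breadcrumb_armed = false };

	pthread_mutex_init(&rq.lock, NULL);
	submit(&rq);
	printf("breadcrumb armed: %d\n", rq.breadcrumb_armed);
	return 0;
}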
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index f42a9e9a0b4f..4c305d838016 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -49,6 +49,16 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
}
}
+void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint)
+{
+ __i915_printk(i915, KERN_NOTICE, "CI tainted:%#x by %pS\n",
+ taint, (void *)_RET_IP_);
+
+ /* Failures that occur during fault injection testing are expected */
+ if (!i915_error_injected())
+ __add_taint_for_CI(taint);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
static unsigned int i915_probe_fail_count;
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 03a73d2bd50d..54773371e6bd 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -266,19 +266,6 @@ static inline int list_is_last_rcu(const struct list_head *list,
return READ_ONCE(list->next) == head;
}
-/*
- * Wait until the work is finally complete, even if it tries to postpone
- * by requeueing itself. Note, that if the worker never cancels itself,
- * we will spin forever.
- */
-static inline void drain_delayed_work(struct delayed_work *dw)
-{
- do {
- while (flush_delayed_work(dw))
- ;
- } while (delayed_work_pending(dw));
-}
-
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
unsigned long j = msecs_to_jiffies(m);
@@ -436,7 +423,8 @@ static inline const char *enableddisabled(bool v)
return v ? "enabled" : "disabled";
}
-static inline void add_taint_for_CI(unsigned int taint)
+void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint);
+static inline void __add_taint_for_CI(unsigned int taint)
{
/*
* The system is "ok", just about surviving for the user, but
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 7fe1f317cd2b..bc64f773dcdb 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -104,6 +104,7 @@ vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
+ struct i915_vma *pos = ERR_PTR(-E2BIG);
struct i915_vma *vma;
struct rb_node *rb, **p;
@@ -184,7 +185,6 @@ vma_create(struct drm_i915_gem_object *obj,
rb = NULL;
p = &obj->vma.tree.rb_node;
while (*p) {
- struct i915_vma *pos;
long cmp;
rb = *p;
@@ -196,17 +196,12 @@ vma_create(struct drm_i915_gem_object *obj,
* and dispose of ours.
*/
cmp = i915_vma_compare(pos, vm, view);
- if (cmp == 0) {
- spin_unlock(&obj->vma.lock);
- i915_vm_put(vm);
- i915_vma_free(vma);
- return pos;
- }
-
if (cmp < 0)
p = &rb->rb_right;
- else
+ else if (cmp > 0)
p = &rb->rb_left;
+ else
+ goto err_unlock;
}
rb_link_node(&vma->obj_node, rb, p);
rb_insert_color(&vma->obj_node, &obj->vma.tree);
@@ -229,8 +224,9 @@ vma_create(struct drm_i915_gem_object *obj,
err_unlock:
spin_unlock(&obj->vma.lock);
err_vma:
+ i915_vm_put(vm);
i915_vma_free(vma);
- return ERR_PTR(-E2BIG);
+ return pos;
}
static struct i915_vma *
@@ -308,7 +304,7 @@ static int __vma_bind(struct dma_fence_work *work)
struct i915_vma *vma = vw->vma;
int err;
- err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
+ err = vma->ops->bind_vma(vma->vm, vma, vw->cache_level, vw->flags);
if (err)
atomic_or(I915_VMA_ERROR, &vma->flags);
@@ -411,7 +407,7 @@ int i915_vma_bind(struct i915_vma *vma,
work->vma = vma;
work->cache_level = cache_level;
- work->flags = bind_flags | I915_VMA_ALLOC;
+ work->flags = bind_flags;
/*
* Note we only want to chain up to the migration fence on
@@ -437,7 +433,7 @@ int i915_vma_bind(struct i915_vma *vma,
work->pinned = vma->obj;
}
} else {
- ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
+ ret = vma->ops->bind_vma(vma->vm, vma, cache_level, bind_flags);
if (ret)
return ret;
}
@@ -1265,7 +1261,7 @@ void __i915_vma_evict(struct i915_vma *vma)
if (likely(atomic_read(&vma->vm->open))) {
trace_i915_vma_unbind(vma);
- vma->ops->unbind_vma(vma);
+ vma->ops->unbind_vma(vma->vm, vma);
}
atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
&vma->flags);
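The vma_create() rework above folds the "equal node already in the tree" case into the common unwind path and returns the existing vma. A self-contained sketch of that insert-or-return-existing pattern over a plain binary search tree (deliberately not the kernel rbtree API):

#include <stdlib.h>
#include <stdio.h>

struct node {
	long key;
	struct node *left, *right;
};

/* Insert key, or return the node that already holds an equal key. */
static struct node *insert_or_get(struct node **root, long key)
{
	struct node **p = root, *pos;

	while ((pos = *p)) {
		if (key < pos->key)
			p = &pos->left;
		else if (key > pos->key)
			p = &pos->right;
		else
			return pos;		/* duplicate: hand back the old one */
	}

	pos = calloc(1, sizeof(*pos));
	if (!pos)
		return NULL;
	pos->key = key;
	*p = pos;
	return pos;
}

int main(void)
{
	struct node *root = NULL;
	struct node *a = insert_or_get(&root, 42);
	struct node *b = insert_or_get(&root, 42);

	printf("same node returned: %d\n", a == b);
	return 0;
}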
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 63831cdb7402..9e9082dc8f4b 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -235,7 +235,6 @@ struct i915_vma {
#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
#define I915_VMA_ALLOC_BIT 12
-#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT))
#define I915_VMA_ERROR_BIT 13
#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT))
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 544ac61fbc36..40c590db3c76 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -26,6 +26,7 @@
#include <drm/i915_pciids.h>
#include "display/intel_cdclk.h"
+#include "display/intel_de.h"
#include "intel_device_info.h"
#include "i915_drv.h"
@@ -63,6 +64,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(ELKHARTLAKE),
PLATFORM_NAME(TIGERLAKE),
PLATFORM_NAME(ROCKETLAKE),
+ PLATFORM_NAME(DG1),
};
#undef PLATFORM_NAME
@@ -91,7 +93,6 @@ static const char *iommu_name(void)
void intel_device_info_print_static(const struct intel_device_info *info,
struct drm_printer *p)
{
- drm_printf(p, "engines: %x\n", info->engine_mask);
drm_printf(p, "gen: %d\n", info->gen);
drm_printf(p, "gt: %d\n", info->gt);
drm_printf(p, "iommu: %s\n", iommu_name());
@@ -111,571 +112,18 @@ void intel_device_info_print_static(const struct intel_device_info *info,
#undef PRINT_FLAG
}
-static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
-{
- int s;
-
- drm_printf(p, "slice total: %u, mask=%04x\n",
- hweight8(sseu->slice_mask), sseu->slice_mask);
- drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
- for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
- s, intel_sseu_subslices_per_slice(sseu, s),
- intel_sseu_get_subslices(sseu, s));
- }
- drm_printf(p, "EU total: %u\n", sseu->eu_total);
- drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
- drm_printf(p, "has slice power gating: %s\n",
- yesno(sseu->has_slice_pg));
- drm_printf(p, "has subslice power gating: %s\n",
- yesno(sseu->has_subslice_pg));
- drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
-}
-
void intel_device_info_print_runtime(const struct intel_runtime_info *info,
struct drm_printer *p)
{
- sseu_dump(&info->sseu, p);
-
drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
drm_printf(p, "CS timestamp frequency: %u Hz\n",
info->cs_timestamp_frequency_hz);
}
-static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
- int subslice)
-{
- int slice_stride = sseu->max_subslices * sseu->eu_stride;
-
- return slice * slice_stride + subslice * sseu->eu_stride;
-}
-
-static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
- int subslice)
-{
- int i, offset = sseu_eu_idx(sseu, slice, subslice);
- u16 eu_mask = 0;
-
- for (i = 0; i < sseu->eu_stride; i++) {
- eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
- (i * BITS_PER_BYTE);
- }
-
- return eu_mask;
-}
-
-static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
- u16 eu_mask)
-{
- int i, offset = sseu_eu_idx(sseu, slice, subslice);
-
- for (i = 0; i < sseu->eu_stride; i++) {
- sseu->eu_mask[offset + i] =
- (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
- }
-}
-
-void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
- struct drm_printer *p)
-{
- int s, ss;
-
- if (sseu->max_slices == 0) {
- drm_printf(p, "Unavailable\n");
- return;
- }
-
- for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
- s, intel_sseu_subslices_per_slice(sseu, s),
- intel_sseu_get_subslices(sseu, s));
-
- for (ss = 0; ss < sseu->max_subslices; ss++) {
- u16 enabled_eus = sseu_get_eus(sseu, s, ss);
-
- drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
- ss, hweight16(enabled_eus), enabled_eus);
- }
- }
-}
-
-static u16 compute_eu_total(const struct sseu_dev_info *sseu)
-{
- u16 i, total = 0;
-
- for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
- total += hweight8(sseu->eu_mask[i]);
-
- return total;
-}
-
-static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
- u8 s_en, u32 ss_en, u16 eu_en)
-{
- int s, ss;
-
- /* ss_en represents entire subslice mask across all slices */
- GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
- sizeof(ss_en) * BITS_PER_BYTE);
-
- for (s = 0; s < sseu->max_slices; s++) {
- if ((s_en & BIT(s)) == 0)
- continue;
-
- sseu->slice_mask |= BIT(s);
-
- intel_sseu_set_subslices(sseu, s, ss_en);
-
- for (ss = 0; ss < sseu->max_subslices; ss++)
- if (intel_sseu_has_subslice(sseu, s, ss))
- sseu_set_eus(sseu, s, ss, eu_en);
- }
- sseu->eu_per_subslice = hweight16(eu_en);
- sseu->eu_total = compute_eu_total(sseu);
-}
-
-static void gen12_sseu_info_init(struct drm_i915_private *dev_priv)
-{
- struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- u8 s_en;
- u32 dss_en;
- u16 eu_en = 0;
- u8 eu_en_fuse;
- int eu;
-
- /*
- * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
- * Instead of splitting these, provide userspace with an array
- * of DSS to more closely represent the hardware resource.
- */
- intel_sseu_set_info(sseu, 1, 6, 16);
-
- s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
-
- dss_en = I915_READ(GEN12_GT_DSS_ENABLE);
-
- /* one bit per pair of EUs */
- eu_en_fuse = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
- for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
- if (eu_en_fuse & BIT(eu))
- eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
-
- gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);
-
- /* TGL only supports slice-level power gating */
- sseu->has_slice_pg = 1;
-}
-
-static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
-{
- struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- u8 s_en;
- u32 ss_en;
- u8 eu_en;
-
- if (IS_ELKHARTLAKE(dev_priv))
- intel_sseu_set_info(sseu, 1, 4, 8);
- else
- intel_sseu_set_info(sseu, 1, 8, 8);
-
- s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
- ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
- eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
-
- gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);
-
- /* ICL has no power gating restrictions. */
- sseu->has_slice_pg = 1;
- sseu->has_subslice_pg = 1;
- sseu->has_eu_pg = 1;
-}
-
-static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
-{
- struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- const u32 fuse2 = I915_READ(GEN8_FUSE2);
- int s, ss;
- const int eu_mask = 0xff;
- u32 subslice_mask, eu_en;
-
- intel_sseu_set_info(sseu, 6, 4, 8);
-
- sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
- GEN10_F2_S_ENA_SHIFT;
-
- /* Slice0 */
- eu_en = ~I915_READ(GEN8_EU_DISABLE0);
- for (ss = 0; ss < sseu->max_subslices; ss++)
- sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
- /* Slice1 */
- sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
- eu_en = ~I915_READ(GEN8_EU_DISABLE1);
- sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
- /* Slice2 */
- sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
- sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
- /* Slice3 */
- sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
- eu_en = ~I915_READ(GEN8_EU_DISABLE2);
- sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
- /* Slice4 */
- sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
- sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
- /* Slice5 */
- sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
- eu_en = ~I915_READ(GEN10_EU_DISABLE3);
- sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
-
- subslice_mask = (1 << 4) - 1;
- subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
- GEN10_F2_SS_DIS_SHIFT);
-
- for (s = 0; s < sseu->max_slices; s++) {
- u32 subslice_mask_with_eus = subslice_mask;
-
- for (ss = 0; ss < sseu->max_subslices; ss++) {
- if (sseu_get_eus(sseu, s, ss) == 0)
- subslice_mask_with_eus &= ~BIT(ss);
- }
-
- /*
- * Slice0 can have up to 3 subslices, but there are only 2 in
- * slice1/2.
- */
- intel_sseu_set_subslices(sseu, s, s == 0 ?
- subslice_mask_with_eus :
- subslice_mask_with_eus & 0x3);
- }
-
- sseu->eu_total = compute_eu_total(sseu);
-
- /*
- * CNL is expected to always have a uniform distribution
- * of EU across subslices with the exception that any one
- * EU in any one subslice may be fused off for die
- * recovery.
- */
- sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
- DIV_ROUND_UP(sseu->eu_total,
- intel_sseu_subslice_total(sseu)) :
- 0;
-
- /* No restrictions on Power Gating */
- sseu->has_slice_pg = 1;
- sseu->has_subslice_pg = 1;
- sseu->has_eu_pg = 1;
-}
-
-static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
-{
- struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- u32 fuse;
- u8 subslice_mask = 0;
-
- fuse = I915_READ(CHV_FUSE_GT);
-
- sseu->slice_mask = BIT(0);
- intel_sseu_set_info(sseu, 1, 2, 8);
-
- if (!(fuse & CHV_FGT_DISABLE_SS0)) {
- u8 disabled_mask =
- ((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
- CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
- (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
- CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
-
- subslice_mask |= BIT(0);
- sseu_set_eus(sseu, 0, 0, ~disabled_mask);
- }
-
- if (!(fuse & CHV_FGT_DISABLE_SS1)) {
- u8 disabled_mask =
- ((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
- CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
- (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
- CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
-
- subslice_mask |= BIT(1);
- sseu_set_eus(sseu, 0, 1, ~disabled_mask);
- }
-
- intel_sseu_set_subslices(sseu, 0, subslice_mask);
-
- sseu->eu_total = compute_eu_total(sseu);
-
- /*
- * CHV expected to always have a uniform distribution of EU
- * across subslices.
- */
- sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
- sseu->eu_total /
- intel_sseu_subslice_total(sseu) :
- 0;
- /*
- * CHV supports subslice power gating on devices with more than
- * one subslice, and supports EU power gating on devices with
- * more than one EU pair per subslice.
- */
- sseu->has_slice_pg = 0;
- sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
- sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
-}
-
-static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
-{
- struct intel_device_info *info = mkwrite_device_info(dev_priv);
- struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- int s, ss;
- u32 fuse2, eu_disable, subslice_mask;
- const u8 eu_mask = 0xff;
-
- fuse2 = I915_READ(GEN8_FUSE2);
- sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-
- /* BXT has a single slice and at most 3 subslices. */
- intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
- IS_GEN9_LP(dev_priv) ? 3 : 4, 8);
-
- /*
- * The subslice disable field is global, i.e. it applies
- * to each of the enabled slices.
- */
- subslice_mask = (1 << sseu->max_subslices) - 1;
- subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
- GEN9_F2_SS_DIS_SHIFT);
-
- /*
- * Iterate through enabled slices and subslices to
- * count the total enabled EU.
- */
- for (s = 0; s < sseu->max_slices; s++) {
- if (!(sseu->slice_mask & BIT(s)))
- /* skip disabled slice */
- continue;
-
- intel_sseu_set_subslices(sseu, s, subslice_mask);
-
- eu_disable = I915_READ(GEN9_EU_DISABLE(s));
- for (ss = 0; ss < sseu->max_subslices; ss++) {
- int eu_per_ss;
- u8 eu_disabled_mask;
-
- if (!intel_sseu_has_subslice(sseu, s, ss))
- /* skip disabled subslice */
- continue;
-
- eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;
-
- sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
-
- eu_per_ss = sseu->max_eus_per_subslice -
- hweight8(eu_disabled_mask);
-
- /*
- * Record which subslice(s) has(have) 7 EUs. we
- * can tune the hash used to spread work among
- * subslices if they are unbalanced.
- */
- if (eu_per_ss == 7)
- sseu->subslice_7eu[s] |= BIT(ss);
- }
- }
-
- sseu->eu_total = compute_eu_total(sseu);
-
- /*
- * SKL is expected to always have a uniform distribution
- * of EU across subslices with the exception that any one
- * EU in any one subslice may be fused off for die
- * recovery. BXT is expected to be perfectly uniform in EU
- * distribution.
- */
- sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
- DIV_ROUND_UP(sseu->eu_total,
- intel_sseu_subslice_total(sseu)) :
- 0;
- /*
- * SKL+ supports slice power gating on devices with more than
- * one slice, and supports EU power gating on devices with
- * more than one EU pair per subslice. BXT+ supports subslice
- * power gating on devices with more than one subslice, and
- * supports EU power gating on devices with more than one EU
- * pair per subslice.
- */
- sseu->has_slice_pg =
- !IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
- sseu->has_subslice_pg =
- IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
- sseu->has_eu_pg = sseu->eu_per_subslice > 2;
-
- if (IS_GEN9_LP(dev_priv)) {
-#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask[0] & BIT(ss)))
- info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;
-
- sseu->min_eu_in_pool = 0;
- if (info->has_pooled_eu) {
- if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
- sseu->min_eu_in_pool = 3;
- else if (IS_SS_DISABLED(1))
- sseu->min_eu_in_pool = 6;
- else
- sseu->min_eu_in_pool = 9;
- }
-#undef IS_SS_DISABLED
- }
-}
-
-static void bdw_sseu_info_init(struct drm_i915_private *dev_priv)
-{
- struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- int s, ss;
- u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
-
- fuse2 = I915_READ(GEN8_FUSE2);
- sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
- intel_sseu_set_info(sseu, 3, 3, 8);
-
- /*
- * The subslice disable field is global, i.e. it applies
- * to each of the enabled slices.
- */
- subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
- subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
- GEN8_F2_SS_DIS_SHIFT);
-
- eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
- eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
- ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
- (32 - GEN8_EU_DIS0_S1_SHIFT));
- eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
- ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
- (32 - GEN8_EU_DIS1_S2_SHIFT));
-
- /*
- * Iterate through enabled slices and subslices to
- * count the total enabled EU.
- */
- for (s = 0; s < sseu->max_slices; s++) {
- if (!(sseu->slice_mask & BIT(s)))
- /* skip disabled slice */
- continue;
-
- intel_sseu_set_subslices(sseu, s, subslice_mask);
-
- for (ss = 0; ss < sseu->max_subslices; ss++) {
- u8 eu_disabled_mask;
- u32 n_disabled;
-
- if (!intel_sseu_has_subslice(sseu, s, ss))
- /* skip disabled subslice */
- continue;
-
- eu_disabled_mask =
- eu_disable[s] >> (ss * sseu->max_eus_per_subslice);
-
- sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
-
- n_disabled = hweight8(eu_disabled_mask);
-
- /*
- * Record which subslices have 7 EUs.
- */
- if (sseu->max_eus_per_subslice - n_disabled == 7)
- sseu->subslice_7eu[s] |= 1 << ss;
- }
- }
-
- sseu->eu_total = compute_eu_total(sseu);
-
- /*
- * BDW is expected to always have a uniform distribution of EU across
- * subslices with the exception that any one EU in any one subslice may
- * be fused off for die recovery.
- */
- sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
- DIV_ROUND_UP(sseu->eu_total,
- intel_sseu_subslice_total(sseu)) :
- 0;
-
- /*
- * BDW supports slice power gating on devices with more than
- * one slice.
- */
- sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
- sseu->has_subslice_pg = 0;
- sseu->has_eu_pg = 0;
-}
-
-static void hsw_sseu_info_init(struct drm_i915_private *dev_priv)
-{
- struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- u32 fuse1;
- u8 subslice_mask = 0;
- int s, ss;
-
- /*
- * There isn't a register to tell us how many slices/subslices. We
- * work off the PCI-ids here.
- */
- switch (INTEL_INFO(dev_priv)->gt) {
- default:
- MISSING_CASE(INTEL_INFO(dev_priv)->gt);
- /* fall through */
- case 1:
- sseu->slice_mask = BIT(0);
- subslice_mask = BIT(0);
- break;
- case 2:
- sseu->slice_mask = BIT(0);
- subslice_mask = BIT(0) | BIT(1);
- break;
- case 3:
- sseu->slice_mask = BIT(0) | BIT(1);
- subslice_mask = BIT(0) | BIT(1);
- break;
- }
-
- fuse1 = I915_READ(HSW_PAVP_FUSE1);
- switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
- default:
- MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
- HSW_F1_EU_DIS_SHIFT);
- /* fall through */
- case HSW_F1_EU_DIS_10EUS:
- sseu->eu_per_subslice = 10;
- break;
- case HSW_F1_EU_DIS_8EUS:
- sseu->eu_per_subslice = 8;
- break;
- case HSW_F1_EU_DIS_6EUS:
- sseu->eu_per_subslice = 6;
- break;
- }
-
- intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
- hweight8(subslice_mask),
- sseu->eu_per_subslice);
-
- for (s = 0; s < sseu->max_slices; s++) {
- intel_sseu_set_subslices(sseu, s, subslice_mask);
-
- for (ss = 0; ss < sseu->max_subslices; ss++) {
- sseu_set_eus(sseu, s, ss,
- (1UL << sseu->eu_per_subslice) - 1);
- }
- }
-
- sseu->eu_total = compute_eu_total(sseu);
-
- /* No powergating for you. */
- sseu->has_slice_pg = 0;
- sseu->has_subslice_pg = 0;
- sseu->has_eu_pg = 0;
-}
-
static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
- u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
+ u32 ts_override = intel_uncore_read(&dev_priv->uncore,
+ GEN9_TIMESTAMP_OVERRIDE);
u32 base_freq, frac_freq;
base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
@@ -738,6 +186,7 @@ static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 f12_5_mhz = 12500000;
u32 f19_2_mhz = 19200000;
u32 f24_mhz = 24000000;
@@ -759,7 +208,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
*/
return f12_5_mhz;
} else if (INTEL_GEN(dev_priv) <= 9) {
- u32 ctc_reg = I915_READ(CTC_MODE);
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
u32 freq = 0;
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
@@ -777,7 +226,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
return freq;
} else if (INTEL_GEN(dev_priv) <= 12) {
- u32 ctc_reg = I915_READ(CTC_MODE);
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
u32 freq = 0;
/* First figure out the reference frequency. There are 2 ways
@@ -788,7 +237,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
freq = read_reference_ts_freq(dev_priv);
} else {
- u32 rpm_config_reg = I915_READ(RPM_CONFIG0);
+ u32 rpm_config_reg = intel_uncore_read(uncore, RPM_CONFIG0);
if (INTEL_GEN(dev_priv) <= 10)
freq = gen10_get_crystal_clock_freq(dev_priv,
@@ -967,8 +416,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) &&
HAS_PCH_SPLIT(dev_priv)) {
- u32 fuse_strap = I915_READ(FUSE_STRAP);
- u32 sfuse_strap = I915_READ(SFUSE_STRAP);
+ u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
+ u32 sfuse_strap = intel_de_read(dev_priv, SFUSE_STRAP);
/*
* SFUSE_STRAP is supposed to have a bit signalling the display
@@ -993,7 +442,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
- u32 dfsm = I915_READ(SKL_DFSM);
+ u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
info->pipe_mask &= ~BIT(PIPE_A);
@@ -1027,22 +476,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->display.has_dsc = 0;
}
- /* Initialize slice/subslice/EU info */
- if (IS_HASWELL(dev_priv))
- hsw_sseu_info_init(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- cherryview_sseu_info_init(dev_priv);
- else if (IS_BROADWELL(dev_priv))
- bdw_sseu_info_init(dev_priv);
- else if (IS_GEN(dev_priv, 9))
- gen9_sseu_info_init(dev_priv);
- else if (IS_GEN(dev_priv, 10))
- gen10_sseu_info_init(dev_priv);
- else if (IS_GEN(dev_priv, 11))
- gen11_sseu_info_init(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 12)
- gen12_sseu_info_init(dev_priv);
-
if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
drm_info(&dev_priv->drm,
"Disabling ppGTT for VT-d support\n");
@@ -1073,67 +506,3 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
yesno(caps->has_logical_contexts));
drm_printf(p, "scheduler: %x\n", caps->scheduler);
}
-
-/*
- * Determine which engines are fused off in our particular hardware. Since the
- * fuse register is in the blitter powerwell, we need forcewake to be ready at
- * this point (but later we need to prune the forcewake domains for engines that
- * are indeed fused off).
- */
-void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
-{
- struct intel_device_info *info = mkwrite_device_info(dev_priv);
- unsigned int logical_vdbox = 0;
- unsigned int i;
- u32 media_fuse;
- u16 vdbox_mask;
- u16 vebox_mask;
-
- if (INTEL_GEN(dev_priv) < 11)
- return;
-
- media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
-
- vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
- vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
- GEN11_GT_VEBOX_DISABLE_SHIFT;
-
- for (i = 0; i < I915_MAX_VCS; i++) {
- if (!HAS_ENGINE(dev_priv, _VCS(i))) {
- vdbox_mask &= ~BIT(i);
- continue;
- }
-
- if (!(BIT(i) & vdbox_mask)) {
- info->engine_mask &= ~BIT(_VCS(i));
- drm_dbg(&dev_priv->drm, "vcs%u fused off\n", i);
- continue;
- }
-
- /*
- * In Gen11, only even numbered logical VDBOXes are
- * hooked up to an SFC (Scaler & Format Converter) unit.
- * In TGL each VDBOX has access to an SFC.
- */
- if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0)
- RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
- }
- drm_dbg(&dev_priv->drm, "vdbox enable: %04x, instances: %04lx\n",
- vdbox_mask, VDBOX_MASK(dev_priv));
- GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
-
- for (i = 0; i < I915_MAX_VECS; i++) {
- if (!HAS_ENGINE(dev_priv, _VECS(i))) {
- vebox_mask &= ~BIT(i);
- continue;
- }
-
- if (!(BIT(i) & vebox_mask)) {
- info->engine_mask &= ~BIT(_VECS(i));
- drm_dbg(&dev_priv->drm, "vecs%u fused off\n", i);
- }
- }
- drm_dbg(&dev_priv->drm, "vebox enable: %04x, instances: %04lx\n",
- vebox_mask, VEBOX_MASK(dev_priv));
- GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
-}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 8d62b8538585..fd2385457ab6 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -82,6 +82,7 @@ enum intel_platform {
/* gen12 */
INTEL_TIGERLAKE,
INTEL_ROCKETLAKE,
+ INTEL_DG1,
INTEL_MAX_PLATFORMS
};
@@ -122,6 +123,7 @@ enum intel_ppgtt_type {
func(has_logical_ring_contexts); \
func(has_logical_ring_elsq); \
func(has_logical_ring_preemption); \
+ func(has_master_unit_irq); \
func(has_pooled_eu); \
func(has_rc6); \
func(has_rc6p); \
@@ -157,7 +159,7 @@ struct intel_device_info {
u8 gen;
u8 gt; /* GT number, 0 if undefined */
- intel_engine_mask_t engine_mask; /* Engines supported by the HW */
+ intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */
enum intel_platform platform;
@@ -219,18 +221,10 @@ struct intel_runtime_info {
u8 num_sprites[I915_MAX_PIPES];
u8 num_scalers[I915_MAX_PIPES];
- u8 num_engines;
-
- /* Slice/subslice/EU info */
- struct sseu_dev_info sseu;
-
u32 rawclk_freq;
u32 cs_timestamp_frequency_hz;
u32 cs_timestamp_period_ns;
-
- /* Media engine access to SFC per instance */
- u8 vdbox_sfc_access;
};
struct intel_driver_caps {
@@ -247,10 +241,6 @@ void intel_device_info_print_static(const struct intel_device_info *info,
struct drm_printer *p);
void intel_device_info_print_runtime(const struct intel_runtime_info *info,
struct drm_printer *p);
-void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
- struct drm_printer *p);
-
-void intel_device_info_init_mmio(struct drm_i915_private *dev_priv);
void intel_driver_caps_print(const struct intel_driver_caps *caps,
struct drm_printer *p);
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index c668e99eb2e4..6c97192e9ca8 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -188,6 +188,12 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
{
struct pci_dev *pch = NULL;
+ /* DG1 has south engine display on the same PCI device */
+ if (IS_DG1(dev_priv)) {
+ dev_priv->pch_type = PCH_DG1;
+ return;
+ }
+
/*
* The reason to probe ISA bridge instead of Dev31:Fun0 is to
* make graphics device passthrough work easy for VMM, that only
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index 3053d1ce398b..06d2cd50af0b 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -26,6 +26,9 @@ enum intel_pch {
PCH_JSP, /* Jasper Lake PCH */
PCH_MCC, /* Mule Creek Canyon PCH */
PCH_TGP, /* Tiger Lake PCH */
+
+ /* Fake PCHs, functionality handled on the same PCI dev */
+ PCH_DG1 = 1024,
};
#define INTEL_PCH_DEVICE_ID_MASK 0xff80
@@ -56,6 +59,7 @@ enum intel_pch {
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
#define HAS_PCH_JSP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_JSP)
#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 565a2b9da3b3..cfabbe0481ab 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -94,16 +94,13 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN8_CHICKEN_DCPR_1,
I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
- /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
- /* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
+ /*
+ * WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl
+ * Display WA #0859: skl,bxt,kbl,glk,cfl
+ */
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
- DISP_FBC_WM_DIS |
DISP_FBC_MEMORY_WAKE);
- /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
- ILK_DPFC_DISABLE_DUMMY0);
-
if (IS_SKYLAKE(dev_priv)) {
/* WaDisableDopClockGating */
I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
@@ -140,6 +137,20 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
* application, using batch buffers or any other means.
*/
I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
+
+ /*
+ * WaFbcTurnOffFbcWatermark:bxt
+ * Display WA #0562: bxt
+ */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS);
+
+ /*
+ * WaFbcHighMemBwCorruptionAvoidance:bxt
+ * Display WA #0883: bxt
+ */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_DISABLE_DUMMY0);
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7098,6 +7109,10 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ /* Wa_1409120013:icl,ehl */
+ I915_WRITE(ILK_DPFC_CHICKEN,
+ ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+
/* This is not a WA. Enable to reduce Sampler power */
(editor: the original comment read "This is not an Wa.")
I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
@@ -7112,9 +7127,13 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
u32 vd_pg_enable = 0;
unsigned int i;
+ /* Wa_1409120013:tgl */
+ I915_WRITE(ILK_DPFC_CHICKEN,
+ ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+
/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
for (i = 0; i < I915_MAX_VCS; i++) {
- if (HAS_ENGINE(dev_priv, _VCS(i)))
+ if (HAS_ENGINE(&dev_priv->gt, _VCS(i)))
vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
VDN_MFX_POWERGATE_ENABLE(i);
}
@@ -7155,7 +7174,10 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN8_CHICKEN_DCPR_1,
I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
- /* WaFbcWakeMemOn:cnl */
+ /*
+ * WaFbcWakeMemOn:cnl
+ * Display WA #0859: cnl
+ */
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_FBC_MEMORY_WAKE);
@@ -7181,7 +7203,17 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
cnp_init_clock_gating(dev_priv);
gen9_init_clock_gating(dev_priv);
- /* WaFbcNukeOnHostModify:cfl */
+ /*
+ * WaFbcTurnOffFbcWatermark:cfl
+ * Display WA #0562: cfl
+ */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS);
+
+ /*
+ * WaFbcNukeOnHostModify:cfl
+ * Display WA #0873: cfl
+ */
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
@@ -7200,7 +7232,17 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
- /* WaFbcNukeOnHostModify:kbl */
+ /*
+ * WaFbcTurnOffFbcWatermark:kbl
+ * Display WA #0562: kbl
+ */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS);
+
+ /*
+ * WaFbcNukeOnHostModify:kbl
+ * Display WA #0873: kbl
+ */
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
@@ -7213,15 +7255,37 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
FBC_LLC_FULLY_OPEN);
- /* WaFbcNukeOnHostModify:skl */
+ /*
+ * WaFbcTurnOffFbcWatermark:skl
+ * Display WA #0562: skl
+ */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS);
+
+ /*
+ * WaFbcNukeOnHostModify:skl
+ * Display WA #0873: skl
+ */
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+
+ /*
+ * WaFbcHighMemBwCorruptionAvoidance:skl
+ * Display WA #0883: skl
+ */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_DISABLE_DUMMY0);
}
static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
+ /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+ I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
+ I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+ HSW_FBCQ_DIS);
+
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -7269,6 +7333,11 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+ I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
+ I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+ HSW_FBCQ_DIS);
+
/* This is required by WaCatErrorRejectionIssue:hsw */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@@ -7286,6 +7355,11 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+ /* WaFbcAsynchFlipDisableFbcQueue:ivb */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+
/* WaDisableBackToBackFlipFix:ivb */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
@@ -7471,6 +7545,16 @@ static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(MEM_MODE,
_MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
+
+ /*
+ * Have FBC ignore 3D activity since we use software
+ * render tracking, and otherwise a pure 3D workload
+ * (even if it just renders a single frame and then does
+ * absolutely nothing) would not allow FBC to recompress
+ * until a 2D blit occurs.
+ */
+ I915_WRITE(SCPD0,
+ _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
}
static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 592364aed2da..f5edee17902a 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -142,7 +142,7 @@ fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
intel_uncore_forcewake_domain_to_str(d->id));
- add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
+ add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
}
}
@@ -219,7 +219,7 @@ fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
intel_uncore_forcewake_domain_to_str(d->id));
- add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
+ add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
}
}
@@ -1529,6 +1529,8 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
if (INTEL_GEN(i915) >= 11) {
+ /* we'll prune the domains of missing engines later */
+ intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
int i;
uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
@@ -1541,7 +1543,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE_ACK_BLITTER_GEN9);
for (i = 0; i < I915_MAX_VCS; i++) {
- if (!HAS_ENGINE(i915, _VCS(i)))
+ if (!__HAS_ENGINE(emask, _VCS(i)))
continue;
fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
@@ -1549,7 +1551,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
}
for (i = 0; i < I915_MAX_VECS; i++) {
- if (!HAS_ENGINE(i915, _VECS(i)))
+ if (!__HAS_ENGINE(emask, _VECS(i)))
continue;
fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
@@ -1844,20 +1846,20 @@ out_mmio_cleanup:
* the forcewake domains. Prune them, to make sure they only reference existing
* engines.
*/
-void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
+void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
+ struct intel_gt *gt)
{
- struct drm_i915_private *i915 = uncore->i915;
enum forcewake_domains fw_domains = uncore->fw_domains;
enum forcewake_domain_id domain_id;
int i;
- if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11)
+ if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(uncore->i915) < 11)
return;
for (i = 0; i < I915_MAX_VCS; i++) {
domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
- if (HAS_ENGINE(i915, _VCS(i)))
+ if (HAS_ENGINE(gt, _VCS(i)))
continue;
if (fw_domains & BIT(domain_id))
@@ -1867,7 +1869,7 @@ void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
for (i = 0; i < I915_MAX_VECS; i++) {
domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
- if (HAS_ENGINE(i915, _VECS(i)))
+ if (HAS_ENGINE(gt, _VECS(i)))
continue;
if (fw_domains & BIT(domain_id))
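The pruning above now keys off the gt's runtime engine mask instead of the top-level device. Conceptually it is just "for every possible media engine, drop the matching forcewake domain if the engine is absent"; a hedged, stand-alone sketch with invented bit positions:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))
#define MAX_VCS	4

int main(void)
{
	uint32_t engine_mask = BIT(0) | BIT(2);	/* VCS0 and VCS2 present */
	uint32_t fw_domains  = BIT(0) | BIT(1) | BIT(2) | BIT(3); /* one domain per VCS */
	int i;

	for (i = 0; i < MAX_VCS; i++) {
		if (engine_mask & BIT(i))
			continue;		/* engine exists, keep its domain */
		fw_domains &= ~BIT(i);		/* engine absent, drop the domain */
	}

	printf("remaining forcewake domains: %#x\n", fw_domains);
	return 0;
}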
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 8d3aa8b9acf9..c4b22d9d0b45 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -35,6 +35,7 @@
struct drm_i915_private;
struct intel_runtime_pm;
struct intel_uncore;
+struct intel_gt;
struct intel_uncore_mmio_debug {
spinlock_t lock; /** lock is also taken in irq contexts. */
@@ -186,7 +187,8 @@ intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
void intel_uncore_init_early(struct intel_uncore *uncore,
struct drm_i915_private *i915);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
-void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore);
+void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
+ struct intel_gt *gt);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
void intel_uncore_fini_mmio(struct intel_uncore *uncore);
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index be54570c407c..c2d001d9c0ec 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -280,11 +280,144 @@ out:
return err;
}
+static int live_noa_gpr(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_perf_stream *stream;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ u32 *cs, *store;
+ void *scratch;
+ u32 gpr0;
+ int err;
+ int i;
+
+ /* Check that the delay does not clobber user context state (GPR) */
+
+ stream = test_stream(&i915->perf);
+ if (!stream)
+ return -ENOMEM;
+
+ gpr0 = i915_mmio_reg_offset(GEN8_RING_CS_GPR(stream->engine->mmio_base, 0));
+
+ ce = intel_context_create(stream->engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ /* Poison the ce->vm so we detect writes not to the GGTT gt->scratch */
+ scratch = kmap(ce->vm->scratch[0].base.page);
+ memset(scratch, POISON_FREE, PAGE_SIZE);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+ i915_request_get(rq);
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out_rq;
+ }
+ }
+
+ /* Fill the 16 qword [32 dword] GPR with a known unlikely value */
+ cs = intel_ring_begin(rq, 2 * 32 + 2);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(rq);
+ goto out_rq;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(32);
+ for (i = 0; i < 32; i++) {
+ *cs++ = gpr0 + i * sizeof(u32);
+ *cs++ = STACK_MAGIC;
+ }
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Execute the GPU delay */
+ err = rq->engine->emit_bb_start(rq,
+ i915_ggtt_offset(stream->noa_wait), 0,
+ I915_DISPATCH_SECURE);
+ if (err) {
+ i915_request_add(rq);
+ goto out_rq;
+ }
+
+ /* Read the GPR back, using the pinned global HWSP for convenience */
+ store = memset32(rq->engine->status_page.addr + 512, 0, 32);
+ for (i = 0; i < 32; i++) {
+ u32 cmd;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(rq);
+ goto out_rq;
+ }
+
+ cmd = MI_STORE_REGISTER_MEM;
+ if (INTEL_GEN(i915) >= 8)
+ cmd++;
+ cmd |= MI_USE_GGTT;
+
+ *cs++ = cmd;
+ *cs++ = gpr0 + i * sizeof(u32);
+ *cs++ = i915_ggtt_offset(rq->engine->status_page.vma) +
+ offset_in_page(store) +
+ i * sizeof(u32);
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+ }
+
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ / 2) < 0) {
+ pr_err("noa_wait timed out\n");
+ intel_gt_set_wedged(stream->engine->gt);
+ err = -EIO;
+ goto out_rq;
+ }
+
+ /* Verify that the GPR contain our expected values */
+ for (i = 0; i < 32; i++) {
+ if (store[i] == STACK_MAGIC)
+ continue;
+
+ pr_err("GPR[%d] lost, found:%08x, expected:%08x!\n",
+ i, store[i], STACK_MAGIC);
+ err = -EINVAL;
+ }
+
+ /* Verify that the user's scratch page was not used for GPR storage */
+ if (memchr_inv(scratch, POISON_FREE, PAGE_SIZE)) {
+ pr_err("Scratch page overwritten!\n");
+ igt_hexdump(scratch, 4096);
+ err = -EINVAL;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_ce:
+ kunmap(ce->vm->scratch[0].base.page);
+ intel_context_put(ce);
+out:
+ stream_destroy(stream);
+ return err;
+}
+
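Stripped of the GPU plumbing, live_noa_gpr() follows a fill, exercise, read back, compare pattern. The sketch below shows only that checking shape; SENTINEL is my own stand-in for the kernel's STACK_MAGIC and the "workload" here is a no-op.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SENTINEL 0xdeadbeefu	/* local stand-in, not the kernel constant */
#define NUM_GPR	 32

int main(void)
{
	uint32_t gpr[NUM_GPR];
	uint32_t store[NUM_GPR];
	int err = 0, i;

	/* 1. Fill the registers with a known, unlikely value. */
	for (i = 0; i < NUM_GPR; i++)
		gpr[i] = SENTINEL;

	/* 2. Run the workload under test (the noa_wait batch in the selftest);
	 *    here it is a no-op, so the values must survive untouched. */

	/* 3. Read everything back and compare against the sentinel. */
	memcpy(store, gpr, sizeof(store));
	for (i = 0; i < NUM_GPR; i++) {
		if (store[i] != SENTINEL) {
			fprintf(stderr, "GPR[%d] lost: %#x\n", i, store[i]);
			err = -1;
		}
	}

	printf("%s\n", err ? "FAIL" : "PASS");
	return err ? 1 : 0;
}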
int i915_perf_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
SUBTEST(live_noa_delay),
+ SUBTEST(live_noa_gpr),
};
struct i915_perf *perf = &i915->perf;
int err;
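
The live_noa_gpr test above is a poison-and-verify check: state that must survive (the 32 GPR dwords and a poisoned scratch page) is filled with known values, the operation under test (the noa_wait batch) is executed, and the state is compared afterwards. A minimal userspace analogue of that pattern, with a hypothetical operation_under_test() standing in for the GPU delay, might look like the sketch below; the kernel test drives the hardware with MI_LOAD_REGISTER_IMM and MI_STORE_REGISTER_MEM rather than plain memory.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAGIC  0xdeadbeefU		/* analogue of STACK_MAGIC */
#define POISON 0x5a			/* analogue of POISON_FREE */

static void operation_under_test(void)	/* hypothetical stand-in */
{
}

int main(void)
{
	uint32_t gpr[32];
	unsigned char scratch[4096];
	int err = 0, i;

	/* Fill the state that must survive with known, unlikely values. */
	for (i = 0; i < 32; i++)
		gpr[i] = MAGIC;
	memset(scratch, POISON, sizeof(scratch));

	operation_under_test();

	/* Anything other than MAGIC means the operation clobbered a GPR. */
	for (i = 0; i < 32; i++)
		if (gpr[i] != MAGIC) {
			fprintf(stderr, "GPR[%d] lost: %08x\n", i, gpr[i]);
			err = 1;
		}

	/* And the scratch page must still hold only the poison pattern. */
	for (i = 0; i < (int)sizeof(scratch); i++)
		if (scratch[i] != POISON) {
			fprintf(stderr, "scratch overwritten at %d\n", i);
			err = 1;
			break;
		}

	return err;
}
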
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 9271aad7f779..57dd6f5122ee 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -1454,7 +1454,7 @@ out_flush:
idx++;
}
pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
- num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
+ num_waits, num_fences, idx, ncpus);
ret = igt_live_test_end(&live) ?: ret;
out_contexts:
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 9b105b811f1f..9a46be05425a 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -190,7 +190,8 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(i915, &i915->ggtt);
i915->gt.vm = i915_vm_get(&i915->ggtt.vm);
- mkwrite_device_info(i915)->engine_mask = BIT(0);
+ mkwrite_device_info(i915)->platform_engine_mask = BIT(0);
+ i915->gt.info.engine_mask = BIT(0);
i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0);
if (!i915->gt.engine[RCS0])
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index edc5e3dda8ca..b173086411ef 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -38,7 +38,8 @@ static void mock_insert_entries(struct i915_address_space *vm,
{
}
-static int mock_bind_ppgtt(struct i915_vma *vma,
+static int mock_bind_ppgtt(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
@@ -47,7 +48,8 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
return 0;
}
-static void mock_unbind_ppgtt(struct i915_vma *vma)
+static void mock_unbind_ppgtt(struct i915_address_space *vm,
+ struct i915_vma *vma)
{
}
@@ -88,7 +90,8 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
return ppgtt;
}
-static int mock_bind_ggtt(struct i915_vma *vma,
+static int mock_bind_ggtt(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
@@ -96,7 +99,8 @@ static int mock_bind_ggtt(struct i915_vma *vma,
return 0;
}
-static void mock_unbind_ggtt(struct i915_vma *vma)
+static void mock_unbind_ggtt(struct i915_address_space *vm,
+ struct i915_vma *vma)
{
}
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index 04e1d38d41f7..08802e5177f6 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -812,7 +812,7 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc *crtc = &pipe->crtc;
struct drm_plane *plane = &pipe->plane;
struct drm_device *drm = crtc->dev;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
const struct drm_display_mode *mode = &cstate->mode;
struct drm_framebuffer *fb = plane->state->fb;
u32 format = fb->format->format;
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index d300be5ee463..82137ab76cfc 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -209,7 +209,6 @@ static int mcde_modeset_init(struct drm_device *drm)
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
- drm_fbdev_generic_setup(drm, 32);
return 0;
}
@@ -254,6 +253,8 @@ static int mcde_drm_bind(struct device *dev)
if (ret < 0)
goto unbind;
+ drm_fbdev_generic_setup(drm, 32);
+
return 0;
unbind:
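
The mcde change above moves drm_fbdev_generic_setup() out of mcde_modeset_init() and calls it only after drm_dev_register() has succeeded, which is the ordering the generic fbdev client expects: the emulated fbdev attaches to an already-registered DRM device. A minimal bind sequence under that assumption is sketched below; it is not the actual mcde code, and example_modeset_init() is a placeholder.

#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>

int example_modeset_init(struct drm_device *drm);	/* hypothetical */

static int example_drm_bind(struct drm_device *drm)
{
	int ret;

	ret = example_modeset_init(drm);
	if (ret)
		return ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* Attach generic fbdev emulation only once the device is registered. */
	drm_fbdev_generic_setup(drm, 32);

	return 0;
}
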
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index c420f5a3d33b..aa74aac3cbcc 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -6,12 +6,12 @@ config DRM_MEDIATEK
depends on COMMON_CLK
depends on HAVE_ARM_SMCCC
depends on OF
+ depends on MTK_MMSYS
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
select MEMORY
- select MTK_MMSYS
select MTK_SMI
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index f6adc5b147c9..040834bdee9e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -189,7 +189,6 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
int ret;
int i;
- DRM_DEBUG_DRIVER("%s\n", __func__);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk);
if (ret) {
@@ -209,7 +208,6 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
{
int i;
- DRM_DEBUG_DRIVER("%s\n", __func__);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
}
@@ -254,7 +252,6 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
int ret;
int i;
- DRM_DEBUG_DRIVER("%s\n", __func__);
if (WARN_ON(!crtc->state))
return -EINVAL;
@@ -295,7 +292,6 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
goto err_mutex_unprepare;
}
- DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n");
for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
mtk_crtc->ddp_comp[i]->id,
@@ -345,7 +341,6 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
struct drm_crtc *crtc = &mtk_crtc->base;
int i;
- DRM_DEBUG_DRIVER("%s\n", __func__);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
if (i == 1)
@@ -827,7 +822,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
mtk_crtc->cmdq_client =
- cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base),
+ cmdq_mbox_create(mtk_crtc->mmsys_dev,
+ drm_crtc_index(&mtk_crtc->base),
2000);
if (IS_ERR(mtk_crtc->cmdq_client)) {
dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 6bd369434d9d..040a8f393fe2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -444,7 +444,6 @@ static int mtk_drm_probe(struct platform_device *pdev)
if (!private)
return -ENOMEM;
- private->data = of_device_get_match_data(dev);
private->mmsys_dev = dev->parent;
if (!private->mmsys_dev) {
dev_err(dev, "Failed to get MMSYS device\n");
@@ -514,7 +513,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
goto err_node;
}
- ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
+ ret = mtk_ddp_comp_init(dev->parent, node, comp,
+ comp_id, NULL);
if (ret) {
of_node_put(node);
goto err_node;
@@ -571,7 +571,6 @@ static int mtk_drm_sys_suspend(struct device *dev)
int ret;
ret = drm_mode_config_helper_suspend(drm);
- DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
return ret;
}
@@ -583,7 +582,6 @@ static int mtk_drm_sys_resume(struct device *dev)
int ret;
ret = drm_mode_config_helper_resume(drm);
- DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index c2bd683a87c8..92141a19681b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -164,6 +164,16 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
true, true);
}
+static void mtk_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+
+ state->pending.enable = false;
+ wmb(); /* Make sure the above parameter is set before update */
+ state->pending.dirty = true;
+}
+
static void mtk_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -178,6 +188,11 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
if (!crtc || WARN_ON(!fb))
return;
+ if (!plane->state->visible) {
+ mtk_plane_atomic_disable(plane, old_state);
+ return;
+ }
+
gem = fb->obj[0];
mtk_gem = to_mtk_gem_obj(gem);
addr = mtk_gem->dma_addr;
@@ -200,16 +215,6 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
state->pending.dirty = true;
}
-static void mtk_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
-
- state->pending.enable = false;
- wmb(); /* Make sure the above parameter is set before update */
- state->pending.dirty = true;
-}
-
static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
.prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = mtk_plane_atomic_check,
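
Reordering mtk_plane_atomic_disable() above mtk_plane_atomic_update() lets the update hook fall back to the disable path when atomic_check has marked the plane not visible (for instance fully clipped), instead of programming a framebuffer that will never be shown. The guard takes the same general shape in any drm_plane_helper_funcs atomic_update implementation; the sketch below uses hypothetical driver state rather than the MediaTek structures.

#include <drm/drm_plane.h>
#include <drm/drm_framebuffer.h>

/* Hypothetical per-plane shadow state a driver flushes to hardware later. */
struct example_pending_state {
	bool enable;
	bool dirty;
};

static struct example_pending_state example_pending;

static void example_plane_atomic_disable(struct drm_plane *plane,
					 struct drm_plane_state *old_state)
{
	example_pending.enable = false;
	example_pending.dirty = true;
}

static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;

	if (!state->crtc || !state->fb)
		return;

	/* A plane that is not visible is programmed as if it were disabled. */
	if (!state->visible) {
		example_plane_atomic_disable(plane, old_state);
		return;
	}

	/* ... derive scanout address, pitch and format from state->fb ... */
	example_pending.enable = true;
	example_pending.dirty = true;
}
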
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 270bf22c98fe..16fd99dcdacf 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -17,6 +17,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -181,9 +182,9 @@ struct mtk_dsi {
struct device *dev;
struct mipi_dsi_host host;
struct drm_encoder encoder;
- struct drm_connector conn;
- struct drm_panel *panel;
- struct drm_bridge *bridge;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct drm_connector *connector;
struct phy *phy;
void __iomem *regs;
@@ -206,14 +207,9 @@ struct mtk_dsi {
const struct mtk_dsi_driver_data *driver_data;
};
-static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
-{
- return container_of(e, struct mtk_dsi, encoder);
-}
-
-static inline struct mtk_dsi *connector_to_dsi(struct drm_connector *c)
+static inline struct mtk_dsi *bridge_to_dsi(struct drm_bridge *b)
{
- return container_of(c, struct mtk_dsi, conn);
+ return container_of(b, struct mtk_dsi, bridge);
}
static inline struct mtk_dsi *host_to_dsi(struct mipi_dsi_host *h)
@@ -316,10 +312,7 @@ static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
{
- u32 tmp_reg1;
-
- tmp_reg1 = readl(dsi->regs + DSI_PHY_LCCON);
- return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false;
+ return readl(dsi->regs + DSI_PHY_LCCON) & LC_HS_TX_EN;
}
static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
@@ -681,16 +674,7 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
mtk_dsi_lane0_ulp_mode_leave(dsi);
mtk_dsi_clk_hs_mode(dsi, 0);
- if (dsi->panel) {
- if (drm_panel_prepare(dsi->panel)) {
- DRM_ERROR("failed to prepare the panel\n");
- goto err_disable_digital_clk;
- }
- }
-
return 0;
-err_disable_digital_clk:
- clk_disable_unprepare(dsi->digital_clk);
err_disable_engine_clk:
clk_disable_unprepare(dsi->engine_clk);
err_phy_power_off:
@@ -717,15 +701,7 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
*/
mtk_dsi_stop(dsi);
- if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) {
- if (dsi->panel) {
- if (drm_panel_unprepare(dsi->panel)) {
- DRM_ERROR("failed to unprepare the panel\n");
- return;
- }
- }
- }
-
+ mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
mtk_dsi_reset_engine(dsi);
mtk_dsi_lane0_ulp_mode_enter(dsi);
mtk_dsi_clk_ulp_mode_enter(dsi);
@@ -756,19 +732,7 @@ static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
mtk_dsi_start(dsi);
- if (dsi->panel) {
- if (drm_panel_enable(dsi->panel)) {
- DRM_ERROR("failed to enable the panel\n");
- goto err_dsi_power_off;
- }
- }
-
dsi->enabled = true;
-
- return;
-err_dsi_power_off:
- mtk_dsi_stop(dsi);
- mtk_dsi_poweroff(dsi);
}
static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
@@ -776,155 +740,51 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
if (!dsi->enabled)
return;
- if (dsi->panel) {
- if (drm_panel_disable(dsi->panel)) {
- DRM_ERROR("failed to disable the panel\n");
- return;
- }
- }
-
mtk_dsi_poweroff(dsi);
dsi->enabled = false;
}
-static bool mtk_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int mtk_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
- return true;
+ struct mtk_dsi *dsi = bridge_to_dsi(bridge);
+
+ /* Attach the panel or bridge to the dsi bridge */
+ return drm_bridge_attach(bridge->encoder, dsi->next_bridge,
+ &dsi->bridge, flags);
}
-static void mtk_dsi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
+static void mtk_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted)
{
- struct mtk_dsi *dsi = encoder_to_dsi(encoder);
+ struct mtk_dsi *dsi = bridge_to_dsi(bridge);
drm_display_mode_to_videomode(adjusted, &dsi->vm);
}
-static void mtk_dsi_encoder_disable(struct drm_encoder *encoder)
+static void mtk_dsi_bridge_disable(struct drm_bridge *bridge)
{
- struct mtk_dsi *dsi = encoder_to_dsi(encoder);
+ struct mtk_dsi *dsi = bridge_to_dsi(bridge);
mtk_output_dsi_disable(dsi);
}
-static void mtk_dsi_encoder_enable(struct drm_encoder *encoder)
+static void mtk_dsi_bridge_enable(struct drm_bridge *bridge)
{
- struct mtk_dsi *dsi = encoder_to_dsi(encoder);
+ struct mtk_dsi *dsi = bridge_to_dsi(bridge);
mtk_output_dsi_enable(dsi);
}
-static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
-{
- struct mtk_dsi *dsi = connector_to_dsi(connector);
-
- return drm_panel_get_modes(dsi->panel, connector);
-}
-
-static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
- .mode_fixup = mtk_dsi_encoder_mode_fixup,
- .mode_set = mtk_dsi_encoder_mode_set,
- .disable = mtk_dsi_encoder_disable,
- .enable = mtk_dsi_encoder_enable,
-};
-
-static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
+ .attach = mtk_dsi_bridge_attach,
+ .disable = mtk_dsi_bridge_disable,
+ .enable = mtk_dsi_bridge_enable,
+ .mode_set = mtk_dsi_bridge_mode_set,
};
-static const struct drm_connector_helper_funcs
- mtk_dsi_connector_helper_funcs = {
- .get_modes = mtk_dsi_connector_get_modes,
-};
-
-static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi)
-{
- int ret;
-
- ret = drm_connector_init(drm, &dsi->conn, &mtk_dsi_connector_funcs,
- DRM_MODE_CONNECTOR_DSI);
- if (ret) {
- DRM_ERROR("Failed to connector init to drm\n");
- return ret;
- }
-
- drm_connector_helper_add(&dsi->conn, &mtk_dsi_connector_helper_funcs);
-
- dsi->conn.dpms = DRM_MODE_DPMS_OFF;
- drm_connector_attach_encoder(&dsi->conn, &dsi->encoder);
-
- if (dsi->panel) {
- ret = drm_panel_attach(dsi->panel, &dsi->conn);
- if (ret) {
- DRM_ERROR("Failed to attach panel to drm\n");
- goto err_connector_cleanup;
- }
- }
-
- return 0;
-
-err_connector_cleanup:
- drm_connector_cleanup(&dsi->conn);
- return ret;
-}
-
-static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
-{
- int ret;
-
- ret = drm_simple_encoder_init(drm, &dsi->encoder,
- DRM_MODE_ENCODER_DSI);
- if (ret) {
- DRM_ERROR("Failed to encoder init to drm\n");
- return ret;
- }
- drm_encoder_helper_add(&dsi->encoder, &mtk_dsi_encoder_helper_funcs);
-
- /*
- * Currently display data paths are statically assigned to a crtc each.
- * crtc 0 is OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0
- */
- dsi->encoder.possible_crtcs = 1;
-
- /* If there's a bridge, attach to it and let it create the connector */
- if (dsi->bridge) {
- ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL, 0);
- if (ret) {
- DRM_ERROR("Failed to attach bridge to drm\n");
- goto err_encoder_cleanup;
- }
- } else {
- /* Otherwise create our own connector and attach to a panel */
- ret = mtk_dsi_create_connector(drm, dsi);
- if (ret)
- goto err_encoder_cleanup;
- }
-
- return 0;
-
-err_encoder_cleanup:
- drm_encoder_cleanup(&dsi->encoder);
- return ret;
-}
-
-static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
-{
- drm_encoder_cleanup(&dsi->encoder);
- /* Skip connector cleanup if creation was delegated to the bridge */
- if (dsi->conn.dev)
- drm_connector_cleanup(&dsi->conn);
- if (dsi->panel)
- drm_panel_detach(dsi->panel);
-}
-
static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
{
struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);
@@ -953,20 +813,6 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
- if (dsi->conn.dev)
- drm_helper_hpd_irq_event(dsi->conn.dev);
-
- return 0;
-}
-
-static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
-{
- struct mtk_dsi *dsi = host_to_dsi(host);
-
- if (dsi->conn.dev)
- drm_helper_hpd_irq_event(dsi->conn.dev);
-
return 0;
}
@@ -1110,10 +956,46 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
static const struct mipi_dsi_host_ops mtk_dsi_ops = {
.attach = mtk_dsi_host_attach,
- .detach = mtk_dsi_host_detach,
.transfer = mtk_dsi_host_transfer,
};
+static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
+{
+ int ret;
+
+ ret = drm_simple_encoder_init(drm, &dsi->encoder,
+ DRM_MODE_ENCODER_DSI);
+ if (ret) {
+ DRM_ERROR("Failed to encoder init to drm\n");
+ return ret;
+ }
+
+ /*
+ * Currently display data paths are statically assigned to a crtc each.
+ * crtc 0 is OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0
+ */
+ dsi->encoder.possible_crtcs = 1;
+
+ ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ goto err_cleanup_encoder;
+
+ dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
+ if (IS_ERR(dsi->connector)) {
+ DRM_ERROR("Unable to create bridge connector\n");
+ ret = PTR_ERR(dsi->connector);
+ goto err_cleanup_encoder;
+ }
+ drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
+
+ return 0;
+
+err_cleanup_encoder:
+ drm_encoder_cleanup(&dsi->encoder);
+ return ret;
+}
+
static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
{
int ret;
@@ -1127,11 +1009,9 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = mtk_dsi_create_conn_enc(drm, dsi);
- if (ret) {
- DRM_ERROR("Encoder create failed with %d\n", ret);
+ ret = mtk_dsi_encoder_init(drm, dsi);
+ if (ret)
goto err_unregister;
- }
return 0;
@@ -1146,7 +1026,7 @@ static void mtk_dsi_unbind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct mtk_dsi *dsi = dev_get_drvdata(dev);
- mtk_dsi_destroy_conn_enc(dsi);
+ drm_encoder_cleanup(&dsi->encoder);
mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
}
@@ -1159,6 +1039,7 @@ static int mtk_dsi_probe(struct platform_device *pdev)
{
struct mtk_dsi *dsi;
struct device *dev = &pdev->dev;
+ struct drm_panel *panel;
struct resource *regs;
int irq_num;
int comp_id;
@@ -1177,10 +1058,18 @@ static int mtk_dsi_probe(struct platform_device *pdev)
}
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- &dsi->panel, &dsi->bridge);
+ &panel, &dsi->next_bridge);
if (ret)
goto err_unregister_host;
+ if (panel) {
+ dsi->next_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(dsi->next_bridge)) {
+ ret = PTR_ERR(dsi->next_bridge);
+ goto err_unregister_host;
+ }
+ }
+
dsi->driver_data = of_device_get_match_data(dev);
dsi->engine_clk = devm_clk_get(dev, "engine");
@@ -1256,6 +1145,12 @@ static int mtk_dsi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dsi);
+ dsi->bridge.funcs = &mtk_dsi_bridge_funcs;
+ dsi->bridge.of_node = dev->of_node;
+ dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
+
+ drm_bridge_add(&dsi->bridge);
+
ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
if (ret) {
dev_err(&pdev->dev, "failed to add component: %d\n", ret);
@@ -1274,6 +1169,7 @@ static int mtk_dsi_remove(struct platform_device *pdev)
struct mtk_dsi *dsi = platform_get_drvdata(pdev);
mtk_output_dsi_disable(dsi);
+ drm_bridge_remove(&dsi->bridge);
component_del(&pdev->dev, &mtk_dsi_component_ops);
mipi_dsi_host_unregister(&dsi->host);
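
The mtk_dsi conversion above drops the driver's own connector in favour of a drm_bridge: the DSI host registers a bridge in probe, attaches the chain with DRM_BRIDGE_ATTACH_NO_CONNECTOR at bind time, and lets drm_bridge_connector_init() derive a single connector from the bridge chain. Stripped of the MediaTek specifics, the bind-time wiring follows the shape below; only the DRM core calls are real, the surrounding names are placeholders.

#include <linux/err.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
#include <drm/drm_simple_kms_helper.h>

static int example_encoder_init(struct drm_device *drm,
				struct drm_encoder *encoder,
				struct drm_bridge *bridge)
{
	struct drm_connector *connector;
	int ret;

	ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_DSI);
	if (ret)
		return ret;

	/* No bridge in the chain creates a connector of its own. */
	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		goto err_cleanup;

	/* The core builds one connector for the whole bridge chain. */
	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector)) {
		ret = PTR_ERR(connector);
		goto err_cleanup;
	}

	return drm_connector_attach_encoder(connector, encoder);

err_cleanup:
	drm_encoder_cleanup(encoder);
	return ret;
}
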
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 55a4d095606f..95591262ac36 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1630,8 +1630,6 @@ static int mtk_hdmi_audio_startup(struct device *dev, void *data)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
mtk_hdmi_audio_enable(hdmi);
return 0;
@@ -1641,8 +1639,6 @@ static void mtk_hdmi_audio_shutdown(struct device *dev, void *data)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
mtk_hdmi_audio_disable(hdmi);
}
@@ -1651,8 +1647,6 @@ mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- dev_dbg(dev, "%s(%d)\n", __func__, enable);
-
if (enable)
mtk_hdmi_hw_aud_mute(hdmi);
else
@@ -1665,8 +1659,6 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
return 0;
@@ -1766,7 +1758,6 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
goto err_bridge_remove;
}
- dev_dbg(dev, "mediatek hdmi probe success\n");
return 0;
err_bridge_remove:
@@ -1789,7 +1780,7 @@ static int mtk_hdmi_suspend(struct device *dev)
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
mtk_hdmi_clk_disable_audio(hdmi);
- dev_dbg(dev, "hdmi suspend success!\n");
+
return 0;
}
@@ -1804,7 +1795,6 @@ static int mtk_hdmi_resume(struct device *dev)
return ret;
}
- dev_dbg(dev, "hdmi resume success!\n");
return 0;
}
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
index b55f51675205..827b93786fac 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -107,60 +107,10 @@
#define RGS_HDMITX_5T1_EDG (0xf << 4)
#define RGS_HDMITX_PLUG_TST BIT(0)
-static const u8 PREDIV[3][4] = {
- {0x0, 0x0, 0x0, 0x0}, /* 27Mhz */
- {0x1, 0x1, 0x1, 0x1}, /* 74Mhz */
- {0x1, 0x1, 0x1, 0x1} /* 148Mhz */
-};
-
-static const u8 TXDIV[3][4] = {
- {0x3, 0x3, 0x3, 0x2}, /* 27Mhz */
- {0x2, 0x1, 0x1, 0x1}, /* 74Mhz */
- {0x1, 0x0, 0x0, 0x0} /* 148Mhz */
-};
-
-static const u8 FBKSEL[3][4] = {
- {0x1, 0x1, 0x1, 0x1}, /* 27Mhz */
- {0x1, 0x0, 0x1, 0x1}, /* 74Mhz */
- {0x1, 0x0, 0x1, 0x1} /* 148Mhz */
-};
-
-static const u8 FBKDIV[3][4] = {
- {19, 24, 29, 19}, /* 27Mhz */
- {19, 24, 14, 19}, /* 74Mhz */
- {19, 24, 14, 19} /* 148Mhz */
-};
-
-static const u8 DIVEN[3][4] = {
- {0x2, 0x1, 0x1, 0x2}, /* 27Mhz */
- {0x2, 0x2, 0x2, 0x2}, /* 74Mhz */
- {0x2, 0x2, 0x2, 0x2} /* 148Mhz */
-};
-
-static const u8 HTPLLBP[3][4] = {
- {0xc, 0xc, 0x8, 0xc}, /* 27Mhz */
- {0xc, 0xf, 0xf, 0xc}, /* 74Mhz */
- {0xc, 0xf, 0xf, 0xc} /* 148Mhz */
-};
-
-static const u8 HTPLLBC[3][4] = {
- {0x2, 0x3, 0x3, 0x2}, /* 27Mhz */
- {0x2, 0x3, 0x3, 0x2}, /* 74Mhz */
- {0x2, 0x3, 0x3, 0x2} /* 148Mhz */
-};
-
-static const u8 HTPLLBR[3][4] = {
- {0x1, 0x1, 0x0, 0x1}, /* 27Mhz */
- {0x1, 0x2, 0x2, 0x1}, /* 74Mhz */
- {0x1, 0x2, 0x2, 0x1} /* 148Mhz */
-};
-
static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
- dev_dbg(hdmi_phy->dev, "%s\n", __func__);
-
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN);
@@ -178,8 +128,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
- dev_dbg(hdmi_phy->dev, "%s\n", __func__);
-
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
usleep_range(100, 150);
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 08631fdfe4b9..446e7961da48 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -266,6 +266,12 @@
#define VIU_OSD_FIFO_DEPTH_VAL(val) ((val & 0x7f) << 12)
#define VIU_OSD_WORDS_PER_BURST(words) (((words & 0x4) >> 1) << 22)
#define VIU_OSD_FIFO_LIMITS(size) ((size & 0xf) << 24)
+#define VIU_OSD_BURST_LENGTH_24 (0x0 << 31 | 0x0 << 10)
+#define VIU_OSD_BURST_LENGTH_32 (0x0 << 31 | 0x1 << 10)
+#define VIU_OSD_BURST_LENGTH_48 (0x0 << 31 | 0x2 << 10)
+#define VIU_OSD_BURST_LENGTH_64 (0x0 << 31 | 0x3 << 10)
+#define VIU_OSD_BURST_LENGTH_96 (0x1 << 31 | 0x0 << 10)
+#define VIU_OSD_BURST_LENGTH_128 (0x1 << 31 | 0x1 << 10)
#define VD1_IF0_GEN_REG 0x1a50
#define VD1_IF0_CANVAS0 0x1a51
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 304f8ff1339c..aede0c67a57f 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -411,13 +411,6 @@ void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv)
priv->io_base + _REG(VIU_MISC_CTRL1));
}
-static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length)
-{
- uint32_t val = (((length & 0x80) % 24) / 12);
-
- return (((val & 0x3) << 10) | (((val & 0x4) >> 2) << 31));
-}
-
void meson_viu_init(struct meson_drm *priv)
{
uint32_t reg;
@@ -444,9 +437,9 @@ void meson_viu_init(struct meson_drm *priv)
VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
- reg |= meson_viu_osd_burst_length_reg(32);
+ reg |= VIU_OSD_BURST_LENGTH_32;
else
- reg |= meson_viu_osd_burst_length_reg(64);
+ reg |= VIU_OSD_BURST_LENGTH_64;
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
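
The helper removed above computed (((length & 0x80) % 24) / 12), which evaluates to 0 for both 32 and 64, so both SoC branches ended up with the same burst-length field; the new VIU_OSD_BURST_LENGTH_* macros from meson_registers.h spell out bit 31 and bits 11:10 explicitly instead. With those macros, the per-SoC selection reduces to something like the hypothetical helper below, which reuses the macro definitions added earlier in this patch.

#include <linux/types.h>

#define VIU_OSD_BURST_LENGTH_32		(0x0 << 31 | 0x1 << 10)
#define VIU_OSD_BURST_LENGTH_64		(0x0 << 31 | 0x3 << 10)

/* Hypothetical helper: OR the burst-length field into the FIFO control word. */
static u32 example_osd_fifo_ctrl(u32 reg, bool is_g12a)
{
	return reg | (is_g12a ? VIU_OSD_BURST_LENGTH_32
			      : VIU_OSD_BURST_LENGTH_64);
}
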
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 14eb52f3e605..54e1b2aa57d5 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -210,6 +212,854 @@ enum a2xx_rb_blend_opcode {
BLEND2_DST_PLUS_SRC_BIAS = 5,
};
+enum a2xx_su_perfcnt_select {
+ PERF_PAPC_PASX_REQ = 0,
+ PERF_PAPC_PASX_FIRST_VECTOR = 2,
+ PERF_PAPC_PASX_SECOND_VECTOR = 3,
+ PERF_PAPC_PASX_FIRST_DEAD = 4,
+ PERF_PAPC_PASX_SECOND_DEAD = 5,
+ PERF_PAPC_PASX_VTX_KILL_DISCARD = 6,
+ PERF_PAPC_PASX_VTX_NAN_DISCARD = 7,
+ PERF_PAPC_PA_INPUT_PRIM = 8,
+ PERF_PAPC_PA_INPUT_NULL_PRIM = 9,
+ PERF_PAPC_PA_INPUT_EVENT_FLAG = 10,
+ PERF_PAPC_PA_INPUT_FIRST_PRIM_SLOT = 11,
+ PERF_PAPC_PA_INPUT_END_OF_PACKET = 12,
+ PERF_PAPC_CLPR_CULL_PRIM = 13,
+ PERF_PAPC_CLPR_VV_CULL_PRIM = 15,
+ PERF_PAPC_CLPR_VTX_KILL_CULL_PRIM = 17,
+ PERF_PAPC_CLPR_VTX_NAN_CULL_PRIM = 18,
+ PERF_PAPC_CLPR_CULL_TO_NULL_PRIM = 19,
+ PERF_PAPC_CLPR_VV_CLIP_PRIM = 21,
+ PERF_PAPC_CLPR_POINT_CLIP_CANDIDATE = 23,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_1 = 24,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_2 = 25,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_3 = 26,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_4 = 27,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_5 = 28,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_6 = 29,
+ PERF_PAPC_CLPR_CLIP_PLANE_NEAR = 30,
+ PERF_PAPC_CLPR_CLIP_PLANE_FAR = 31,
+ PERF_PAPC_CLPR_CLIP_PLANE_LEFT = 32,
+ PERF_PAPC_CLPR_CLIP_PLANE_RIGHT = 33,
+ PERF_PAPC_CLPR_CLIP_PLANE_TOP = 34,
+ PERF_PAPC_CLPR_CLIP_PLANE_BOTTOM = 35,
+ PERF_PAPC_CLSM_NULL_PRIM = 36,
+ PERF_PAPC_CLSM_TOTALLY_VISIBLE_PRIM = 37,
+ PERF_PAPC_CLSM_CLIP_PRIM = 38,
+ PERF_PAPC_CLSM_CULL_TO_NULL_PRIM = 39,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_1 = 40,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_2 = 41,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_3 = 42,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_4 = 43,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_5 = 44,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_6_7 = 45,
+ PERF_PAPC_CLSM_NON_TRIVIAL_CULL = 46,
+ PERF_PAPC_SU_INPUT_PRIM = 47,
+ PERF_PAPC_SU_INPUT_CLIP_PRIM = 48,
+ PERF_PAPC_SU_INPUT_NULL_PRIM = 49,
+ PERF_PAPC_SU_ZERO_AREA_CULL_PRIM = 50,
+ PERF_PAPC_SU_BACK_FACE_CULL_PRIM = 51,
+ PERF_PAPC_SU_FRONT_FACE_CULL_PRIM = 52,
+ PERF_PAPC_SU_POLYMODE_FACE_CULL = 53,
+ PERF_PAPC_SU_POLYMODE_BACK_CULL = 54,
+ PERF_PAPC_SU_POLYMODE_FRONT_CULL = 55,
+ PERF_PAPC_SU_POLYMODE_INVALID_FILL = 56,
+ PERF_PAPC_SU_OUTPUT_PRIM = 57,
+ PERF_PAPC_SU_OUTPUT_CLIP_PRIM = 58,
+ PERF_PAPC_SU_OUTPUT_NULL_PRIM = 59,
+ PERF_PAPC_SU_OUTPUT_EVENT_FLAG = 60,
+ PERF_PAPC_SU_OUTPUT_FIRST_PRIM_SLOT = 61,
+ PERF_PAPC_SU_OUTPUT_END_OF_PACKET = 62,
+ PERF_PAPC_SU_OUTPUT_POLYMODE_FACE = 63,
+ PERF_PAPC_SU_OUTPUT_POLYMODE_BACK = 64,
+ PERF_PAPC_SU_OUTPUT_POLYMODE_FRONT = 65,
+ PERF_PAPC_SU_OUT_CLIP_POLYMODE_FACE = 66,
+ PERF_PAPC_SU_OUT_CLIP_POLYMODE_BACK = 67,
+ PERF_PAPC_SU_OUT_CLIP_POLYMODE_FRONT = 68,
+ PERF_PAPC_PASX_REQ_IDLE = 69,
+ PERF_PAPC_PASX_REQ_BUSY = 70,
+ PERF_PAPC_PASX_REQ_STALLED = 71,
+ PERF_PAPC_PASX_REC_IDLE = 72,
+ PERF_PAPC_PASX_REC_BUSY = 73,
+ PERF_PAPC_PASX_REC_STARVED_SX = 74,
+ PERF_PAPC_PASX_REC_STALLED = 75,
+ PERF_PAPC_PASX_REC_STALLED_POS_MEM = 76,
+ PERF_PAPC_PASX_REC_STALLED_CCGSM_IN = 77,
+ PERF_PAPC_CCGSM_IDLE = 78,
+ PERF_PAPC_CCGSM_BUSY = 79,
+ PERF_PAPC_CCGSM_STALLED = 80,
+ PERF_PAPC_CLPRIM_IDLE = 81,
+ PERF_PAPC_CLPRIM_BUSY = 82,
+ PERF_PAPC_CLPRIM_STALLED = 83,
+ PERF_PAPC_CLPRIM_STARVED_CCGSM = 84,
+ PERF_PAPC_CLIPSM_IDLE = 85,
+ PERF_PAPC_CLIPSM_BUSY = 86,
+ PERF_PAPC_CLIPSM_WAIT_CLIP_VERT_ENGH = 87,
+ PERF_PAPC_CLIPSM_WAIT_HIGH_PRI_SEQ = 88,
+ PERF_PAPC_CLIPSM_WAIT_CLIPGA = 89,
+ PERF_PAPC_CLIPSM_WAIT_AVAIL_VTE_CLIP = 90,
+ PERF_PAPC_CLIPSM_WAIT_CLIP_OUTSM = 91,
+ PERF_PAPC_CLIPGA_IDLE = 92,
+ PERF_PAPC_CLIPGA_BUSY = 93,
+ PERF_PAPC_CLIPGA_STARVED_VTE_CLIP = 94,
+ PERF_PAPC_CLIPGA_STALLED = 95,
+ PERF_PAPC_CLIP_IDLE = 96,
+ PERF_PAPC_CLIP_BUSY = 97,
+ PERF_PAPC_SU_IDLE = 98,
+ PERF_PAPC_SU_BUSY = 99,
+ PERF_PAPC_SU_STARVED_CLIP = 100,
+ PERF_PAPC_SU_STALLED_SC = 101,
+ PERF_PAPC_SU_FACENESS_CULL = 102,
+};
+
+enum a2xx_sc_perfcnt_select {
+ SC_SR_WINDOW_VALID = 0,
+ SC_CW_WINDOW_VALID = 1,
+ SC_QM_WINDOW_VALID = 2,
+ SC_FW_WINDOW_VALID = 3,
+ SC_EZ_WINDOW_VALID = 4,
+ SC_IT_WINDOW_VALID = 5,
+ SC_STARVED_BY_PA = 6,
+ SC_STALLED_BY_RB_TILE = 7,
+ SC_STALLED_BY_RB_SAMP = 8,
+ SC_STARVED_BY_RB_EZ = 9,
+ SC_STALLED_BY_SAMPLE_FF = 10,
+ SC_STALLED_BY_SQ = 11,
+ SC_STALLED_BY_SP = 12,
+ SC_TOTAL_NO_PRIMS = 13,
+ SC_NON_EMPTY_PRIMS = 14,
+ SC_NO_TILES_PASSING_QM = 15,
+ SC_NO_PIXELS_PRE_EZ = 16,
+ SC_NO_PIXELS_POST_EZ = 17,
+};
+
+enum a2xx_vgt_perfcount_select {
+ VGT_SQ_EVENT_WINDOW_ACTIVE = 0,
+ VGT_SQ_SEND = 1,
+ VGT_SQ_STALLED = 2,
+ VGT_SQ_STARVED_BUSY = 3,
+ VGT_SQ_STARVED_IDLE = 4,
+ VGT_SQ_STATIC = 5,
+ VGT_PA_EVENT_WINDOW_ACTIVE = 6,
+ VGT_PA_CLIP_V_SEND = 7,
+ VGT_PA_CLIP_V_STALLED = 8,
+ VGT_PA_CLIP_V_STARVED_BUSY = 9,
+ VGT_PA_CLIP_V_STARVED_IDLE = 10,
+ VGT_PA_CLIP_V_STATIC = 11,
+ VGT_PA_CLIP_P_SEND = 12,
+ VGT_PA_CLIP_P_STALLED = 13,
+ VGT_PA_CLIP_P_STARVED_BUSY = 14,
+ VGT_PA_CLIP_P_STARVED_IDLE = 15,
+ VGT_PA_CLIP_P_STATIC = 16,
+ VGT_PA_CLIP_S_SEND = 17,
+ VGT_PA_CLIP_S_STALLED = 18,
+ VGT_PA_CLIP_S_STARVED_BUSY = 19,
+ VGT_PA_CLIP_S_STARVED_IDLE = 20,
+ VGT_PA_CLIP_S_STATIC = 21,
+ RBIU_FIFOS_EVENT_WINDOW_ACTIVE = 22,
+ RBIU_IMMED_DATA_FIFO_STARVED = 23,
+ RBIU_IMMED_DATA_FIFO_STALLED = 24,
+ RBIU_DMA_REQUEST_FIFO_STARVED = 25,
+ RBIU_DMA_REQUEST_FIFO_STALLED = 26,
+ RBIU_DRAW_INITIATOR_FIFO_STARVED = 27,
+ RBIU_DRAW_INITIATOR_FIFO_STALLED = 28,
+ BIN_PRIM_NEAR_CULL = 29,
+ BIN_PRIM_ZERO_CULL = 30,
+ BIN_PRIM_FAR_CULL = 31,
+ BIN_PRIM_BIN_CULL = 32,
+ BIN_PRIM_FACE_CULL = 33,
+ SPARE34 = 34,
+ SPARE35 = 35,
+ SPARE36 = 36,
+ SPARE37 = 37,
+ SPARE38 = 38,
+ SPARE39 = 39,
+ TE_SU_IN_VALID = 40,
+ TE_SU_IN_READ = 41,
+ TE_SU_IN_PRIM = 42,
+ TE_SU_IN_EOP = 43,
+ TE_SU_IN_NULL_PRIM = 44,
+ TE_WK_IN_VALID = 45,
+ TE_WK_IN_READ = 46,
+ TE_OUT_PRIM_VALID = 47,
+ TE_OUT_PRIM_READ = 48,
+};
+
+enum a2xx_tcr_perfcount_select {
+ DGMMPD_IPMUX0_STALL = 0,
+ DGMMPD_IPMUX_ALL_STALL = 4,
+ OPMUX0_L2_WRITES = 5,
+};
+
+enum a2xx_tp_perfcount_select {
+ POINT_QUADS = 0,
+ BILIN_QUADS = 1,
+ ANISO_QUADS = 2,
+ MIP_QUADS = 3,
+ VOL_QUADS = 4,
+ MIP_VOL_QUADS = 5,
+ MIP_ANISO_QUADS = 6,
+ VOL_ANISO_QUADS = 7,
+ ANISO_2_1_QUADS = 8,
+ ANISO_4_1_QUADS = 9,
+ ANISO_6_1_QUADS = 10,
+ ANISO_8_1_QUADS = 11,
+ ANISO_10_1_QUADS = 12,
+ ANISO_12_1_QUADS = 13,
+ ANISO_14_1_QUADS = 14,
+ ANISO_16_1_QUADS = 15,
+ MIP_VOL_ANISO_QUADS = 16,
+ ALIGN_2_QUADS = 17,
+ ALIGN_4_QUADS = 18,
+ PIX_0_QUAD = 19,
+ PIX_1_QUAD = 20,
+ PIX_2_QUAD = 21,
+ PIX_3_QUAD = 22,
+ PIX_4_QUAD = 23,
+ TP_MIPMAP_LOD0 = 24,
+ TP_MIPMAP_LOD1 = 25,
+ TP_MIPMAP_LOD2 = 26,
+ TP_MIPMAP_LOD3 = 27,
+ TP_MIPMAP_LOD4 = 28,
+ TP_MIPMAP_LOD5 = 29,
+ TP_MIPMAP_LOD6 = 30,
+ TP_MIPMAP_LOD7 = 31,
+ TP_MIPMAP_LOD8 = 32,
+ TP_MIPMAP_LOD9 = 33,
+ TP_MIPMAP_LOD10 = 34,
+ TP_MIPMAP_LOD11 = 35,
+ TP_MIPMAP_LOD12 = 36,
+ TP_MIPMAP_LOD13 = 37,
+ TP_MIPMAP_LOD14 = 38,
+};
+
+enum a2xx_tcm_perfcount_select {
+ QUAD0_RD_LAT_FIFO_EMPTY = 0,
+ QUAD0_RD_LAT_FIFO_4TH_FULL = 3,
+ QUAD0_RD_LAT_FIFO_HALF_FULL = 4,
+ QUAD0_RD_LAT_FIFO_FULL = 5,
+ QUAD0_RD_LAT_FIFO_LT_4TH_FULL = 6,
+ READ_STARVED_QUAD0 = 28,
+ READ_STARVED = 32,
+ READ_STALLED_QUAD0 = 33,
+ READ_STALLED = 37,
+ VALID_READ_QUAD0 = 38,
+ TC_TP_STARVED_QUAD0 = 42,
+ TC_TP_STARVED = 46,
+};
+
+enum a2xx_tcf_perfcount_select {
+ VALID_CYCLES = 0,
+ SINGLE_PHASES = 1,
+ ANISO_PHASES = 2,
+ MIP_PHASES = 3,
+ VOL_PHASES = 4,
+ MIP_VOL_PHASES = 5,
+ MIP_ANISO_PHASES = 6,
+ VOL_ANISO_PHASES = 7,
+ ANISO_2_1_PHASES = 8,
+ ANISO_4_1_PHASES = 9,
+ ANISO_6_1_PHASES = 10,
+ ANISO_8_1_PHASES = 11,
+ ANISO_10_1_PHASES = 12,
+ ANISO_12_1_PHASES = 13,
+ ANISO_14_1_PHASES = 14,
+ ANISO_16_1_PHASES = 15,
+ MIP_VOL_ANISO_PHASES = 16,
+ ALIGN_2_PHASES = 17,
+ ALIGN_4_PHASES = 18,
+ TPC_BUSY = 19,
+ TPC_STALLED = 20,
+ TPC_STARVED = 21,
+ TPC_WORKING = 22,
+ TPC_WALKER_BUSY = 23,
+ TPC_WALKER_STALLED = 24,
+ TPC_WALKER_WORKING = 25,
+ TPC_ALIGNER_BUSY = 26,
+ TPC_ALIGNER_STALLED = 27,
+ TPC_ALIGNER_STALLED_BY_BLEND = 28,
+ TPC_ALIGNER_STALLED_BY_CACHE = 29,
+ TPC_ALIGNER_WORKING = 30,
+ TPC_BLEND_BUSY = 31,
+ TPC_BLEND_SYNC = 32,
+ TPC_BLEND_STARVED = 33,
+ TPC_BLEND_WORKING = 34,
+ OPCODE_0x00 = 35,
+ OPCODE_0x01 = 36,
+ OPCODE_0x04 = 37,
+ OPCODE_0x10 = 38,
+ OPCODE_0x11 = 39,
+ OPCODE_0x12 = 40,
+ OPCODE_0x13 = 41,
+ OPCODE_0x18 = 42,
+ OPCODE_0x19 = 43,
+ OPCODE_0x1A = 44,
+ OPCODE_OTHER = 45,
+ IN_FIFO_0_EMPTY = 56,
+ IN_FIFO_0_LT_HALF_FULL = 57,
+ IN_FIFO_0_HALF_FULL = 58,
+ IN_FIFO_0_FULL = 59,
+ IN_FIFO_TPC_EMPTY = 72,
+ IN_FIFO_TPC_LT_HALF_FULL = 73,
+ IN_FIFO_TPC_HALF_FULL = 74,
+ IN_FIFO_TPC_FULL = 75,
+ TPC_TC_XFC = 76,
+ TPC_TC_STATE = 77,
+ TC_STALL = 78,
+ QUAD0_TAPS = 79,
+ QUADS = 83,
+ TCA_SYNC_STALL = 84,
+ TAG_STALL = 85,
+ TCB_SYNC_STALL = 88,
+ TCA_VALID = 89,
+ PROBES_VALID = 90,
+ MISS_STALL = 91,
+ FETCH_FIFO_STALL = 92,
+ TCO_STALL = 93,
+ ANY_STALL = 94,
+ TAG_MISSES = 95,
+ TAG_HITS = 96,
+ SUB_TAG_MISSES = 97,
+ SET0_INVALIDATES = 98,
+ SET1_INVALIDATES = 99,
+ SET2_INVALIDATES = 100,
+ SET3_INVALIDATES = 101,
+ SET0_TAG_MISSES = 102,
+ SET1_TAG_MISSES = 103,
+ SET2_TAG_MISSES = 104,
+ SET3_TAG_MISSES = 105,
+ SET0_TAG_HITS = 106,
+ SET1_TAG_HITS = 107,
+ SET2_TAG_HITS = 108,
+ SET3_TAG_HITS = 109,
+ SET0_SUB_TAG_MISSES = 110,
+ SET1_SUB_TAG_MISSES = 111,
+ SET2_SUB_TAG_MISSES = 112,
+ SET3_SUB_TAG_MISSES = 113,
+ SET0_EVICT1 = 114,
+ SET0_EVICT2 = 115,
+ SET0_EVICT3 = 116,
+ SET0_EVICT4 = 117,
+ SET0_EVICT5 = 118,
+ SET0_EVICT6 = 119,
+ SET0_EVICT7 = 120,
+ SET0_EVICT8 = 121,
+ SET1_EVICT1 = 130,
+ SET1_EVICT2 = 131,
+ SET1_EVICT3 = 132,
+ SET1_EVICT4 = 133,
+ SET1_EVICT5 = 134,
+ SET1_EVICT6 = 135,
+ SET1_EVICT7 = 136,
+ SET1_EVICT8 = 137,
+ SET2_EVICT1 = 146,
+ SET2_EVICT2 = 147,
+ SET2_EVICT3 = 148,
+ SET2_EVICT4 = 149,
+ SET2_EVICT5 = 150,
+ SET2_EVICT6 = 151,
+ SET2_EVICT7 = 152,
+ SET2_EVICT8 = 153,
+ SET3_EVICT1 = 162,
+ SET3_EVICT2 = 163,
+ SET3_EVICT3 = 164,
+ SET3_EVICT4 = 165,
+ SET3_EVICT5 = 166,
+ SET3_EVICT6 = 167,
+ SET3_EVICT7 = 168,
+ SET3_EVICT8 = 169,
+ FF_EMPTY = 178,
+ FF_LT_HALF_FULL = 179,
+ FF_HALF_FULL = 180,
+ FF_FULL = 181,
+ FF_XFC = 182,
+ FF_STALLED = 183,
+ FG_MASKS = 184,
+ FG_LEFT_MASKS = 185,
+ FG_LEFT_MASK_STALLED = 186,
+ FG_LEFT_NOT_DONE_STALL = 187,
+ FG_LEFT_FG_STALL = 188,
+ FG_LEFT_SECTORS = 189,
+ FG0_REQUESTS = 195,
+ FG0_STALLED = 196,
+ MEM_REQ512 = 199,
+ MEM_REQ_SENT = 200,
+ MEM_LOCAL_READ_REQ = 202,
+ TC0_MH_STALLED = 203,
+};
+
+enum a2xx_sq_perfcnt_select {
+ SQ_PIXEL_VECTORS_SUB = 0,
+ SQ_VERTEX_VECTORS_SUB = 1,
+ SQ_ALU0_ACTIVE_VTX_SIMD0 = 2,
+ SQ_ALU1_ACTIVE_VTX_SIMD0 = 3,
+ SQ_ALU0_ACTIVE_PIX_SIMD0 = 4,
+ SQ_ALU1_ACTIVE_PIX_SIMD0 = 5,
+ SQ_ALU0_ACTIVE_VTX_SIMD1 = 6,
+ SQ_ALU1_ACTIVE_VTX_SIMD1 = 7,
+ SQ_ALU0_ACTIVE_PIX_SIMD1 = 8,
+ SQ_ALU1_ACTIVE_PIX_SIMD1 = 9,
+ SQ_EXPORT_CYCLES = 10,
+ SQ_ALU_CST_WRITTEN = 11,
+ SQ_TEX_CST_WRITTEN = 12,
+ SQ_ALU_CST_STALL = 13,
+ SQ_ALU_TEX_STALL = 14,
+ SQ_INST_WRITTEN = 15,
+ SQ_BOOLEAN_WRITTEN = 16,
+ SQ_LOOPS_WRITTEN = 17,
+ SQ_PIXEL_SWAP_IN = 18,
+ SQ_PIXEL_SWAP_OUT = 19,
+ SQ_VERTEX_SWAP_IN = 20,
+ SQ_VERTEX_SWAP_OUT = 21,
+ SQ_ALU_VTX_INST_ISSUED = 22,
+ SQ_TEX_VTX_INST_ISSUED = 23,
+ SQ_VC_VTX_INST_ISSUED = 24,
+ SQ_CF_VTX_INST_ISSUED = 25,
+ SQ_ALU_PIX_INST_ISSUED = 26,
+ SQ_TEX_PIX_INST_ISSUED = 27,
+ SQ_VC_PIX_INST_ISSUED = 28,
+ SQ_CF_PIX_INST_ISSUED = 29,
+ SQ_ALU0_FIFO_EMPTY_SIMD0 = 30,
+ SQ_ALU1_FIFO_EMPTY_SIMD0 = 31,
+ SQ_ALU0_FIFO_EMPTY_SIMD1 = 32,
+ SQ_ALU1_FIFO_EMPTY_SIMD1 = 33,
+ SQ_ALU_NOPS = 34,
+ SQ_PRED_SKIP = 35,
+ SQ_SYNC_ALU_STALL_SIMD0_VTX = 36,
+ SQ_SYNC_ALU_STALL_SIMD1_VTX = 37,
+ SQ_SYNC_TEX_STALL_VTX = 38,
+ SQ_SYNC_VC_STALL_VTX = 39,
+ SQ_CONSTANTS_USED_SIMD0 = 40,
+ SQ_CONSTANTS_SENT_SP_SIMD0 = 41,
+ SQ_GPR_STALL_VTX = 42,
+ SQ_GPR_STALL_PIX = 43,
+ SQ_VTX_RS_STALL = 44,
+ SQ_PIX_RS_STALL = 45,
+ SQ_SX_PC_FULL = 46,
+ SQ_SX_EXP_BUFF_FULL = 47,
+ SQ_SX_POS_BUFF_FULL = 48,
+ SQ_INTERP_QUADS = 49,
+ SQ_INTERP_ACTIVE = 50,
+ SQ_IN_PIXEL_STALL = 51,
+ SQ_IN_VTX_STALL = 52,
+ SQ_VTX_CNT = 53,
+ SQ_VTX_VECTOR2 = 54,
+ SQ_VTX_VECTOR3 = 55,
+ SQ_VTX_VECTOR4 = 56,
+ SQ_PIXEL_VECTOR1 = 57,
+ SQ_PIXEL_VECTOR23 = 58,
+ SQ_PIXEL_VECTOR4 = 59,
+ SQ_CONSTANTS_USED_SIMD1 = 60,
+ SQ_CONSTANTS_SENT_SP_SIMD1 = 61,
+ SQ_SX_MEM_EXP_FULL = 62,
+ SQ_ALU0_ACTIVE_VTX_SIMD2 = 63,
+ SQ_ALU1_ACTIVE_VTX_SIMD2 = 64,
+ SQ_ALU0_ACTIVE_PIX_SIMD2 = 65,
+ SQ_ALU1_ACTIVE_PIX_SIMD2 = 66,
+ SQ_ALU0_ACTIVE_VTX_SIMD3 = 67,
+ SQ_PERFCOUNT_VTX_QUAL_TP_DONE = 68,
+ SQ_ALU0_ACTIVE_PIX_SIMD3 = 69,
+ SQ_PERFCOUNT_PIX_QUAL_TP_DONE = 70,
+ SQ_ALU0_FIFO_EMPTY_SIMD2 = 71,
+ SQ_ALU1_FIFO_EMPTY_SIMD2 = 72,
+ SQ_ALU0_FIFO_EMPTY_SIMD3 = 73,
+ SQ_ALU1_FIFO_EMPTY_SIMD3 = 74,
+ SQ_SYNC_ALU_STALL_SIMD2_VTX = 75,
+ SQ_PERFCOUNT_VTX_POP_THREAD = 76,
+ SQ_SYNC_ALU_STALL_SIMD0_PIX = 77,
+ SQ_SYNC_ALU_STALL_SIMD1_PIX = 78,
+ SQ_SYNC_ALU_STALL_SIMD2_PIX = 79,
+ SQ_PERFCOUNT_PIX_POP_THREAD = 80,
+ SQ_SYNC_TEX_STALL_PIX = 81,
+ SQ_SYNC_VC_STALL_PIX = 82,
+ SQ_CONSTANTS_USED_SIMD2 = 83,
+ SQ_CONSTANTS_SENT_SP_SIMD2 = 84,
+ SQ_PERFCOUNT_VTX_DEALLOC_ACK = 85,
+ SQ_PERFCOUNT_PIX_DEALLOC_ACK = 86,
+ SQ_ALU0_FIFO_FULL_SIMD0 = 87,
+ SQ_ALU1_FIFO_FULL_SIMD0 = 88,
+ SQ_ALU0_FIFO_FULL_SIMD1 = 89,
+ SQ_ALU1_FIFO_FULL_SIMD1 = 90,
+ SQ_ALU0_FIFO_FULL_SIMD2 = 91,
+ SQ_ALU1_FIFO_FULL_SIMD2 = 92,
+ SQ_ALU0_FIFO_FULL_SIMD3 = 93,
+ SQ_ALU1_FIFO_FULL_SIMD3 = 94,
+ VC_PERF_STATIC = 95,
+ VC_PERF_STALLED = 96,
+ VC_PERF_STARVED = 97,
+ VC_PERF_SEND = 98,
+ VC_PERF_ACTUAL_STARVED = 99,
+ PIXEL_THREAD_0_ACTIVE = 100,
+ VERTEX_THREAD_0_ACTIVE = 101,
+ PIXEL_THREAD_0_NUMBER = 102,
+ VERTEX_THREAD_0_NUMBER = 103,
+ VERTEX_EVENT_NUMBER = 104,
+ PIXEL_EVENT_NUMBER = 105,
+ PTRBUFF_EF_PUSH = 106,
+ PTRBUFF_EF_POP_EVENT = 107,
+ PTRBUFF_EF_POP_NEW_VTX = 108,
+ PTRBUFF_EF_POP_DEALLOC = 109,
+ PTRBUFF_EF_POP_PVECTOR = 110,
+ PTRBUFF_EF_POP_PVECTOR_X = 111,
+ PTRBUFF_EF_POP_PVECTOR_VNZ = 112,
+ PTRBUFF_PB_DEALLOC = 113,
+ PTRBUFF_PI_STATE_PPB_POP = 114,
+ PTRBUFF_PI_RTR = 115,
+ PTRBUFF_PI_READ_EN = 116,
+ PTRBUFF_PI_BUFF_SWAP = 117,
+ PTRBUFF_SQ_FREE_BUFF = 118,
+ PTRBUFF_SQ_DEC = 119,
+ PTRBUFF_SC_VALID_CNTL_EVENT = 120,
+ PTRBUFF_SC_VALID_IJ_XFER = 121,
+ PTRBUFF_SC_NEW_VECTOR_1_Q = 122,
+ PTRBUFF_QUAL_NEW_VECTOR = 123,
+ PTRBUFF_QUAL_EVENT = 124,
+ PTRBUFF_END_BUFFER = 125,
+ PTRBUFF_FILL_QUAD = 126,
+ VERTS_WRITTEN_SPI = 127,
+ TP_FETCH_INSTR_EXEC = 128,
+ TP_FETCH_INSTR_REQ = 129,
+ TP_DATA_RETURN = 130,
+ SPI_WRITE_CYCLES_SP = 131,
+ SPI_WRITES_SP = 132,
+ SP_ALU_INSTR_EXEC = 133,
+ SP_CONST_ADDR_TO_SQ = 134,
+ SP_PRED_KILLS_TO_SQ = 135,
+ SP_EXPORT_CYCLES_TO_SX = 136,
+ SP_EXPORTS_TO_SX = 137,
+ SQ_CYCLES_ELAPSED = 138,
+ SQ_TCFS_OPT_ALLOC_EXEC = 139,
+ SQ_TCFS_NO_OPT_ALLOC = 140,
+ SQ_ALU0_NO_OPT_ALLOC = 141,
+ SQ_ALU1_NO_OPT_ALLOC = 142,
+ SQ_TCFS_ARB_XFC_CNT = 143,
+ SQ_ALU0_ARB_XFC_CNT = 144,
+ SQ_ALU1_ARB_XFC_CNT = 145,
+ SQ_TCFS_CFS_UPDATE_CNT = 146,
+ SQ_ALU0_CFS_UPDATE_CNT = 147,
+ SQ_ALU1_CFS_UPDATE_CNT = 148,
+ SQ_VTX_PUSH_THREAD_CNT = 149,
+ SQ_VTX_POP_THREAD_CNT = 150,
+ SQ_PIX_PUSH_THREAD_CNT = 151,
+ SQ_PIX_POP_THREAD_CNT = 152,
+ SQ_PIX_TOTAL = 153,
+ SQ_PIX_KILLED = 154,
+};
+
+enum a2xx_sx_perfcnt_select {
+ SX_EXPORT_VECTORS = 0,
+ SX_DUMMY_QUADS = 1,
+ SX_ALPHA_FAIL = 2,
+ SX_RB_QUAD_BUSY = 3,
+ SX_RB_COLOR_BUSY = 4,
+ SX_RB_QUAD_STALL = 5,
+ SX_RB_COLOR_STALL = 6,
+};
+
+enum a2xx_rbbm_perfcount1_sel {
+ RBBM1_COUNT = 0,
+ RBBM1_NRT_BUSY = 1,
+ RBBM1_RB_BUSY = 2,
+ RBBM1_SQ_CNTX0_BUSY = 3,
+ RBBM1_SQ_CNTX17_BUSY = 4,
+ RBBM1_VGT_BUSY = 5,
+ RBBM1_VGT_NODMA_BUSY = 6,
+ RBBM1_PA_BUSY = 7,
+ RBBM1_SC_CNTX_BUSY = 8,
+ RBBM1_TPC_BUSY = 9,
+ RBBM1_TC_BUSY = 10,
+ RBBM1_SX_BUSY = 11,
+ RBBM1_CP_COHER_BUSY = 12,
+ RBBM1_CP_NRT_BUSY = 13,
+ RBBM1_GFX_IDLE_STALL = 14,
+ RBBM1_INTERRUPT = 15,
+};
+
+enum a2xx_cp_perfcount_sel {
+ ALWAYS_COUNT = 0,
+ TRANS_FIFO_FULL = 1,
+ TRANS_FIFO_AF = 2,
+ RCIU_PFPTRANS_WAIT = 3,
+ RCIU_NRTTRANS_WAIT = 6,
+ CSF_NRT_READ_WAIT = 8,
+ CSF_I1_FIFO_FULL = 9,
+ CSF_I2_FIFO_FULL = 10,
+ CSF_ST_FIFO_FULL = 11,
+ CSF_RING_ROQ_FULL = 13,
+ CSF_I1_ROQ_FULL = 14,
+ CSF_I2_ROQ_FULL = 15,
+ CSF_ST_ROQ_FULL = 16,
+ MIU_TAG_MEM_FULL = 18,
+ MIU_WRITECLEAN = 19,
+ MIU_NRT_WRITE_STALLED = 22,
+ MIU_NRT_READ_STALLED = 23,
+ ME_WRITE_CONFIRM_FIFO_FULL = 24,
+ ME_VS_DEALLOC_FIFO_FULL = 25,
+ ME_PS_DEALLOC_FIFO_FULL = 26,
+ ME_REGS_VS_EVENT_FIFO_FULL = 27,
+ ME_REGS_PS_EVENT_FIFO_FULL = 28,
+ ME_REGS_CF_EVENT_FIFO_FULL = 29,
+ ME_MICRO_RB_STARVED = 30,
+ ME_MICRO_I1_STARVED = 31,
+ ME_MICRO_I2_STARVED = 32,
+ ME_MICRO_ST_STARVED = 33,
+ RCIU_RBBM_DWORD_SENT = 40,
+ ME_BUSY_CLOCKS = 41,
+ ME_WAIT_CONTEXT_AVAIL = 42,
+ PFP_TYPE0_PACKET = 43,
+ PFP_TYPE3_PACKET = 44,
+ CSF_RB_WPTR_NEQ_RPTR = 45,
+ CSF_I1_SIZE_NEQ_ZERO = 46,
+ CSF_I2_SIZE_NEQ_ZERO = 47,
+ CSF_RBI1I2_FETCHING = 48,
+};
+
+enum a2xx_rb_perfcnt_select {
+ RBPERF_CNTX_BUSY = 0,
+ RBPERF_CNTX_BUSY_MAX = 1,
+ RBPERF_SX_QUAD_STARVED = 2,
+ RBPERF_SX_QUAD_STARVED_MAX = 3,
+ RBPERF_GA_GC_CH0_SYS_REQ = 4,
+ RBPERF_GA_GC_CH0_SYS_REQ_MAX = 5,
+ RBPERF_GA_GC_CH1_SYS_REQ = 6,
+ RBPERF_GA_GC_CH1_SYS_REQ_MAX = 7,
+ RBPERF_MH_STARVED = 8,
+ RBPERF_MH_STARVED_MAX = 9,
+ RBPERF_AZ_BC_COLOR_BUSY = 10,
+ RBPERF_AZ_BC_COLOR_BUSY_MAX = 11,
+ RBPERF_AZ_BC_Z_BUSY = 12,
+ RBPERF_AZ_BC_Z_BUSY_MAX = 13,
+ RBPERF_RB_SC_TILE_RTR_N = 14,
+ RBPERF_RB_SC_TILE_RTR_N_MAX = 15,
+ RBPERF_RB_SC_SAMP_RTR_N = 16,
+ RBPERF_RB_SC_SAMP_RTR_N_MAX = 17,
+ RBPERF_RB_SX_QUAD_RTR_N = 18,
+ RBPERF_RB_SX_QUAD_RTR_N_MAX = 19,
+ RBPERF_RB_SX_COLOR_RTR_N = 20,
+ RBPERF_RB_SX_COLOR_RTR_N_MAX = 21,
+ RBPERF_RB_SC_SAMP_LZ_BUSY = 22,
+ RBPERF_RB_SC_SAMP_LZ_BUSY_MAX = 23,
+ RBPERF_ZXP_STALL = 24,
+ RBPERF_ZXP_STALL_MAX = 25,
+ RBPERF_EVENT_PENDING = 26,
+ RBPERF_EVENT_PENDING_MAX = 27,
+ RBPERF_RB_MH_VALID = 28,
+ RBPERF_RB_MH_VALID_MAX = 29,
+ RBPERF_SX_RB_QUAD_SEND = 30,
+ RBPERF_SX_RB_COLOR_SEND = 31,
+ RBPERF_SC_RB_TILE_SEND = 32,
+ RBPERF_SC_RB_SAMPLE_SEND = 33,
+ RBPERF_SX_RB_MEM_EXPORT = 34,
+ RBPERF_SX_RB_QUAD_EVENT = 35,
+ RBPERF_SC_RB_TILE_EVENT_FILTERED = 36,
+ RBPERF_SC_RB_TILE_EVENT_ALL = 37,
+ RBPERF_RB_SC_EZ_SEND = 38,
+ RBPERF_RB_SX_INDEX_SEND = 39,
+ RBPERF_GMEM_INTFO_RD = 40,
+ RBPERF_GMEM_INTF1_RD = 41,
+ RBPERF_GMEM_INTFO_WR = 42,
+ RBPERF_GMEM_INTF1_WR = 43,
+ RBPERF_RB_CP_CONTEXT_DONE = 44,
+ RBPERF_RB_CP_CACHE_FLUSH = 45,
+ RBPERF_ZPASS_DONE = 46,
+ RBPERF_ZCMD_VALID = 47,
+ RBPERF_CCMD_VALID = 48,
+ RBPERF_ACCUM_GRANT = 49,
+ RBPERF_ACCUM_C0_GRANT = 50,
+ RBPERF_ACCUM_C1_GRANT = 51,
+ RBPERF_ACCUM_FULL_BE_WR = 52,
+ RBPERF_ACCUM_REQUEST_NO_GRANT = 53,
+ RBPERF_ACCUM_TIMEOUT_PULSE = 54,
+ RBPERF_ACCUM_LIN_TIMEOUT_PULSE = 55,
+ RBPERF_ACCUM_CAM_HIT_FLUSHING = 56,
+};
+
+enum a2xx_mh_perfcnt_select {
+ CP_R0_REQUESTS = 0,
+ CP_R1_REQUESTS = 1,
+ CP_R2_REQUESTS = 2,
+ CP_R3_REQUESTS = 3,
+ CP_R4_REQUESTS = 4,
+ CP_TOTAL_READ_REQUESTS = 5,
+ CP_TOTAL_WRITE_REQUESTS = 6,
+ CP_TOTAL_REQUESTS = 7,
+ CP_DATA_BYTES_WRITTEN = 8,
+ CP_WRITE_CLEAN_RESPONSES = 9,
+ CP_R0_READ_BURSTS_RECEIVED = 10,
+ CP_R1_READ_BURSTS_RECEIVED = 11,
+ CP_R2_READ_BURSTS_RECEIVED = 12,
+ CP_R3_READ_BURSTS_RECEIVED = 13,
+ CP_R4_READ_BURSTS_RECEIVED = 14,
+ CP_TOTAL_READ_BURSTS_RECEIVED = 15,
+ CP_R0_DATA_BEATS_READ = 16,
+ CP_R1_DATA_BEATS_READ = 17,
+ CP_R2_DATA_BEATS_READ = 18,
+ CP_R3_DATA_BEATS_READ = 19,
+ CP_R4_DATA_BEATS_READ = 20,
+ CP_TOTAL_DATA_BEATS_READ = 21,
+ VGT_R0_REQUESTS = 22,
+ VGT_R1_REQUESTS = 23,
+ VGT_TOTAL_REQUESTS = 24,
+ VGT_R0_READ_BURSTS_RECEIVED = 25,
+ VGT_R1_READ_BURSTS_RECEIVED = 26,
+ VGT_TOTAL_READ_BURSTS_RECEIVED = 27,
+ VGT_R0_DATA_BEATS_READ = 28,
+ VGT_R1_DATA_BEATS_READ = 29,
+ VGT_TOTAL_DATA_BEATS_READ = 30,
+ TC_TOTAL_REQUESTS = 31,
+ TC_ROQ_REQUESTS = 32,
+ TC_INFO_SENT = 33,
+ TC_READ_BURSTS_RECEIVED = 34,
+ TC_DATA_BEATS_READ = 35,
+ TCD_BURSTS_READ = 36,
+ RB_REQUESTS = 37,
+ RB_DATA_BYTES_WRITTEN = 38,
+ RB_WRITE_CLEAN_RESPONSES = 39,
+ AXI_READ_REQUESTS_ID_0 = 40,
+ AXI_READ_REQUESTS_ID_1 = 41,
+ AXI_READ_REQUESTS_ID_2 = 42,
+ AXI_READ_REQUESTS_ID_3 = 43,
+ AXI_READ_REQUESTS_ID_4 = 44,
+ AXI_READ_REQUESTS_ID_5 = 45,
+ AXI_READ_REQUESTS_ID_6 = 46,
+ AXI_READ_REQUESTS_ID_7 = 47,
+ AXI_TOTAL_READ_REQUESTS = 48,
+ AXI_WRITE_REQUESTS_ID_0 = 49,
+ AXI_WRITE_REQUESTS_ID_1 = 50,
+ AXI_WRITE_REQUESTS_ID_2 = 51,
+ AXI_WRITE_REQUESTS_ID_3 = 52,
+ AXI_WRITE_REQUESTS_ID_4 = 53,
+ AXI_WRITE_REQUESTS_ID_5 = 54,
+ AXI_WRITE_REQUESTS_ID_6 = 55,
+ AXI_WRITE_REQUESTS_ID_7 = 56,
+ AXI_TOTAL_WRITE_REQUESTS = 57,
+ AXI_TOTAL_REQUESTS_ID_0 = 58,
+ AXI_TOTAL_REQUESTS_ID_1 = 59,
+ AXI_TOTAL_REQUESTS_ID_2 = 60,
+ AXI_TOTAL_REQUESTS_ID_3 = 61,
+ AXI_TOTAL_REQUESTS_ID_4 = 62,
+ AXI_TOTAL_REQUESTS_ID_5 = 63,
+ AXI_TOTAL_REQUESTS_ID_6 = 64,
+ AXI_TOTAL_REQUESTS_ID_7 = 65,
+ AXI_TOTAL_REQUESTS = 66,
+ AXI_READ_CHANNEL_BURSTS_ID_0 = 67,
+ AXI_READ_CHANNEL_BURSTS_ID_1 = 68,
+ AXI_READ_CHANNEL_BURSTS_ID_2 = 69,
+ AXI_READ_CHANNEL_BURSTS_ID_3 = 70,
+ AXI_READ_CHANNEL_BURSTS_ID_4 = 71,
+ AXI_READ_CHANNEL_BURSTS_ID_5 = 72,
+ AXI_READ_CHANNEL_BURSTS_ID_6 = 73,
+ AXI_READ_CHANNEL_BURSTS_ID_7 = 74,
+ AXI_READ_CHANNEL_TOTAL_BURSTS = 75,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_0 = 76,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_1 = 77,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_2 = 78,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_3 = 79,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_4 = 80,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_5 = 81,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_6 = 82,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_7 = 83,
+ AXI_READ_CHANNEL_TOTAL_DATA_BEATS_READ = 84,
+ AXI_WRITE_CHANNEL_BURSTS_ID_0 = 85,
+ AXI_WRITE_CHANNEL_BURSTS_ID_1 = 86,
+ AXI_WRITE_CHANNEL_BURSTS_ID_2 = 87,
+ AXI_WRITE_CHANNEL_BURSTS_ID_3 = 88,
+ AXI_WRITE_CHANNEL_BURSTS_ID_4 = 89,
+ AXI_WRITE_CHANNEL_BURSTS_ID_5 = 90,
+ AXI_WRITE_CHANNEL_BURSTS_ID_6 = 91,
+ AXI_WRITE_CHANNEL_BURSTS_ID_7 = 92,
+ AXI_WRITE_CHANNEL_TOTAL_BURSTS = 93,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_0 = 94,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_1 = 95,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_2 = 96,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_3 = 97,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_4 = 98,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_5 = 99,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_6 = 100,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_7 = 101,
+ AXI_WRITE_CHANNEL_TOTAL_DATA_BYTES_WRITTEN = 102,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_0 = 103,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_1 = 104,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_2 = 105,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_3 = 106,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_4 = 107,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_5 = 108,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_6 = 109,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_7 = 110,
+ AXI_WRITE_RESPONSE_CHANNEL_TOTAL_RESPONSES = 111,
+ TOTAL_MMU_MISSES = 112,
+ MMU_READ_MISSES = 113,
+ MMU_WRITE_MISSES = 114,
+ TOTAL_MMU_HITS = 115,
+ MMU_READ_HITS = 116,
+ MMU_WRITE_HITS = 117,
+ SPLIT_MODE_TC_HITS = 118,
+ SPLIT_MODE_TC_MISSES = 119,
+ SPLIT_MODE_NON_TC_HITS = 120,
+ SPLIT_MODE_NON_TC_MISSES = 121,
+ STALL_AWAITING_TLB_MISS_FETCH = 122,
+ MMU_TLB_MISS_READ_BURSTS_RECEIVED = 123,
+ MMU_TLB_MISS_DATA_BEATS_READ = 124,
+ CP_CYCLES_HELD_OFF = 125,
+ VGT_CYCLES_HELD_OFF = 126,
+ TC_CYCLES_HELD_OFF = 127,
+ TC_ROQ_CYCLES_HELD_OFF = 128,
+ TC_CYCLES_HELD_OFF_TCD_FULL = 129,
+ RB_CYCLES_HELD_OFF = 130,
+ TOTAL_CYCLES_ANY_CLNT_HELD_OFF = 131,
+ TLB_MISS_CYCLES_HELD_OFF = 132,
+ AXI_READ_REQUEST_HELD_OFF = 133,
+ AXI_WRITE_REQUEST_HELD_OFF = 134,
+ AXI_REQUEST_HELD_OFF = 135,
+ AXI_REQUEST_HELD_OFF_INFLIGHT_LIMIT = 136,
+ AXI_WRITE_DATA_HELD_OFF = 137,
+ CP_SAME_PAGE_BANK_REQUESTS = 138,
+ VGT_SAME_PAGE_BANK_REQUESTS = 139,
+ TC_SAME_PAGE_BANK_REQUESTS = 140,
+ TC_ARB_HOLD_SAME_PAGE_BANK_REQUESTS = 141,
+ RB_SAME_PAGE_BANK_REQUESTS = 142,
+ TOTAL_SAME_PAGE_BANK_REQUESTS = 143,
+ CP_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 144,
+ VGT_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 145,
+ TC_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 146,
+ RB_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 147,
+ TOTAL_SAME_PAGE_BANK_KILLED_FAIRNESS_LIMIT = 148,
+ TOTAL_MH_READ_REQUESTS = 149,
+ TOTAL_MH_WRITE_REQUESTS = 150,
+ TOTAL_MH_REQUESTS = 151,
+ MH_BUSY = 152,
+ CP_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 153,
+ VGT_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 154,
+ TC_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 155,
+ RB_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 156,
+ TC_ROQ_N_VALID_ENTRIES = 157,
+ ARQ_N_ENTRIES = 158,
+ WDB_N_ENTRIES = 159,
+ MH_READ_LATENCY_OUTST_REQ_SUM = 160,
+ MC_READ_LATENCY_OUTST_REQ_SUM = 161,
+ MC_TOTAL_READ_REQUESTS = 162,
+ ELAPSED_CYCLES_MH_GATED_CLK = 163,
+ ELAPSED_CLK_CYCLES = 164,
+ CP_W_16B_REQUESTS = 165,
+ CP_W_32B_REQUESTS = 166,
+ TC_16B_REQUESTS = 167,
+ TC_32B_REQUESTS = 168,
+ PA_REQUESTS = 169,
+ PA_DATA_BYTES_WRITTEN = 170,
+ PA_WRITE_CLEAN_RESPONSES = 171,
+ PA_CYCLES_HELD_OFF = 172,
+ AXI_READ_REQUEST_DATA_BEATS_ID_0 = 173,
+ AXI_READ_REQUEST_DATA_BEATS_ID_1 = 174,
+ AXI_READ_REQUEST_DATA_BEATS_ID_2 = 175,
+ AXI_READ_REQUEST_DATA_BEATS_ID_3 = 176,
+ AXI_READ_REQUEST_DATA_BEATS_ID_4 = 177,
+ AXI_READ_REQUEST_DATA_BEATS_ID_5 = 178,
+ AXI_READ_REQUEST_DATA_BEATS_ID_6 = 179,
+ AXI_READ_REQUEST_DATA_BEATS_ID_7 = 180,
+ AXI_TOTAL_READ_REQUEST_DATA_BEATS = 181,
+};
+
enum adreno_mmu_clnt_beh {
BEH_NEVR = 0,
BEH_TRAN_RNG = 1,
@@ -268,9 +1118,9 @@ enum sq_tex_border_color {
};
enum sq_tex_sign {
- SQ_TEX_SIGN_UNISIGNED = 0,
+ SQ_TEX_SIGN_UNSIGNED = 0,
SQ_TEX_SIGN_SIGNED = 1,
- SQ_TEX_SIGN_UNISIGNED_BIASED = 2,
+ SQ_TEX_SIGN_UNSIGNED_BIASED = 2,
SQ_TEX_SIGN_GAMMA = 3,
};
@@ -1842,6 +2692,10 @@ static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE 0x00002380
+#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_OFFSET 0x00002381
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_SCALE 0x00002382
+
#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_OFFSET 0x00002383
#define REG_A2XX_SQ_CONSTANT_0 0x00004000
@@ -1858,6 +2712,220 @@ static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
+#define REG_A2XX_PA_SU_PERFCOUNTER0_SELECT 0x00000c88
+
+#define REG_A2XX_PA_SU_PERFCOUNTER1_SELECT 0x00000c89
+
+#define REG_A2XX_PA_SU_PERFCOUNTER2_SELECT 0x00000c8a
+
+#define REG_A2XX_PA_SU_PERFCOUNTER3_SELECT 0x00000c8b
+
+#define REG_A2XX_PA_SU_PERFCOUNTER0_LOW 0x00000c8c
+
+#define REG_A2XX_PA_SU_PERFCOUNTER0_HI 0x00000c8d
+
+#define REG_A2XX_PA_SU_PERFCOUNTER1_LOW 0x00000c8e
+
+#define REG_A2XX_PA_SU_PERFCOUNTER1_HI 0x00000c8f
+
+#define REG_A2XX_PA_SU_PERFCOUNTER2_LOW 0x00000c90
+
+#define REG_A2XX_PA_SU_PERFCOUNTER2_HI 0x00000c91
+
+#define REG_A2XX_PA_SU_PERFCOUNTER3_LOW 0x00000c92
+
+#define REG_A2XX_PA_SU_PERFCOUNTER3_HI 0x00000c93
+
+#define REG_A2XX_PA_SC_PERFCOUNTER0_SELECT 0x00000c98
+
+#define REG_A2XX_PA_SC_PERFCOUNTER0_LOW 0x00000c99
+
+#define REG_A2XX_PA_SC_PERFCOUNTER0_HI 0x00000c9a
+
+#define REG_A2XX_VGT_PERFCOUNTER0_SELECT 0x00000c48
+
+#define REG_A2XX_VGT_PERFCOUNTER1_SELECT 0x00000c49
+
+#define REG_A2XX_VGT_PERFCOUNTER2_SELECT 0x00000c4a
+
+#define REG_A2XX_VGT_PERFCOUNTER3_SELECT 0x00000c4b
+
+#define REG_A2XX_VGT_PERFCOUNTER0_LOW 0x00000c4c
+
+#define REG_A2XX_VGT_PERFCOUNTER1_LOW 0x00000c4e
+
+#define REG_A2XX_VGT_PERFCOUNTER2_LOW 0x00000c50
+
+#define REG_A2XX_VGT_PERFCOUNTER3_LOW 0x00000c52
+
+#define REG_A2XX_VGT_PERFCOUNTER0_HI 0x00000c4d
+
+#define REG_A2XX_VGT_PERFCOUNTER1_HI 0x00000c4f
+
+#define REG_A2XX_VGT_PERFCOUNTER2_HI 0x00000c51
+
+#define REG_A2XX_VGT_PERFCOUNTER3_HI 0x00000c53
+
+#define REG_A2XX_TCR_PERFCOUNTER0_SELECT 0x00000e05
+
+#define REG_A2XX_TCR_PERFCOUNTER1_SELECT 0x00000e08
+
+#define REG_A2XX_TCR_PERFCOUNTER0_HI 0x00000e06
+
+#define REG_A2XX_TCR_PERFCOUNTER1_HI 0x00000e09
+
+#define REG_A2XX_TCR_PERFCOUNTER0_LOW 0x00000e07
+
+#define REG_A2XX_TCR_PERFCOUNTER1_LOW 0x00000e0a
+
+#define REG_A2XX_TP0_PERFCOUNTER0_SELECT 0x00000e1f
+
+#define REG_A2XX_TP0_PERFCOUNTER0_HI 0x00000e20
+
+#define REG_A2XX_TP0_PERFCOUNTER0_LOW 0x00000e21
+
+#define REG_A2XX_TP0_PERFCOUNTER1_SELECT 0x00000e22
+
+#define REG_A2XX_TP0_PERFCOUNTER1_HI 0x00000e23
+
+#define REG_A2XX_TP0_PERFCOUNTER1_LOW 0x00000e24
+
+#define REG_A2XX_TCM_PERFCOUNTER0_SELECT 0x00000e54
+
+#define REG_A2XX_TCM_PERFCOUNTER1_SELECT 0x00000e57
+
+#define REG_A2XX_TCM_PERFCOUNTER0_HI 0x00000e55
+
+#define REG_A2XX_TCM_PERFCOUNTER1_HI 0x00000e58
+
+#define REG_A2XX_TCM_PERFCOUNTER0_LOW 0x00000e56
+
+#define REG_A2XX_TCM_PERFCOUNTER1_LOW 0x00000e59
+
+#define REG_A2XX_TCF_PERFCOUNTER0_SELECT 0x00000e5a
+
+#define REG_A2XX_TCF_PERFCOUNTER1_SELECT 0x00000e5d
+
+#define REG_A2XX_TCF_PERFCOUNTER2_SELECT 0x00000e60
+
+#define REG_A2XX_TCF_PERFCOUNTER3_SELECT 0x00000e63
+
+#define REG_A2XX_TCF_PERFCOUNTER4_SELECT 0x00000e66
+
+#define REG_A2XX_TCF_PERFCOUNTER5_SELECT 0x00000e69
+
+#define REG_A2XX_TCF_PERFCOUNTER6_SELECT 0x00000e6c
+
+#define REG_A2XX_TCF_PERFCOUNTER7_SELECT 0x00000e6f
+
+#define REG_A2XX_TCF_PERFCOUNTER8_SELECT 0x00000e72
+
+#define REG_A2XX_TCF_PERFCOUNTER9_SELECT 0x00000e75
+
+#define REG_A2XX_TCF_PERFCOUNTER10_SELECT 0x00000e78
+
+#define REG_A2XX_TCF_PERFCOUNTER11_SELECT 0x00000e7b
+
+#define REG_A2XX_TCF_PERFCOUNTER0_HI 0x00000e5b
+
+#define REG_A2XX_TCF_PERFCOUNTER1_HI 0x00000e5e
+
+#define REG_A2XX_TCF_PERFCOUNTER2_HI 0x00000e61
+
+#define REG_A2XX_TCF_PERFCOUNTER3_HI 0x00000e64
+
+#define REG_A2XX_TCF_PERFCOUNTER4_HI 0x00000e67
+
+#define REG_A2XX_TCF_PERFCOUNTER5_HI 0x00000e6a
+
+#define REG_A2XX_TCF_PERFCOUNTER6_HI 0x00000e6d
+
+#define REG_A2XX_TCF_PERFCOUNTER7_HI 0x00000e70
+
+#define REG_A2XX_TCF_PERFCOUNTER8_HI 0x00000e73
+
+#define REG_A2XX_TCF_PERFCOUNTER9_HI 0x00000e76
+
+#define REG_A2XX_TCF_PERFCOUNTER10_HI 0x00000e79
+
+#define REG_A2XX_TCF_PERFCOUNTER11_HI 0x00000e7c
+
+#define REG_A2XX_TCF_PERFCOUNTER0_LOW 0x00000e5c
+
+#define REG_A2XX_TCF_PERFCOUNTER1_LOW 0x00000e5f
+
+#define REG_A2XX_TCF_PERFCOUNTER2_LOW 0x00000e62
+
+#define REG_A2XX_TCF_PERFCOUNTER3_LOW 0x00000e65
+
+#define REG_A2XX_TCF_PERFCOUNTER4_LOW 0x00000e68
+
+#define REG_A2XX_TCF_PERFCOUNTER5_LOW 0x00000e6b
+
+#define REG_A2XX_TCF_PERFCOUNTER6_LOW 0x00000e6e
+
+#define REG_A2XX_TCF_PERFCOUNTER7_LOW 0x00000e71
+
+#define REG_A2XX_TCF_PERFCOUNTER8_LOW 0x00000e74
+
+#define REG_A2XX_TCF_PERFCOUNTER9_LOW 0x00000e77
+
+#define REG_A2XX_TCF_PERFCOUNTER10_LOW 0x00000e7a
+
+#define REG_A2XX_TCF_PERFCOUNTER11_LOW 0x00000e7d
+
+#define REG_A2XX_SQ_PERFCOUNTER0_SELECT 0x00000dc8
+
+#define REG_A2XX_SQ_PERFCOUNTER1_SELECT 0x00000dc9
+
+#define REG_A2XX_SQ_PERFCOUNTER2_SELECT 0x00000dca
+
+#define REG_A2XX_SQ_PERFCOUNTER3_SELECT 0x00000dcb
+
+#define REG_A2XX_SQ_PERFCOUNTER0_LOW 0x00000dcc
+
+#define REG_A2XX_SQ_PERFCOUNTER0_HI 0x00000dcd
+
+#define REG_A2XX_SQ_PERFCOUNTER1_LOW 0x00000dce
+
+#define REG_A2XX_SQ_PERFCOUNTER1_HI 0x00000dcf
+
+#define REG_A2XX_SQ_PERFCOUNTER2_LOW 0x00000dd0
+
+#define REG_A2XX_SQ_PERFCOUNTER2_HI 0x00000dd1
+
+#define REG_A2XX_SQ_PERFCOUNTER3_LOW 0x00000dd2
+
+#define REG_A2XX_SQ_PERFCOUNTER3_HI 0x00000dd3
+
+#define REG_A2XX_SX_PERFCOUNTER0_SELECT 0x00000dd4
+
+#define REG_A2XX_SX_PERFCOUNTER0_LOW 0x00000dd8
+
+#define REG_A2XX_SX_PERFCOUNTER0_HI 0x00000dd9
+
+#define REG_A2XX_MH_PERFCOUNTER0_SELECT 0x00000a46
+
+#define REG_A2XX_MH_PERFCOUNTER1_SELECT 0x00000a4a
+
+#define REG_A2XX_MH_PERFCOUNTER0_CONFIG 0x00000a47
+
+#define REG_A2XX_MH_PERFCOUNTER1_CONFIG 0x00000a4b
+
+#define REG_A2XX_MH_PERFCOUNTER0_LOW 0x00000a48
+
+#define REG_A2XX_MH_PERFCOUNTER1_LOW 0x00000a4c
+
+#define REG_A2XX_MH_PERFCOUNTER0_HI 0x00000a49
+
+#define REG_A2XX_MH_PERFCOUNTER1_HI 0x00000a4d
+
+#define REG_A2XX_RB_PERFCOUNTER0_SELECT 0x00000f04
+
+#define REG_A2XX_RB_PERFCOUNTER0_LOW 0x00000f08
+
+#define REG_A2XX_RB_PERFCOUNTER0_HI 0x00000f09
+
#define REG_A2XX_SQ_TEX_0 0x00000000
#define A2XX_SQ_TEX_0_TYPE__MASK 0x00000003
#define A2XX_SQ_TEX_0_TYPE__SHIFT 0
@@ -1913,7 +2981,7 @@ static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
{
return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
}
-#define A2XX_SQ_TEX_0_TILED 0x00000002
+#define A2XX_SQ_TEX_0_TILED 0x80000000
#define REG_A2XX_SQ_TEX_1 0x00000001
#define A2XX_SQ_TEX_1_FORMAT__MASK 0x0000003f
@@ -2001,7 +3069,7 @@ static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
}
#define A2XX_SQ_TEX_3_EXP_ADJUST__MASK 0x0007e000
#define A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT 13
-static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(uint32_t val)
+static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(int32_t val)
{
return ((val) << A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT) & A2XX_SQ_TEX_3_EXP_ADJUST__MASK;
}
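
The new A2XX perfcounter registers above are plain address definitions; as a rough usage sketch (not part of this patch — gpu_read() and struct msm_gpu are assumed from the driver's existing helpers), a 64-bit counter value would be assembled from its LOW/HI pair:

static uint64_t a2xx_read_sq_perfcounter0(struct msm_gpu *gpu)
{
	/* Non-atomic LOW/HI read; sufficient for an illustrative sketch. */
	uint32_t lo = gpu_read(gpu, REG_A2XX_SQ_PERFCOUNTER0_LOW);
	uint32_t hi = gpu_read(gpu, REG_A2XX_SQ_PERFCOUNTER0_HI);

	return ((uint64_t)hi << 32) | lo;
}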
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 60f6472a3e58..6021f8d9efd1 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -408,7 +408,7 @@ a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
struct msm_gem_address_space *aspace;
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
- SZ_16M + 0xfff * SZ_64K);
+ 0xfff * SZ_64K);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 17059f242a98..16f9ef453bf8 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -48,7 +50,9 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
enum a3xx_tile_mode {
LINEAR = 0,
+ TILE_4X4 = 1,
TILE_32X32 = 2,
+ TILE_4X2 = 3,
};
enum a3xx_state_block_id {
@@ -123,6 +127,7 @@ enum a3xx_vtx_fmt {
VFMT_2_10_10_10_UNORM = 61,
VFMT_2_10_10_10_SINT = 62,
VFMT_2_10_10_10_SNORM = 63,
+ VFMT_NONE = 255,
};
enum a3xx_tex_fmt {
@@ -206,15 +211,7 @@ enum a3xx_tex_fmt {
TFMT_ETC2_RGBA8 = 116,
TFMT_ETC2_RGB8A1 = 117,
TFMT_ETC2_RGB8 = 118,
-};
-
-enum a3xx_tex_fetchsize {
- TFETCH_DISABLE = 0,
- TFETCH_1_BYTE = 1,
- TFETCH_2_BYTE = 2,
- TFETCH_4_BYTE = 3,
- TFETCH_8_BYTE = 4,
- TFETCH_16_BYTE = 5,
+ TFMT_NONE = 255,
};
enum a3xx_color_fmt {
@@ -228,8 +225,8 @@ enum a3xx_color_fmt {
RB_R8G8B8A8_SINT = 11,
RB_R8G8_UNORM = 12,
RB_R8G8_SNORM = 13,
- RB_R8_UINT = 14,
- RB_R8_SINT = 15,
+ RB_R8G8_UINT = 14,
+ RB_R8G8_SINT = 15,
RB_R10G10B10A2_UNORM = 16,
RB_A2R10G10B10_UNORM = 17,
RB_R10G10B10A2_UINT = 18,
@@ -261,6 +258,7 @@ enum a3xx_color_fmt {
RB_R32_UINT = 56,
RB_R32G32_UINT = 57,
RB_R32G32B32A32_UINT = 59,
+ RB_NONE = 255,
};
enum a3xx_cp_perfcounter_select {
@@ -932,6 +930,9 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTER 0x00002000
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTROID 0x00004000
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTROID 0x00008000
#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
#define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
@@ -1170,10 +1171,12 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
}
#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
-#define A3XX_RB_RENDER_CONTROL_XCOORD 0x00004000
-#define A3XX_RB_RENDER_CONTROL_YCOORD 0x00008000
-#define A3XX_RB_RENDER_CONTROL_ZCOORD 0x00010000
-#define A3XX_RB_RENDER_CONTROL_WCOORD 0x00020000
+#define A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK 0x0003c000
+#define A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT 14
+static inline uint32_t A3XX_RB_RENDER_CONTROL_COORD_MASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT) & A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK;
+}
#define A3XX_RB_RENDER_CONTROL_I_CLAMP_ENABLE 0x00080000
#define A3XX_RB_RENDER_CONTROL_COV_VALUE_OUTPUT_ENABLE 0x00100000
#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000
@@ -1755,11 +1758,29 @@ static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
}
#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
-#define A3XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
-#define A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0
-static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK 0x000000ff
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK 0x0000ff00
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT 8
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK 0x00ff0000
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK 0xff000000
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT 24
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID(uint32_t val)
{
- return ((val) << A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+ return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK;
}
#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
@@ -1944,8 +1965,6 @@ static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
-#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
-
static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
@@ -3107,7 +3126,12 @@ static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
}
#define REG_A3XX_TEX_CONST_0 0x00000000
-#define A3XX_TEX_CONST_0_TILED 0x00000001
+#define A3XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
+#define A3XX_TEX_CONST_0_TILE_MODE__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_0_TILE_MODE(enum a3xx_tile_mode val)
+{
+ return ((val) << A3XX_TEX_CONST_0_TILE_MODE__SHIFT) & A3XX_TEX_CONST_0_TILE_MODE__MASK;
+}
#define A3XX_TEX_CONST_0_SRGB 0x00000004
#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
@@ -3172,11 +3196,11 @@ static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val)
{
return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK;
}
-#define A3XX_TEX_CONST_1_FETCHSIZE__MASK 0xf0000000
-#define A3XX_TEX_CONST_1_FETCHSIZE__SHIFT 28
-static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val)
+#define A3XX_TEX_CONST_1_PITCHALIGN__MASK 0xf0000000
+#define A3XX_TEX_CONST_1_PITCHALIGN__SHIFT 28
+static inline uint32_t A3XX_TEX_CONST_1_PITCHALIGN(uint32_t val)
{
- return ((val) << A3XX_TEX_CONST_1_FETCHSIZE__SHIFT) & A3XX_TEX_CONST_1_FETCHSIZE__MASK;
+ return ((val) << A3XX_TEX_CONST_1_PITCHALIGN__SHIFT) & A3XX_TEX_CONST_1_PITCHALIGN__MASK;
}
#define REG_A3XX_TEX_CONST_2 0x00000002
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 9b51e25a9583..a7eaf2c83fe2 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -91,6 +93,7 @@ enum a4xx_color_fmt {
RB4_R32G32B32A32_FLOAT = 60,
RB4_R32G32B32A32_UINT = 61,
RB4_R32G32B32A32_SINT = 62,
+ RB4_NONE = 255,
};
enum a4xx_tile_mode {
@@ -161,6 +164,7 @@ enum a4xx_vtx_fmt {
VFMT4_2_10_10_10_UNORM = 61,
VFMT4_2_10_10_10_SINT = 62,
VFMT4_2_10_10_10_SNORM = 63,
+ VFMT4_NONE = 255,
};
enum a4xx_tex_fmt {
@@ -248,14 +252,7 @@ enum a4xx_tex_fmt {
TFMT4_ASTC_10x10 = 122,
TFMT4_ASTC_12x10 = 123,
TFMT4_ASTC_12x12 = 124,
-};
-
-enum a4xx_tex_fetchsize {
- TFETCH4_1_BYTE = 0,
- TFETCH4_2_BYTE = 1,
- TFETCH4_4_BYTE = 2,
- TFETCH4_8_BYTE = 3,
- TFETCH4_16_BYTE = 4,
+ TFMT4_NONE = 255,
};
enum a4xx_depth_format {
@@ -949,10 +946,12 @@ static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val)
}
#define REG_A4XX_RB_RENDER_CONTROL2 0x000020a3
-#define A4XX_RB_RENDER_CONTROL2_XCOORD 0x00000001
-#define A4XX_RB_RENDER_CONTROL2_YCOORD 0x00000002
-#define A4XX_RB_RENDER_CONTROL2_ZCOORD 0x00000004
-#define A4XX_RB_RENDER_CONTROL2_WCOORD 0x00000008
+#define A4XX_RB_RENDER_CONTROL2_COORD_MASK__MASK 0x0000000f
+#define A4XX_RB_RENDER_CONTROL2_COORD_MASK__SHIFT 0
+static inline uint32_t A4XX_RB_RENDER_CONTROL2_COORD_MASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_CONTROL2_COORD_MASK__SHIFT) & A4XX_RB_RENDER_CONTROL2_COORD_MASK__MASK;
+}
#define A4XX_RB_RENDER_CONTROL2_SAMPLEMASK 0x00000010
#define A4XX_RB_RENDER_CONTROL2_FACENESS 0x00000020
#define A4XX_RB_RENDER_CONTROL2_SAMPLEID 0x00000040
@@ -963,7 +962,10 @@ static inline uint32_t A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(uint32_t val)
return ((val) << A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK;
}
#define A4XX_RB_RENDER_CONTROL2_SAMPLEID_HR 0x00000800
-#define A4XX_RB_RENDER_CONTROL2_VARYING 0x00001000
+#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_PIXEL 0x00001000
+#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_CENTROID 0x00002000
+#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_SAMPLE 0x00004000
+#define A4XX_RB_RENDER_CONTROL2_SIZE 0x00008000
static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
@@ -1877,10 +1879,6 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x
#define REG_A4XX_RBBM_PERFCTR_TP_0_HI 0x00000115
-#define REG_A4XX_RBBM_PERFCTR_TP_0_LO 0x00000114
-
-#define REG_A4XX_RBBM_PERFCTR_TP_0_HI 0x00000115
-
#define REG_A4XX_RBBM_PERFCTR_TP_1_LO 0x00000116
#define REG_A4XX_RBBM_PERFCTR_TP_1_HI 0x00000117
@@ -2061,8 +2059,6 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1 0x0000009a
-#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
-
#define REG_A4XX_RBBM_PERFCTR_CTL 0x00000170
#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD0 0x00000171
@@ -2210,8 +2206,18 @@ static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
{
return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
}
-#define A4XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
-#define A4XX_CP_PROTECT_REG_TRAP_READ 0x40000000
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE__MASK 0x20000000
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT 29
+static inline uint32_t A4XX_CP_PROTECT_REG_TRAP_WRITE(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT) & A4XX_CP_PROTECT_REG_TRAP_WRITE__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_READ__MASK 0x40000000
+#define A4XX_CP_PROTECT_REG_TRAP_READ__SHIFT 30
+static inline uint32_t A4XX_CP_PROTECT_REG_TRAP_READ(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_TRAP_READ__SHIFT) & A4XX_CP_PROTECT_REG_TRAP_READ__MASK;
+}
#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
@@ -3151,8 +3157,9 @@ static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
-#define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003
-#define A4XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR 0x00000001
+#define REG_A4XX_GRAS_CNTL 0x00002003
+#define A4XX_GRAS_CNTL_IJ_PERSP 0x00000001
+#define A4XX_GRAS_CNTL_IJ_LINEAR 0x00000002
#define REG_A4XX_GRAS_CL_GB_CLIP_ADJ 0x00002004
#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
@@ -3524,14 +3531,44 @@ static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID(uint32_t val)
}
#define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3
-#define A4XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
-#define A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0
-static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
+}
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
+}
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
{
- return ((val) << A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
+}
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
}
#define REG_A4XX_HLSQ_CONTROL_4_REG 0x000023c4
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8
+static inline uint32_t A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
+}
#define REG_A4XX_HLSQ_VS_CONTROL_REG 0x000023c5
#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
@@ -4115,11 +4152,11 @@ static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
}
#define REG_A4XX_TEX_CONST_2 0x00000002
-#define A4XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f
-#define A4XX_TEX_CONST_2_FETCHSIZE__SHIFT 0
-static inline uint32_t A4XX_TEX_CONST_2_FETCHSIZE(enum a4xx_tex_fetchsize val)
+#define A4XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f
+#define A4XX_TEX_CONST_2_PITCHALIGN__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
{
- return ((val) << A4XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A4XX_TEX_CONST_2_FETCHSIZE__MASK;
+ return ((val) << A4XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A4XX_TEX_CONST_2_PITCHALIGN__MASK;
}
#define A4XX_TEX_CONST_2_PITCH__MASK 0x3ffffe00
#define A4XX_TEX_CONST_2_PITCH__SHIFT 9
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index 4a61d4e72c98..346cc6ff3a36 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/ubuntu/envytools/envytools/rnndb/./adreno.xml ( 501 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/a2xx.xml ( 79608 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/adreno_common.xml ( 14239 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/adreno_pm4.xml ( 43155 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/a5xx.xml ( 147291 bytes, from 2019-05-29 14:51:41)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/a6xx.xml ( 148461 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2019-05-29 01:28:15)
-- /home/ubuntu/envytools/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2019-05-29 01:28:15)
-
-Copyright (C) 2013-2019 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -91,6 +93,7 @@ enum a5xx_color_fmt {
RB5_R32G32B32A32_FLOAT = 130,
RB5_R32G32B32A32_UINT = 131,
RB5_R32G32B32A32_SINT = 132,
+ RB5_NONE = 255,
};
enum a5xx_tile_mode {
@@ -165,6 +168,7 @@ enum a5xx_vtx_fmt {
VFMT5_32_32_32_32_UINT = 131,
VFMT5_32_32_32_32_SINT = 132,
VFMT5_32_32_32_32_FIXED = 133,
+ VFMT5_NONE = 255,
};
enum a5xx_tex_fmt {
@@ -250,14 +254,7 @@ enum a5xx_tex_fmt {
TFMT5_ASTC_10x10 = 204,
TFMT5_ASTC_12x10 = 205,
TFMT5_ASTC_12x12 = 206,
-};
-
-enum a5xx_tex_fetchsize {
- TFETCH5_1_BYTE = 0,
- TFETCH5_2_BYTE = 1,
- TFETCH5_4_BYTE = 2,
- TFETCH5_8_BYTE = 3,
- TFETCH5_16_BYTE = 4,
+ TFMT5_NONE = 255,
};
enum a5xx_depth_format {
@@ -1052,8 +1049,18 @@ static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
{
return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
}
-#define A5XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
-#define A5XX_CP_PROTECT_REG_TRAP_READ 0x40000000
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE__MASK 0x20000000
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT 29
+static inline uint32_t A5XX_CP_PROTECT_REG_TRAP_WRITE(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT) & A5XX_CP_PROTECT_REG_TRAP_WRITE__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_READ__MASK 0x40000000
+#define A5XX_CP_PROTECT_REG_TRAP_READ__SHIFT 30
+static inline uint32_t A5XX_CP_PROTECT_REG_TRAP_READ(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_TRAP_READ__SHIFT) & A5XX_CP_PROTECT_REG_TRAP_READ__MASK;
+}
#define REG_A5XX_CP_PROTECT_CNTL 0x000008a0
@@ -1825,37 +1832,192 @@ static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI 0x000004d3
#define REG_A5XX_RBBM_STATUS 0x000004f5
-#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x80000000
-#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x40000000
-#define A5XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
-#define A5XX_RBBM_STATUS_VSC_BUSY 0x10000000
-#define A5XX_RBBM_STATUS_TPL1_BUSY 0x08000000
-#define A5XX_RBBM_STATUS_SP_BUSY 0x04000000
-#define A5XX_RBBM_STATUS_UCHE_BUSY 0x02000000
-#define A5XX_RBBM_STATUS_VPC_BUSY 0x01000000
-#define A5XX_RBBM_STATUS_VFDP_BUSY 0x00800000
-#define A5XX_RBBM_STATUS_VFD_BUSY 0x00400000
-#define A5XX_RBBM_STATUS_TESS_BUSY 0x00200000
-#define A5XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
-#define A5XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
-#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY 0x00040000
-#define A5XX_RBBM_STATUS_DCOM_BUSY 0x00020000
-#define A5XX_RBBM_STATUS_COM_BUSY 0x00010000
-#define A5XX_RBBM_STATUS_LRZ_BUZY 0x00008000
-#define A5XX_RBBM_STATUS_A2D_DSP_BUSY 0x00004000
-#define A5XX_RBBM_STATUS_CCUFCHE_BUSY 0x00002000
-#define A5XX_RBBM_STATUS_RB_BUSY 0x00001000
-#define A5XX_RBBM_STATUS_RAS_BUSY 0x00000800
-#define A5XX_RBBM_STATUS_TSE_BUSY 0x00000400
-#define A5XX_RBBM_STATUS_VBIF_BUSY 0x00000200
-#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST 0x00000100
-#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST 0x00000080
-#define A5XX_RBBM_STATUS_CP_BUSY 0x00000040
-#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY 0x00000020
-#define A5XX_RBBM_STATUS_CP_CRASH_BUSY 0x00000010
-#define A5XX_RBBM_STATUS_CP_ETS_BUSY 0x00000008
-#define A5XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
-#define A5XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__MASK 0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__SHIFT 31
+static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__MASK;
+}
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__MASK 0x40000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__SHIFT 30
+static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__MASK;
+}
+#define A5XX_RBBM_STATUS_HLSQ_BUSY__MASK 0x20000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY__SHIFT 29
+static inline uint32_t A5XX_RBBM_STATUS_HLSQ_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_HLSQ_BUSY__SHIFT) & A5XX_RBBM_STATUS_HLSQ_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VSC_BUSY__MASK 0x10000000
+#define A5XX_RBBM_STATUS_VSC_BUSY__SHIFT 28
+static inline uint32_t A5XX_RBBM_STATUS_VSC_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_VSC_BUSY__SHIFT) & A5XX_RBBM_STATUS_VSC_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_TPL1_BUSY__MASK 0x08000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY__SHIFT 27
+static inline uint32_t A5XX_RBBM_STATUS_TPL1_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_TPL1_BUSY__SHIFT) & A5XX_RBBM_STATUS_TPL1_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_SP_BUSY__MASK 0x04000000
+#define A5XX_RBBM_STATUS_SP_BUSY__SHIFT 26
+static inline uint32_t A5XX_RBBM_STATUS_SP_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_SP_BUSY__SHIFT) & A5XX_RBBM_STATUS_SP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_UCHE_BUSY__MASK 0x02000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY__SHIFT 25
+static inline uint32_t A5XX_RBBM_STATUS_UCHE_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_UCHE_BUSY__SHIFT) & A5XX_RBBM_STATUS_UCHE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VPC_BUSY__MASK 0x01000000
+#define A5XX_RBBM_STATUS_VPC_BUSY__SHIFT 24
+static inline uint32_t A5XX_RBBM_STATUS_VPC_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_VPC_BUSY__SHIFT) & A5XX_RBBM_STATUS_VPC_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VFDP_BUSY__MASK 0x00800000
+#define A5XX_RBBM_STATUS_VFDP_BUSY__SHIFT 23
+static inline uint32_t A5XX_RBBM_STATUS_VFDP_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_VFDP_BUSY__SHIFT) & A5XX_RBBM_STATUS_VFDP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VFD_BUSY__MASK 0x00400000
+#define A5XX_RBBM_STATUS_VFD_BUSY__SHIFT 22
+static inline uint32_t A5XX_RBBM_STATUS_VFD_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_VFD_BUSY__SHIFT) & A5XX_RBBM_STATUS_VFD_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_TESS_BUSY__MASK 0x00200000
+#define A5XX_RBBM_STATUS_TESS_BUSY__SHIFT 21
+static inline uint32_t A5XX_RBBM_STATUS_TESS_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_TESS_BUSY__SHIFT) & A5XX_RBBM_STATUS_TESS_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY__MASK 0x00100000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY__SHIFT 20
+static inline uint32_t A5XX_RBBM_STATUS_PC_VSD_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_PC_VSD_BUSY__SHIFT) & A5XX_RBBM_STATUS_PC_VSD_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY__MASK 0x00080000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY__SHIFT 19
+static inline uint32_t A5XX_RBBM_STATUS_PC_DCALL_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_PC_DCALL_BUSY__SHIFT) & A5XX_RBBM_STATUS_PC_DCALL_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__MASK 0x00040000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__SHIFT 18
+static inline uint32_t A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__SHIFT) & A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_DCOM_BUSY__MASK 0x00020000
+#define A5XX_RBBM_STATUS_DCOM_BUSY__SHIFT 17
+static inline uint32_t A5XX_RBBM_STATUS_DCOM_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_DCOM_BUSY__SHIFT) & A5XX_RBBM_STATUS_DCOM_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_COM_BUSY__MASK 0x00010000
+#define A5XX_RBBM_STATUS_COM_BUSY__SHIFT 16
+static inline uint32_t A5XX_RBBM_STATUS_COM_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_COM_BUSY__SHIFT) & A5XX_RBBM_STATUS_COM_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_LRZ_BUZY__MASK 0x00008000
+#define A5XX_RBBM_STATUS_LRZ_BUZY__SHIFT 15
+static inline uint32_t A5XX_RBBM_STATUS_LRZ_BUZY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_LRZ_BUZY__SHIFT) & A5XX_RBBM_STATUS_LRZ_BUZY__MASK;
+}
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY__MASK 0x00004000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY__SHIFT 14
+static inline uint32_t A5XX_RBBM_STATUS_A2D_DSP_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_A2D_DSP_BUSY__SHIFT) & A5XX_RBBM_STATUS_A2D_DSP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY__MASK 0x00002000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY__SHIFT 13
+static inline uint32_t A5XX_RBBM_STATUS_CCUFCHE_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CCUFCHE_BUSY__SHIFT) & A5XX_RBBM_STATUS_CCUFCHE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_RB_BUSY__MASK 0x00001000
+#define A5XX_RBBM_STATUS_RB_BUSY__SHIFT 12
+static inline uint32_t A5XX_RBBM_STATUS_RB_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_RB_BUSY__SHIFT) & A5XX_RBBM_STATUS_RB_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_RAS_BUSY__MASK 0x00000800
+#define A5XX_RBBM_STATUS_RAS_BUSY__SHIFT 11
+static inline uint32_t A5XX_RBBM_STATUS_RAS_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_RAS_BUSY__SHIFT) & A5XX_RBBM_STATUS_RAS_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_TSE_BUSY__MASK 0x00000400
+#define A5XX_RBBM_STATUS_TSE_BUSY__SHIFT 10
+static inline uint32_t A5XX_RBBM_STATUS_TSE_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_TSE_BUSY__SHIFT) & A5XX_RBBM_STATUS_TSE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VBIF_BUSY__MASK 0x00000200
+#define A5XX_RBBM_STATUS_VBIF_BUSY__SHIFT 9
+static inline uint32_t A5XX_RBBM_STATUS_VBIF_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_VBIF_BUSY__SHIFT) & A5XX_RBBM_STATUS_VBIF_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__MASK 0x00000100
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__SHIFT 8
+static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__MASK 0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__SHIFT 7
+static inline uint32_t A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__SHIFT) & A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_BUSY__MASK 0x00000040
+#define A5XX_RBBM_STATUS_CP_BUSY__SHIFT 6
+static inline uint32_t A5XX_RBBM_STATUS_CP_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__MASK 0x00000020
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__SHIFT 5
+static inline uint32_t A5XX_RBBM_STATUS_GPMU_MASTER_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__SHIFT) & A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY__MASK 0x00000010
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY__SHIFT 4
+static inline uint32_t A5XX_RBBM_STATUS_CP_CRASH_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_CRASH_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_CRASH_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY__MASK 0x00000008
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY__SHIFT 3
+static inline uint32_t A5XX_RBBM_STATUS_CP_ETS_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_ETS_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_ETS_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY__MASK 0x00000004
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY__SHIFT 2
+static inline uint32_t A5XX_RBBM_STATUS_CP_PFP_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_PFP_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_PFP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_ME_BUSY__MASK 0x00000002
+#define A5XX_RBBM_STATUS_CP_ME_BUSY__SHIFT 1
+static inline uint32_t A5XX_RBBM_STATUS_CP_ME_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_ME_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_ME_BUSY__MASK;
+}
#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001
#define REG_A5XX_RBBM_STATUS3 0x00000530
@@ -1884,14 +2046,6 @@ static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x0000046a
-#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
-
-#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
-
#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000046f
#define REG_A5XX_RBBM_AHB_ERROR 0x000004ed
@@ -2455,8 +2609,6 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894
-#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
-
#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1
#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6
@@ -2659,12 +2811,16 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_UNKNOWN_E004 0x0000e004
#define REG_A5XX_GRAS_CNTL 0x0000e005
-#define A5XX_GRAS_CNTL_VARYING 0x00000001
-#define A5XX_GRAS_CNTL_UNK3 0x00000008
-#define A5XX_GRAS_CNTL_XCOORD 0x00000040
-#define A5XX_GRAS_CNTL_YCOORD 0x00000080
-#define A5XX_GRAS_CNTL_ZCOORD 0x00000100
-#define A5XX_GRAS_CNTL_WCOORD 0x00000200
+#define A5XX_GRAS_CNTL_IJ_PERSP_PIXEL 0x00000001
+#define A5XX_GRAS_CNTL_IJ_PERSP_CENTROID 0x00000002
+#define A5XX_GRAS_CNTL_IJ_PERSP_SAMPLE 0x00000004
+#define A5XX_GRAS_CNTL_SIZE 0x00000008
+#define A5XX_GRAS_CNTL_COORD_MASK__MASK 0x000003c0
+#define A5XX_GRAS_CNTL_COORD_MASK__SHIFT 6
+static inline uint32_t A5XX_GRAS_CNTL_COORD_MASK(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CNTL_COORD_MASK__SHIFT) & A5XX_GRAS_CNTL_COORD_MASK__MASK;
+}
#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006
#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
@@ -2991,12 +3147,16 @@ static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val
#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
#define REG_A5XX_RB_RENDER_CONTROL0 0x0000e144
-#define A5XX_RB_RENDER_CONTROL0_VARYING 0x00000001
-#define A5XX_RB_RENDER_CONTROL0_UNK3 0x00000008
-#define A5XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
-#define A5XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
-#define A5XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
-#define A5XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
+#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL 0x00000001
+#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID 0x00000002
+#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE 0x00000004
+#define A5XX_RB_RENDER_CONTROL0_SIZE 0x00000008
+#define A5XX_RB_RENDER_CONTROL0_COORD_MASK__MASK 0x000003c0
+#define A5XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT 6
+static inline uint32_t A5XX_RB_RENDER_CONTROL0_COORD_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT) & A5XX_RB_RENDER_CONTROL0_COORD_MASK__MASK;
+}
#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
#define A5XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
@@ -4450,16 +4610,52 @@ static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
{
return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
}
+#define A5XX_HLSQ_CONTROL_2_REG_SIZE__MASK 0xff000000
+#define A5XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SIZE(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SIZE__MASK;
+}
#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787
-#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK 0x000000ff
-#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID(uint32_t val)
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
+}
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
+}
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
{
- return ((val) << A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK;
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
}
#define REG_A5XX_HLSQ_CONTROL_4_REG 0x0000e788
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
+}
#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
@@ -4855,10 +5051,26 @@ static inline uint32_t A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(uint32_t val)
#define REG_A5XX_RB_2D_SRC_FLAGS_HI 0x00002141
+#define REG_A5XX_RB_2D_SRC_FLAGS_PITCH 0x00002142
+#define A5XX_RB_2D_SRC_FLAGS_PITCH__MASK 0xffffffff
+#define A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_2D_SRC_FLAGS_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_SRC_FLAGS_PITCH__MASK;
+}
+
#define REG_A5XX_RB_2D_DST_FLAGS_LO 0x00002143
#define REG_A5XX_RB_2D_DST_FLAGS_HI 0x00002144
+#define REG_A5XX_RB_2D_DST_FLAGS_PITCH 0x00002145
+#define A5XX_RB_2D_DST_FLAGS_PITCH__MASK 0xffffffff
+#define A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_2D_DST_FLAGS_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_DST_FLAGS_PITCH__MASK;
+}
+
#define REG_A5XX_GRAS_2D_BLIT_CNTL 0x00002180
#define REG_A5XX_GRAS_2D_SRC_INFO 0x00002181
@@ -5059,11 +5271,11 @@ static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
}
#define REG_A5XX_TEX_CONST_2 0x00000002
-#define A5XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f
-#define A5XX_TEX_CONST_2_FETCHSIZE__SHIFT 0
-static inline uint32_t A5XX_TEX_CONST_2_FETCHSIZE(enum a5xx_tex_fetchsize val)
+#define A5XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f
+#define A5XX_TEX_CONST_2_PITCHALIGN__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
{
- return ((val) << A5XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A5XX_TEX_CONST_2_FETCHSIZE__MASK;
+ return ((val) << A5XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A5XX_TEX_CONST_2_PITCHALIGN__MASK;
}
#define A5XX_TEX_CONST_2_PITCH__MASK 0x1fffff80
#define A5XX_TEX_CONST_2_PITCH__SHIFT 7
@@ -5085,6 +5297,13 @@ static inline uint32_t A5XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
{
return ((val >> 12) << A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A5XX_TEX_CONST_3_ARRAY_PITCH__MASK;
}
+#define A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK 0x07800000
+#define A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT 23
+static inline uint32_t A5XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK;
+}
+#define A5XX_TEX_CONST_3_TILE_ALL 0x08000000
#define A5XX_TEX_CONST_3_FLAG 0x10000000
#define REG_A5XX_TEX_CONST_4 0x00000004
@@ -5197,5 +5416,21 @@ static inline uint32_t A5XX_SSBO_2_1_BASE_HI(uint32_t val)
return ((val) << A5XX_SSBO_2_1_BASE_HI__SHIFT) & A5XX_SSBO_2_1_BASE_HI__MASK;
}
+#define REG_A5XX_UBO_0 0x00000000
+#define A5XX_UBO_0_BASE_LO__MASK 0xffffffff
+#define A5XX_UBO_0_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_UBO_0_BASE_LO(uint32_t val)
+{
+ return ((val) << A5XX_UBO_0_BASE_LO__SHIFT) & A5XX_UBO_0_BASE_LO__MASK;
+}
+
+#define REG_A5XX_UBO_1 0x00000001
+#define A5XX_UBO_1_BASE_HI__MASK 0x0001ffff
+#define A5XX_UBO_1_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_UBO_1_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_UBO_1_BASE_HI__SHIFT) & A5XX_UBO_1_BASE_HI__MASK;
+}
+
#endif /* A5XX_XML */
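
The A5XX hunks above convert single-bit flag defines into generated __MASK/__SHIFT pairs with builder functions; the following is a minimal sketch of how such fields are read and packed (hypothetical helper, assuming the driver's gpu_read() accessor, and assuming 0xf in COORD_MASK selects all of x/y/z/w):

static void a5xx_field_macro_sketch(struct msm_gpu *gpu)
{
	/* Test a status bit through its generated __MASK define. */
	uint32_t status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
	bool cp_busy = !!(status & A5XX_RBBM_STATUS_CP_BUSY__MASK);

	/* Pack the 4-bit COORD_MASK field that replaces the removed
	 * per-coordinate XCOORD/YCOORD/ZCOORD/WCOORD flags. */
	uint32_t rc0 = A5XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL |
		       A5XX_RB_RENDER_CONTROL0_COORD_MASK(0xf);

	(void)cp_busy;
	(void)rc0;
}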
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 0e1933ea12f2..9e63a190642c 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -186,7 +186,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
* timestamp is written to the memory and then triggers the interrupt
*/
OUT_PKT7(ring, CP_EVENT_WRITE, 4);
- OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+ OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
+ CP_EVENT_WRITE_0_IRQ);
OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno);
@@ -730,7 +731,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
*/
if (adreno_is_a530(adreno_gpu)) {
OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
- OUT_RING(gpu->rb[0], 0x0F);
+ OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));
gpu->funcs->flush(gpu, gpu->rb[0]);
if (!a5xx_idle(gpu, gpu->rb[0]))
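
The CP_EVENT_WRITE change above replaces open-coded values with generated helpers from adreno_pm4.xml.h (not shown in this patch); assuming the usual generated layout — event selector in the low bits, IRQ request at bit 31 — the two encodings should be identical, as this sketch illustrates:

static void cp_event_write_encoding_sketch(void)
{
	/* Old open-coded header dword vs. the new helper-built one. */
	uint32_t old_hdr = CACHE_FLUSH_TS | (1 << 31);
	uint32_t new_hdr = CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
			   CP_EVENT_WRITE_0_IRQ;

	WARN_ON(old_hdr != new_hdr);
}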
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index 47840b73cdda..920c5e6b8e96 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/smasetty/playarea/envytools/rnndb/adreno/a6xx.xml ( 161969 bytes, from 2019-11-29 07:18:16)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2019 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -46,219 +48,134 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-enum a6xx_color_fmt {
- RB6_A8_UNORM = 2,
- RB6_R8_UNORM = 3,
- RB6_R8_SNORM = 4,
- RB6_R8_UINT = 5,
- RB6_R8_SINT = 6,
- RB6_R4G4B4A4_UNORM = 8,
- RB6_R5G5B5A1_UNORM = 10,
- RB6_R5G6B5_UNORM = 14,
- RB6_R8G8_UNORM = 15,
- RB6_R8G8_SNORM = 16,
- RB6_R8G8_UINT = 17,
- RB6_R8G8_SINT = 18,
- RB6_R16_UNORM = 21,
- RB6_R16_SNORM = 22,
- RB6_R16_FLOAT = 23,
- RB6_R16_UINT = 24,
- RB6_R16_SINT = 25,
- RB6_R8G8B8A8_UNORM = 48,
- RB6_R8G8B8_UNORM = 49,
- RB6_R8G8B8A8_SNORM = 50,
- RB6_R8G8B8A8_UINT = 51,
- RB6_R8G8B8A8_SINT = 52,
- RB6_R10G10B10A2_UNORM = 55,
- RB6_R10G10B10A2_UINT = 58,
- RB6_R11G11B10_FLOAT = 66,
- RB6_R16G16_UNORM = 67,
- RB6_R16G16_SNORM = 68,
- RB6_R16G16_FLOAT = 69,
- RB6_R16G16_UINT = 70,
- RB6_R16G16_SINT = 71,
- RB6_R32_FLOAT = 74,
- RB6_R32_UINT = 75,
- RB6_R32_SINT = 76,
- RB6_R16G16B16A16_UNORM = 96,
- RB6_R16G16B16A16_SNORM = 97,
- RB6_R16G16B16A16_FLOAT = 98,
- RB6_R16G16B16A16_UINT = 99,
- RB6_R16G16B16A16_SINT = 100,
- RB6_R32G32_FLOAT = 103,
- RB6_R32G32_UINT = 104,
- RB6_R32G32_SINT = 105,
- RB6_R32G32B32A32_FLOAT = 130,
- RB6_R32G32B32A32_UINT = 131,
- RB6_R32G32B32A32_SINT = 132,
- RB6_X8Z24_UNORM = 160,
-};
-
enum a6xx_tile_mode {
TILE6_LINEAR = 0,
TILE6_2 = 2,
TILE6_3 = 3,
};
-enum a6xx_vtx_fmt {
- VFMT6_8_UNORM = 3,
- VFMT6_8_SNORM = 4,
- VFMT6_8_UINT = 5,
- VFMT6_8_SINT = 6,
- VFMT6_8_8_UNORM = 15,
- VFMT6_8_8_SNORM = 16,
- VFMT6_8_8_UINT = 17,
- VFMT6_8_8_SINT = 18,
- VFMT6_16_UNORM = 21,
- VFMT6_16_SNORM = 22,
- VFMT6_16_FLOAT = 23,
- VFMT6_16_UINT = 24,
- VFMT6_16_SINT = 25,
- VFMT6_8_8_8_UNORM = 33,
- VFMT6_8_8_8_SNORM = 34,
- VFMT6_8_8_8_UINT = 35,
- VFMT6_8_8_8_SINT = 36,
- VFMT6_8_8_8_8_UNORM = 48,
- VFMT6_8_8_8_8_SNORM = 50,
- VFMT6_8_8_8_8_UINT = 51,
- VFMT6_8_8_8_8_SINT = 52,
- VFMT6_10_10_10_2_UNORM = 54,
- VFMT6_10_10_10_2_SNORM = 57,
- VFMT6_10_10_10_2_UINT = 58,
- VFMT6_10_10_10_2_SINT = 59,
- VFMT6_11_11_10_FLOAT = 66,
- VFMT6_16_16_UNORM = 67,
- VFMT6_16_16_SNORM = 68,
- VFMT6_16_16_FLOAT = 69,
- VFMT6_16_16_UINT = 70,
- VFMT6_16_16_SINT = 71,
- VFMT6_32_UNORM = 72,
- VFMT6_32_SNORM = 73,
- VFMT6_32_FLOAT = 74,
- VFMT6_32_UINT = 75,
- VFMT6_32_SINT = 76,
- VFMT6_32_FIXED = 77,
- VFMT6_16_16_16_UNORM = 88,
- VFMT6_16_16_16_SNORM = 89,
- VFMT6_16_16_16_FLOAT = 90,
- VFMT6_16_16_16_UINT = 91,
- VFMT6_16_16_16_SINT = 92,
- VFMT6_16_16_16_16_UNORM = 96,
- VFMT6_16_16_16_16_SNORM = 97,
- VFMT6_16_16_16_16_FLOAT = 98,
- VFMT6_16_16_16_16_UINT = 99,
- VFMT6_16_16_16_16_SINT = 100,
- VFMT6_32_32_UNORM = 101,
- VFMT6_32_32_SNORM = 102,
- VFMT6_32_32_FLOAT = 103,
- VFMT6_32_32_UINT = 104,
- VFMT6_32_32_SINT = 105,
- VFMT6_32_32_FIXED = 106,
- VFMT6_32_32_32_UNORM = 112,
- VFMT6_32_32_32_SNORM = 113,
- VFMT6_32_32_32_UINT = 114,
- VFMT6_32_32_32_SINT = 115,
- VFMT6_32_32_32_FLOAT = 116,
- VFMT6_32_32_32_FIXED = 117,
- VFMT6_32_32_32_32_UNORM = 128,
- VFMT6_32_32_32_32_SNORM = 129,
- VFMT6_32_32_32_32_FLOAT = 130,
- VFMT6_32_32_32_32_UINT = 131,
- VFMT6_32_32_32_32_SINT = 132,
- VFMT6_32_32_32_32_FIXED = 133,
+enum a6xx_format {
+ FMT6_A8_UNORM = 2,
+ FMT6_8_UNORM = 3,
+ FMT6_8_SNORM = 4,
+ FMT6_8_UINT = 5,
+ FMT6_8_SINT = 6,
+ FMT6_4_4_4_4_UNORM = 8,
+ FMT6_5_5_5_1_UNORM = 10,
+ FMT6_1_5_5_5_UNORM = 12,
+ FMT6_5_6_5_UNORM = 14,
+ FMT6_8_8_UNORM = 15,
+ FMT6_8_8_SNORM = 16,
+ FMT6_8_8_UINT = 17,
+ FMT6_8_8_SINT = 18,
+ FMT6_L8_A8_UNORM = 19,
+ FMT6_16_UNORM = 21,
+ FMT6_16_SNORM = 22,
+ FMT6_16_FLOAT = 23,
+ FMT6_16_UINT = 24,
+ FMT6_16_SINT = 25,
+ FMT6_8_8_8_UNORM = 33,
+ FMT6_8_8_8_SNORM = 34,
+ FMT6_8_8_8_UINT = 35,
+ FMT6_8_8_8_SINT = 36,
+ FMT6_8_8_8_8_UNORM = 48,
+ FMT6_8_8_8_X8_UNORM = 49,
+ FMT6_8_8_8_8_SNORM = 50,
+ FMT6_8_8_8_8_UINT = 51,
+ FMT6_8_8_8_8_SINT = 52,
+ FMT6_9_9_9_E5_FLOAT = 53,
+ FMT6_10_10_10_2_UNORM = 54,
+ FMT6_10_10_10_2_UNORM_DEST = 55,
+ FMT6_10_10_10_2_SNORM = 57,
+ FMT6_10_10_10_2_UINT = 58,
+ FMT6_10_10_10_2_SINT = 59,
+ FMT6_11_11_10_FLOAT = 66,
+ FMT6_16_16_UNORM = 67,
+ FMT6_16_16_SNORM = 68,
+ FMT6_16_16_FLOAT = 69,
+ FMT6_16_16_UINT = 70,
+ FMT6_16_16_SINT = 71,
+ FMT6_32_UNORM = 72,
+ FMT6_32_SNORM = 73,
+ FMT6_32_FLOAT = 74,
+ FMT6_32_UINT = 75,
+ FMT6_32_SINT = 76,
+ FMT6_32_FIXED = 77,
+ FMT6_16_16_16_UNORM = 88,
+ FMT6_16_16_16_SNORM = 89,
+ FMT6_16_16_16_FLOAT = 90,
+ FMT6_16_16_16_UINT = 91,
+ FMT6_16_16_16_SINT = 92,
+ FMT6_16_16_16_16_UNORM = 96,
+ FMT6_16_16_16_16_SNORM = 97,
+ FMT6_16_16_16_16_FLOAT = 98,
+ FMT6_16_16_16_16_UINT = 99,
+ FMT6_16_16_16_16_SINT = 100,
+ FMT6_32_32_UNORM = 101,
+ FMT6_32_32_SNORM = 102,
+ FMT6_32_32_FLOAT = 103,
+ FMT6_32_32_UINT = 104,
+ FMT6_32_32_SINT = 105,
+ FMT6_32_32_FIXED = 106,
+ FMT6_32_32_32_UNORM = 112,
+ FMT6_32_32_32_SNORM = 113,
+ FMT6_32_32_32_UINT = 114,
+ FMT6_32_32_32_SINT = 115,
+ FMT6_32_32_32_FLOAT = 116,
+ FMT6_32_32_32_FIXED = 117,
+ FMT6_32_32_32_32_UNORM = 128,
+ FMT6_32_32_32_32_SNORM = 129,
+ FMT6_32_32_32_32_FLOAT = 130,
+ FMT6_32_32_32_32_UINT = 131,
+ FMT6_32_32_32_32_SINT = 132,
+ FMT6_32_32_32_32_FIXED = 133,
+ FMT6_G8R8B8R8_422_UNORM = 140,
+ FMT6_R8G8R8B8_422_UNORM = 141,
+ FMT6_R8_G8B8_2PLANE_420_UNORM = 142,
+ FMT6_R8_G8_B8_3PLANE_420_UNORM = 144,
+ FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8 = 145,
+ FMT6_8_PLANE_UNORM = 148,
+ FMT6_Z24_UNORM_S8_UINT = 160,
+ FMT6_ETC2_RG11_UNORM = 171,
+ FMT6_ETC2_RG11_SNORM = 172,
+ FMT6_ETC2_R11_UNORM = 173,
+ FMT6_ETC2_R11_SNORM = 174,
+ FMT6_ETC1 = 175,
+ FMT6_ETC2_RGB8 = 176,
+ FMT6_ETC2_RGBA8 = 177,
+ FMT6_ETC2_RGB8A1 = 178,
+ FMT6_DXT1 = 179,
+ FMT6_DXT3 = 180,
+ FMT6_DXT5 = 181,
+ FMT6_RGTC1_UNORM = 183,
+ FMT6_RGTC1_SNORM = 184,
+ FMT6_RGTC2_UNORM = 187,
+ FMT6_RGTC2_SNORM = 188,
+ FMT6_BPTC_UFLOAT = 190,
+ FMT6_BPTC_FLOAT = 191,
+ FMT6_BPTC = 192,
+ FMT6_ASTC_4x4 = 193,
+ FMT6_ASTC_5x4 = 194,
+ FMT6_ASTC_5x5 = 195,
+ FMT6_ASTC_6x5 = 196,
+ FMT6_ASTC_6x6 = 197,
+ FMT6_ASTC_8x5 = 198,
+ FMT6_ASTC_8x6 = 199,
+ FMT6_ASTC_8x8 = 200,
+ FMT6_ASTC_10x5 = 201,
+ FMT6_ASTC_10x6 = 202,
+ FMT6_ASTC_10x8 = 203,
+ FMT6_ASTC_10x10 = 204,
+ FMT6_ASTC_12x10 = 205,
+ FMT6_ASTC_12x12 = 206,
+ FMT6_S8Z24_UINT = 234,
+ FMT6_NONE = 255,
};
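
The old per-block vertex (VFMT6_*) and texture (TFMT6_*) enums collapse into the single a6xx_format namespace above, so one table of values serves the vertex fetch, texture, and 2D paths. Purely as an illustration (the translation helper and the fourcc pairings below are hypothetical, not part of this patch, and channel-swizzle handling is ignored), a driver-side mapping from DRM fourcc codes might look like:

#include <stdint.h>
#include <drm/drm_fourcc.h>	/* DRM_FORMAT_* fourcc codes */

/* Hypothetical helper: pick an a6xx_format for a few common fourcc codes,
 * returning FMT6_NONE for anything unrecognized.  Assumes a6xx.xml.h is
 * already included for enum a6xx_format.
 */
static enum a6xx_format fmt6_from_fourcc(uint32_t fourcc)
{
	switch (fourcc) {
	case DRM_FORMAT_RGB565:      return FMT6_5_6_5_UNORM;
	case DRM_FORMAT_ABGR8888:    return FMT6_8_8_8_8_UNORM;
	case DRM_FORMAT_XBGR8888:    return FMT6_8_8_8_X8_UNORM;
	case DRM_FORMAT_ABGR2101010: return FMT6_10_10_10_2_UNORM;
	default:                     return FMT6_NONE;
	}
}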
-enum a6xx_tex_fmt {
- TFMT6_A8_UNORM = 2,
- TFMT6_8_UNORM = 3,
- TFMT6_8_SNORM = 4,
- TFMT6_8_UINT = 5,
- TFMT6_8_SINT = 6,
- TFMT6_4_4_4_4_UNORM = 8,
- TFMT6_5_5_5_1_UNORM = 10,
- TFMT6_5_6_5_UNORM = 14,
- TFMT6_8_8_UNORM = 15,
- TFMT6_8_8_SNORM = 16,
- TFMT6_8_8_UINT = 17,
- TFMT6_8_8_SINT = 18,
- TFMT6_L8_A8_UNORM = 19,
- TFMT6_16_UNORM = 21,
- TFMT6_16_SNORM = 22,
- TFMT6_16_FLOAT = 23,
- TFMT6_16_UINT = 24,
- TFMT6_16_SINT = 25,
- TFMT6_8_8_8_8_UNORM = 48,
- TFMT6_8_8_8_UNORM = 49,
- TFMT6_8_8_8_8_SNORM = 50,
- TFMT6_8_8_8_8_UINT = 51,
- TFMT6_8_8_8_8_SINT = 52,
- TFMT6_9_9_9_E5_FLOAT = 53,
- TFMT6_10_10_10_2_UNORM = 54,
- TFMT6_10_10_10_2_UINT = 58,
- TFMT6_11_11_10_FLOAT = 66,
- TFMT6_16_16_UNORM = 67,
- TFMT6_16_16_SNORM = 68,
- TFMT6_16_16_FLOAT = 69,
- TFMT6_16_16_UINT = 70,
- TFMT6_16_16_SINT = 71,
- TFMT6_32_FLOAT = 74,
- TFMT6_32_UINT = 75,
- TFMT6_32_SINT = 76,
- TFMT6_16_16_16_16_UNORM = 96,
- TFMT6_16_16_16_16_SNORM = 97,
- TFMT6_16_16_16_16_FLOAT = 98,
- TFMT6_16_16_16_16_UINT = 99,
- TFMT6_16_16_16_16_SINT = 100,
- TFMT6_32_32_FLOAT = 103,
- TFMT6_32_32_UINT = 104,
- TFMT6_32_32_SINT = 105,
- TFMT6_32_32_32_UINT = 114,
- TFMT6_32_32_32_SINT = 115,
- TFMT6_32_32_32_FLOAT = 116,
- TFMT6_32_32_32_32_FLOAT = 130,
- TFMT6_32_32_32_32_UINT = 131,
- TFMT6_32_32_32_32_SINT = 132,
- TFMT6_X8Z24_UNORM = 160,
- TFMT6_ETC2_RG11_UNORM = 171,
- TFMT6_ETC2_RG11_SNORM = 172,
- TFMT6_ETC2_R11_UNORM = 173,
- TFMT6_ETC2_R11_SNORM = 174,
- TFMT6_ETC1 = 175,
- TFMT6_ETC2_RGB8 = 176,
- TFMT6_ETC2_RGBA8 = 177,
- TFMT6_ETC2_RGB8A1 = 178,
- TFMT6_DXT1 = 179,
- TFMT6_DXT3 = 180,
- TFMT6_DXT5 = 181,
- TFMT6_RGTC1_UNORM = 183,
- TFMT6_RGTC1_SNORM = 184,
- TFMT6_RGTC2_UNORM = 187,
- TFMT6_RGTC2_SNORM = 188,
- TFMT6_BPTC_UFLOAT = 190,
- TFMT6_BPTC_FLOAT = 191,
- TFMT6_BPTC = 192,
- TFMT6_ASTC_4x4 = 193,
- TFMT6_ASTC_5x4 = 194,
- TFMT6_ASTC_5x5 = 195,
- TFMT6_ASTC_6x5 = 196,
- TFMT6_ASTC_6x6 = 197,
- TFMT6_ASTC_8x5 = 198,
- TFMT6_ASTC_8x6 = 199,
- TFMT6_ASTC_8x8 = 200,
- TFMT6_ASTC_10x5 = 201,
- TFMT6_ASTC_10x6 = 202,
- TFMT6_ASTC_10x8 = 203,
- TFMT6_ASTC_10x10 = 204,
- TFMT6_ASTC_12x10 = 205,
- TFMT6_ASTC_12x12 = 206,
-};
-
-enum a6xx_tex_fetchsize {
- TFETCH6_1_BYTE = 0,
- TFETCH6_2_BYTE = 1,
- TFETCH6_4_BYTE = 2,
- TFETCH6_8_BYTE = 3,
- TFETCH6_16_BYTE = 4,
+enum a6xx_polygon_mode {
+ POLYMODE6_POINTS = 1,
+ POLYMODE6_LINES = 2,
+ POLYMODE6_TRIANGLES = 3,
};
enum a6xx_depth_format {
@@ -951,10 +868,50 @@ enum a6xx_cmp_perfcounter_select {
PERF_CMPDECMP_2D_PIXELS = 39,
};
+enum a6xx_2d_ifmt {
+ R2D_UNORM8 = 16,
+ R2D_INT32 = 7,
+ R2D_INT16 = 6,
+ R2D_INT8 = 5,
+ R2D_FLOAT32 = 4,
+ R2D_FLOAT16 = 3,
+ R2D_UNORM8_SRGB = 1,
+ R2D_RAW = 0,
+};
+
+enum a6xx_ztest_mode {
+ A6XX_EARLY_Z = 0,
+ A6XX_LATE_Z = 1,
+ A6XX_EARLY_LRZ_LATE_Z = 2,
+};
+
+enum a6xx_rotation {
+ ROTATE_0 = 0,
+ ROTATE_90 = 1,
+ ROTATE_180 = 2,
+ ROTATE_270 = 3,
+ ROTATE_HFLIP = 4,
+ ROTATE_VFLIP = 5,
+};
+
+enum a6xx_tess_spacing {
+ TESS_EQUAL = 0,
+ TESS_FRACTIONAL_ODD = 2,
+ TESS_FRACTIONAL_EVEN = 3,
+};
+
+enum a6xx_tess_output {
+ TESS_POINTS = 0,
+ TESS_LINES = 1,
+ TESS_CW_TRIS = 2,
+ TESS_CCW_TRIS = 3,
+};
+
enum a6xx_tex_filter {
A6XX_TEX_NEAREST = 0,
A6XX_TEX_LINEAR = 1,
A6XX_TEX_ANISO = 2,
+ A6XX_TEX_CUBIC = 3,
};
enum a6xx_tex_clamp {
@@ -973,6 +930,12 @@ enum a6xx_tex_aniso {
A6XX_TEX_ANISO_16 = 4,
};
+enum a6xx_reduction_mode {
+ A6XX_REDUCTION_MODE_AVERAGE = 0,
+ A6XX_REDUCTION_MODE_MIN = 1,
+ A6XX_REDUCTION_MODE_MAX = 2,
+};
+
enum a6xx_tex_swiz {
A6XX_TEX_X = 0,
A6XX_TEX_Y = 1,
@@ -1035,6 +998,9 @@ enum a6xx_tex_type {
#define REG_A6XX_CP_SQE_CNTL 0x00000808
+#define REG_A6XX_CP_CP2GMU_STATUS 0x00000812
+#define A6XX_CP_CP2GMU_STATUS_IFPC 0x00000001
+
#define REG_A6XX_CP_HW_FAULT 0x00000821
#define REG_A6XX_CP_INTERRUPT_STATUS 0x00000823
@@ -1050,8 +1016,44 @@ enum a6xx_tex_type {
#define REG_A6XX_CP_APRIV_CNTL 0x00000844
#define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__MASK 0x000000ff
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__SHIFT 0
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_RB_LO(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__MASK 0x0000ff00
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__SHIFT 8
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_RB_HI(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK 0x00ff0000
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB1_START(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK 0xff000000
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT 24
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB2_START(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK;
+}
#define REG_A6XX_CP_ROQ_THRESHOLDS_2 0x000008c2
+#define A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK 0x000001ff
+#define A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT 0
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_SDS_START(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK 0xffff0000
+#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK;
+}
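
The new CP_ROQ_THRESHOLDS field helpers follow the usual pattern of this generated header: each pre-shifts the caller's value (here by two, i.e. the hardware field drops the low two bits) before masking it into position. A minimal sketch of composing one threshold word; the numeric arguments are placeholders, not values taken from this patch:

#include <stdint.h>

/* Compose REG_A6XX_CP_ROQ_THRESHOLDS_1 from its four sub-fields.  The
 * generated helpers above shift each value right by two before packing,
 * so arguments should be multiples of four.  Assumes a6xx.xml.h is in scope.
 */
static uint32_t a6xx_roq_thresholds_1(void)
{
	return A6XX_CP_ROQ_THRESHOLDS_1_RB_LO(0x20) |
	       A6XX_CP_ROQ_THRESHOLDS_1_RB_HI(0x40) |
	       A6XX_CP_ROQ_THRESHOLDS_1_IB1_START(0x80) |
	       A6XX_CP_ROQ_THRESHOLDS_1_IB2_START(0xc0);
}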
#define REG_A6XX_CP_MEM_POOL_SIZE 0x000008c3
@@ -1170,6 +1172,36 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
#define REG_A6XX_CP_IB2_REM_SIZE 0x0000092d
+#define REG_A6XX_CP_SDS_BASE 0x0000092e
+
+#define REG_A6XX_CP_SDS_BASE_HI 0x0000092f
+
+#define REG_A6XX_CP_SDS_REM_SIZE 0x00000930
+
+#define REG_A6XX_CP_BIN_SIZE_ADDRESS 0x00000931
+
+#define REG_A6XX_CP_BIN_SIZE_ADDRESS_HI 0x00000932
+
+#define REG_A6XX_CP_BIN_DATA_ADDR 0x00000934
+
+#define REG_A6XX_CP_BIN_DATA_ADDR_HI 0x00000935
+
+#define REG_A6XX_CP_CSQ_IB1_STAT 0x00000949
+#define A6XX_CP_CSQ_IB1_STAT_REM__MASK 0xffff0000
+#define A6XX_CP_CSQ_IB1_STAT_REM__SHIFT 16
+static inline uint32_t A6XX_CP_CSQ_IB1_STAT_REM(uint32_t val)
+{
+ return ((val) << A6XX_CP_CSQ_IB1_STAT_REM__SHIFT) & A6XX_CP_CSQ_IB1_STAT_REM__MASK;
+}
+
+#define REG_A6XX_CP_CSQ_IB2_STAT 0x0000094a
+#define A6XX_CP_CSQ_IB2_STAT_REM__MASK 0xffff0000
+#define A6XX_CP_CSQ_IB2_STAT_REM__SHIFT 16
+static inline uint32_t A6XX_CP_CSQ_IB2_STAT_REM(uint32_t val)
+{
+ return ((val) << A6XX_CP_CSQ_IB2_STAT_REM__SHIFT) & A6XX_CP_CSQ_IB2_STAT_REM__MASK;
+}
+
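The CSQ_IB*_STAT helpers only pack values; reading the remaining-dwords count back out of a register readback uses the plain __MASK/__SHIFT pair. A small sketch of that decode, assuming the raw register value has already been read by whatever MMIO accessor the driver uses:

#include <stdint.h>

/* Extract the REM field from a raw CP_CSQ_IB1_STAT value.  Only the field
 * decode is shown; the register read itself is outside this sketch.
 */
static uint32_t a6xx_csq_ib1_rem(uint32_t stat)
{
	return (stat & A6XX_CP_CSQ_IB1_STAT_REM__MASK) >>
	       A6XX_CP_CSQ_IB1_STAT_REM__SHIFT;
}
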
#define REG_A6XX_CP_ALWAYS_ON_COUNTER_LO 0x00000980
#define REG_A6XX_CP_ALWAYS_ON_COUNTER_HI 0x00000981
@@ -1211,6 +1243,7 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER 0x00000001
#define REG_A6XX_RBBM_STATUS3 0x00000213
+#define A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT 0x01000000
#define REG_A6XX_RBBM_VBIF_GX_RESET_STATUS 0x00000215
@@ -1428,18 +1461,6 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
#define REG_A6XX_RBBM_PERFCTR_TSE_2_LO 0x0000046a
-#define REG_A6XX_RBBM_PERFCTR_CCU_4_HI 0x00000465
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_0_LO 0x00000466
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_0_HI 0x00000467
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_1_LO 0x00000468
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_1_HI 0x00000469
-
-#define REG_A6XX_RBBM_PERFCTR_TSE_2_LO 0x0000046a
-
#define REG_A6XX_RBBM_PERFCTR_TSE_2_HI 0x0000046b
#define REG_A6XX_RBBM_PERFCTR_TSE_3_LO 0x0000046c
@@ -1752,6 +1773,50 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
#define REG_A6XX_RBBM_ISDB_CNT 0x00000533
+#define REG_A6XX_RBBM_PRIMCTR_0_LO 0x00000540
+
+#define REG_A6XX_RBBM_PRIMCTR_0_HI 0x00000541
+
+#define REG_A6XX_RBBM_PRIMCTR_1_LO 0x00000542
+
+#define REG_A6XX_RBBM_PRIMCTR_1_HI 0x00000543
+
+#define REG_A6XX_RBBM_PRIMCTR_2_LO 0x00000544
+
+#define REG_A6XX_RBBM_PRIMCTR_2_HI 0x00000545
+
+#define REG_A6XX_RBBM_PRIMCTR_3_LO 0x00000546
+
+#define REG_A6XX_RBBM_PRIMCTR_3_HI 0x00000547
+
+#define REG_A6XX_RBBM_PRIMCTR_4_LO 0x00000548
+
+#define REG_A6XX_RBBM_PRIMCTR_4_HI 0x00000549
+
+#define REG_A6XX_RBBM_PRIMCTR_5_LO 0x0000054a
+
+#define REG_A6XX_RBBM_PRIMCTR_5_HI 0x0000054b
+
+#define REG_A6XX_RBBM_PRIMCTR_6_LO 0x0000054c
+
+#define REG_A6XX_RBBM_PRIMCTR_6_HI 0x0000054d
+
+#define REG_A6XX_RBBM_PRIMCTR_7_LO 0x0000054e
+
+#define REG_A6XX_RBBM_PRIMCTR_7_HI 0x0000054f
+
+#define REG_A6XX_RBBM_PRIMCTR_8_LO 0x00000550
+
+#define REG_A6XX_RBBM_PRIMCTR_8_HI 0x00000551
+
+#define REG_A6XX_RBBM_PRIMCTR_9_LO 0x00000552
+
+#define REG_A6XX_RBBM_PRIMCTR_9_HI 0x00000553
+
+#define REG_A6XX_RBBM_PRIMCTR_10_LO 0x00000554
+
+#define REG_A6XX_RBBM_PRIMCTR_10_HI 0x00000555
+
#define REG_A6XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
@@ -1768,6 +1833,9 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
#define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011
+#define REG_A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD 0x0000001c
+#define A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD_WAIT_GPU_IDLE 0x00000001
+
#define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f
#define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037
@@ -1996,6 +2064,14 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ 0x0000011c
+#define REG_A6XX_RBBM_CLOCK_HYST_HLSQ 0x0000011d
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE 0x00000120
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE 0x00000121
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE 0x00000122
+
#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_A 0x00000600
#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_B 0x00000601
@@ -2168,94 +2244,6 @@ static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
#define REG_A6XX_VSC_PERFCTR_VSC_SEL_1 0x00000cd9
-#define REG_A6XX_GRAS_ADDR_MODE_CNTL 0x00008601
-
-#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_0 0x00008610
-
-#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_1 0x00008611
-
-#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_2 0x00008612
-
-#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_3 0x00008613
-
-#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_0 0x00008614
-
-#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_1 0x00008615
-
-#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_2 0x00008616
-
-#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_3 0x00008617
-
-#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_0 0x00008618
-
-#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_1 0x00008619
-
-#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_2 0x0000861a
-
-#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_3 0x0000861b
-
-#define REG_A6XX_RB_ADDR_MODE_CNTL 0x00008e05
-
-#define REG_A6XX_RB_NC_MODE_CNTL 0x00008e08
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_0 0x00008e10
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_1 0x00008e11
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_2 0x00008e12
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_3 0x00008e13
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_4 0x00008e14
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_5 0x00008e15
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_6 0x00008e16
-
-#define REG_A6XX_RB_PERFCTR_RB_SEL_7 0x00008e17
-
-#define REG_A6XX_RB_PERFCTR_CCU_SEL_0 0x00008e18
-
-#define REG_A6XX_RB_PERFCTR_CCU_SEL_1 0x00008e19
-
-#define REG_A6XX_RB_PERFCTR_CCU_SEL_2 0x00008e1a
-
-#define REG_A6XX_RB_PERFCTR_CCU_SEL_3 0x00008e1b
-
-#define REG_A6XX_RB_PERFCTR_CCU_SEL_4 0x00008e1c
-
-#define REG_A6XX_RB_PERFCTR_CMP_SEL_0 0x00008e2c
-
-#define REG_A6XX_RB_PERFCTR_CMP_SEL_1 0x00008e2d
-
-#define REG_A6XX_RB_PERFCTR_CMP_SEL_2 0x00008e2e
-
-#define REG_A6XX_RB_PERFCTR_CMP_SEL_3 0x00008e2f
-
-#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD 0x00008e3d
-
-#define REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE 0x00008e50
-
-#define REG_A6XX_PC_DBG_ECO_CNTL 0x00009e00
-
-#define REG_A6XX_PC_ADDR_MODE_CNTL 0x00009e01
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_0 0x00009e34
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_1 0x00009e35
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_2 0x00009e36
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_3 0x00009e37
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_4 0x00009e38
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_5 0x00009e39
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_6 0x00009e3a
-
-#define REG_A6XX_PC_PERFCTR_PC_SEL_7 0x00009e3b
-
#define REG_A6XX_HLSQ_ADDR_MODE_CNTL 0x0000be05
#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x0000be10
@@ -2292,20 +2280,6 @@ static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
#define REG_A6XX_VFD_PERFCTR_VFD_SEL_7 0x0000a617
-#define REG_A6XX_VPC_ADDR_MODE_CNTL 0x00009601
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_0 0x00009604
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_1 0x00009605
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_2 0x00009606
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_3 0x00009607
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_4 0x00009608
-
-#define REG_A6XX_VPC_PERFCTR_VPC_SEL_5 0x00009609
-
#define REG_A6XX_UCHE_ADDR_MODE_CNTL 0x00000e00
#define REG_A6XX_UCHE_MODE_CNTL 0x00000e01
@@ -2581,21 +2555,6 @@ static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
#define REG_A6XX_GBIF_PWR_CNT_HIGH2 0x00003cd1
-#define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4
-#define A6XX_RB_WINDOW_OFFSET2_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00007fff
-#define A6XX_RB_WINDOW_OFFSET2_X__SHIFT 0
-static inline uint32_t A6XX_RB_WINDOW_OFFSET2_X(uint32_t val)
-{
- return ((val) << A6XX_RB_WINDOW_OFFSET2_X__SHIFT) & A6XX_RB_WINDOW_OFFSET2_X__MASK;
-}
-#define A6XX_RB_WINDOW_OFFSET2_Y__MASK 0x7fff0000
-#define A6XX_RB_WINDOW_OFFSET2_Y__SHIFT 16
-static inline uint32_t A6XX_RB_WINDOW_OFFSET2_Y(uint32_t val)
-{
- return ((val) << A6XX_RB_WINDOW_OFFSET2_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET2_Y__MASK;
-}
-
#define REG_A6XX_SP_WINDOW_OFFSET 0x0000b4d1
#define A6XX_SP_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
#define A6XX_SP_WINDOW_OFFSET_X__MASK 0x00007fff
@@ -2626,36 +2585,6 @@ static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_Y(uint32_t val)
return ((val) << A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_Y__MASK;
}
-#define REG_A6XX_GRAS_BIN_CONTROL 0x000080a1
-#define A6XX_GRAS_BIN_CONTROL_BINW__MASK 0x000000ff
-#define A6XX_GRAS_BIN_CONTROL_BINW__SHIFT 0
-static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINW(uint32_t val)
-{
- return ((val >> 5) << A6XX_GRAS_BIN_CONTROL_BINW__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINW__MASK;
-}
-#define A6XX_GRAS_BIN_CONTROL_BINH__MASK 0x0001ff00
-#define A6XX_GRAS_BIN_CONTROL_BINH__SHIFT 8
-static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINH(uint32_t val)
-{
- return ((val >> 4) << A6XX_GRAS_BIN_CONTROL_BINH__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINH__MASK;
-}
-#define A6XX_GRAS_BIN_CONTROL_BINNING_PASS 0x00040000
-#define A6XX_GRAS_BIN_CONTROL_USE_VIZ 0x00200000
-
-#define REG_A6XX_RB_BIN_CONTROL2 0x000088d3
-#define A6XX_RB_BIN_CONTROL2_BINW__MASK 0x000000ff
-#define A6XX_RB_BIN_CONTROL2_BINW__SHIFT 0
-static inline uint32_t A6XX_RB_BIN_CONTROL2_BINW(uint32_t val)
-{
- return ((val >> 5) << A6XX_RB_BIN_CONTROL2_BINW__SHIFT) & A6XX_RB_BIN_CONTROL2_BINW__MASK;
-}
-#define A6XX_RB_BIN_CONTROL2_BINH__MASK 0x0001ff00
-#define A6XX_RB_BIN_CONTROL2_BINH__SHIFT 8
-static inline uint32_t A6XX_RB_BIN_CONTROL2_BINH(uint32_t val)
-{
- return ((val >> 4) << A6XX_RB_BIN_CONTROL2_BINH__SHIFT) & A6XX_RB_BIN_CONTROL2_BINH__MASK;
-}
-
#define REG_A6XX_VSC_BIN_SIZE 0x00000c02
#define A6XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff
#define A6XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
@@ -2670,9 +2599,11 @@ static inline uint32_t A6XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
return ((val >> 4) << A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A6XX_VSC_BIN_SIZE_HEIGHT__MASK;
}
-#define REG_A6XX_VSC_SIZE_ADDRESS_LO 0x00000c03
+#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS_LO 0x00000c03
+
+#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS_HI 0x00000c04
-#define REG_A6XX_VSC_SIZE_ADDRESS_HI 0x00000c04
+#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS 0x00000c03
#define REG_A6XX_VSC_BIN_COUNT 0x00000c06
#define A6XX_VSC_BIN_COUNT_NX__MASK 0x000007fe
@@ -2716,114 +2647,188 @@ static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK;
}
-#define REG_A6XX_VSC_PIPE_DATA2_ADDRESS_LO 0x00000c30
+#define REG_A6XX_VSC_PRIM_STRM_ADDRESS_LO 0x00000c30
-#define REG_A6XX_VSC_PIPE_DATA2_ADDRESS_HI 0x00000c31
+#define REG_A6XX_VSC_PRIM_STRM_ADDRESS_HI 0x00000c31
-#define REG_A6XX_VSC_PIPE_DATA2_PITCH 0x00000c32
+#define REG_A6XX_VSC_PRIM_STRM_ADDRESS 0x00000c30
-#define REG_A6XX_VSC_PIPE_DATA2_ARRAY_PITCH 0x00000c33
-#define A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__MASK 0xffffffff
-#define A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__SHIFT 0
-static inline uint32_t A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(uint32_t val)
-{
- return ((val >> 4) << A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__SHIFT) & A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__MASK;
-}
+#define REG_A6XX_VSC_PRIM_STRM_PITCH 0x00000c32
-#define REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO 0x00000c34
+#define REG_A6XX_VSC_PRIM_STRM_LIMIT 0x00000c33
-#define REG_A6XX_VSC_PIPE_DATA_ADDRESS_HI 0x00000c35
+#define REG_A6XX_VSC_DRAW_STRM_ADDRESS_LO 0x00000c34
-#define REG_A6XX_VSC_PIPE_DATA_PITCH 0x00000c36
+#define REG_A6XX_VSC_DRAW_STRM_ADDRESS_HI 0x00000c35
-#define REG_A6XX_VSC_PIPE_DATA_ARRAY_PITCH 0x00000c37
-#define A6XX_VSC_PIPE_DATA_ARRAY_PITCH__MASK 0xffffffff
-#define A6XX_VSC_PIPE_DATA_ARRAY_PITCH__SHIFT 0
-static inline uint32_t A6XX_VSC_PIPE_DATA_ARRAY_PITCH(uint32_t val)
-{
- return ((val >> 4) << A6XX_VSC_PIPE_DATA_ARRAY_PITCH__SHIFT) & A6XX_VSC_PIPE_DATA_ARRAY_PITCH__MASK;
-}
+#define REG_A6XX_VSC_DRAW_STRM_ADDRESS 0x00000c34
+
+#define REG_A6XX_VSC_DRAW_STRM_PITCH 0x00000c36
+
+#define REG_A6XX_VSC_DRAW_STRM_LIMIT 0x00000c37
+
+static inline uint32_t REG_A6XX_VSC_STATE(uint32_t i0) { return 0x00000c38 + 0x1*i0; }
-static inline uint32_t REG_A6XX_VSC_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+static inline uint32_t REG_A6XX_VSC_STATE_REG(uint32_t i0) { return 0x00000c38 + 0x1*i0; }
-static inline uint32_t REG_A6XX_VSC_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+static inline uint32_t REG_A6XX_VSC_PRIM_STRM_SIZE(uint32_t i0) { return 0x00000c58 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PRIM_STRM_SIZE_REG(uint32_t i0) { return 0x00000c58 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_DRAW_STRM_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_DRAW_STRM_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
#define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12
-#define REG_A6XX_GRAS_UNKNOWN_8000 0x00008000
+#define REG_A6XX_GRAS_CL_CNTL 0x00008000
+#define A6XX_GRAS_CL_CNTL_CLIP_DISABLE 0x00000001
+#define A6XX_GRAS_CL_CNTL_ZNEAR_CLIP_DISABLE 0x00000002
+#define A6XX_GRAS_CL_CNTL_ZFAR_CLIP_DISABLE 0x00000004
+#define A6XX_GRAS_CL_CNTL_UNK5 0x00000020
+#define A6XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040
+#define A6XX_GRAS_CL_CNTL_VP_CLIP_CODE_IGNORE 0x00000080
+#define A6XX_GRAS_CL_CNTL_VP_XFORM_DISABLE 0x00000100
+#define A6XX_GRAS_CL_CNTL_PERSP_DIVISION_DISABLE 0x00000200
-#define REG_A6XX_GRAS_UNKNOWN_8001 0x00008001
+#define REG_A6XX_GRAS_VS_CL_CNTL 0x00008001
+#define A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_GRAS_VS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A6XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A6XX_GRAS_VS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK;
+}
-#define REG_A6XX_GRAS_UNKNOWN_8004 0x00008004
+#define REG_A6XX_GRAS_DS_CL_CNTL 0x00008002
+#define A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_GRAS_DS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_GRAS_DS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A6XX_GRAS_DS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A6XX_GRAS_DS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_DS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_DS_CL_CNTL_CULL_MASK__MASK;
+}
+
+#define REG_A6XX_GRAS_GS_CL_CNTL 0x00008003
+#define A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_GRAS_GS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_GRAS_GS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A6XX_GRAS_GS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A6XX_GRAS_GS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_GS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_GS_CL_CNTL_CULL_MASK__MASK;
+}
+
+#define REG_A6XX_GRAS_MAX_LAYER_INDEX 0x00008004
#define REG_A6XX_GRAS_CNTL 0x00008005
-#define A6XX_GRAS_CNTL_VARYING 0x00000001
-#define A6XX_GRAS_CNTL_UNK3 0x00000008
-#define A6XX_GRAS_CNTL_XCOORD 0x00000040
-#define A6XX_GRAS_CNTL_YCOORD 0x00000080
-#define A6XX_GRAS_CNTL_ZCOORD 0x00000100
-#define A6XX_GRAS_CNTL_WCOORD 0x00000200
+#define A6XX_GRAS_CNTL_IJ_PERSP_PIXEL 0x00000001
+#define A6XX_GRAS_CNTL_IJ_PERSP_CENTROID 0x00000002
+#define A6XX_GRAS_CNTL_IJ_PERSP_SAMPLE 0x00000004
+#define A6XX_GRAS_CNTL_SIZE 0x00000008
+#define A6XX_GRAS_CNTL_UNK4 0x00000010
+#define A6XX_GRAS_CNTL_SIZE_PERSAMP 0x00000020
+#define A6XX_GRAS_CNTL_COORD_MASK__MASK 0x000003c0
+#define A6XX_GRAS_CNTL_COORD_MASK__SHIFT 6
+static inline uint32_t A6XX_GRAS_CNTL_COORD_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_CNTL_COORD_MASK__SHIFT) & A6XX_GRAS_CNTL_COORD_MASK__MASK;
+}
#define REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x00008006
-#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000001ff
#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
{
return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
}
-#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x0007fc00
#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
{
return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
}
-#define REG_A6XX_GRAS_CL_VPORT_XOFFSET_0 0x00008010
-#define A6XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
-#define A6XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
-static inline uint32_t A6XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT(uint32_t i0) { return 0x00008010 + 0x6*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_XOFFSET(uint32_t i0) { return 0x00008010 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XOFFSET(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_XOFFSET__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_XSCALE(uint32_t i0) { return 0x00008011 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XSCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XSCALE(float val)
{
- return ((fui(val)) << A6XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_XSCALE__MASK;
}
-#define REG_A6XX_GRAS_CL_VPORT_XSCALE_0 0x00008011
-#define A6XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
-#define A6XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
-static inline uint32_t A6XX_GRAS_CL_VPORT_XSCALE_0(float val)
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_YOFFSET(uint32_t i0) { return 0x00008012 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YOFFSET(float val)
{
- return ((fui(val)) << A6XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_YOFFSET__MASK;
}
-#define REG_A6XX_GRAS_CL_VPORT_YOFFSET_0 0x00008012
-#define A6XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
-#define A6XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
-static inline uint32_t A6XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_YSCALE(uint32_t i0) { return 0x00008013 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YSCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YSCALE(float val)
{
- return ((fui(val)) << A6XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_YSCALE__MASK;
}
-#define REG_A6XX_GRAS_CL_VPORT_YSCALE_0 0x00008013
-#define A6XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
-#define A6XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
-static inline uint32_t A6XX_GRAS_CL_VPORT_YSCALE_0(float val)
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_ZOFFSET(uint32_t i0) { return 0x00008014 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZOFFSET(float val)
{
- return ((fui(val)) << A6XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_ZOFFSET__MASK;
}
-#define REG_A6XX_GRAS_CL_VPORT_ZOFFSET_0 0x00008014
-#define A6XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
-#define A6XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
-static inline uint32_t A6XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_ZSCALE(uint32_t i0) { return 0x00008015 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZSCALE(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_ZSCALE__MASK;
+}
+
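The per-viewport GRAS_CL_VPORT registers are now exposed as an indexed array (stride 0x6 dwords), and the value helpers bit-cast floats through fui(). A sketch of filling the six dwords for one viewport; the depth mapping shown is the conventional [zmin, zmax] transform and is illustrative only, and the local fui() stand-in exists just so the snippet is self-contained:

#include <stdint.h>
#include <string.h>

/* Local stand-in for the driver's fui() float-to-u32 bit-cast helper. */
static inline uint32_t fui(float f)
{
	uint32_t u;
	memcpy(&u, &f, sizeof(u));
	return u;
}

struct a6xx_viewport_regs {
	uint32_t offset;	/* REG_A6XX_GRAS_CL_VPORT_XOFFSET(i) */
	uint32_t value[6];	/* XOFFSET, XSCALE, YOFFSET, YSCALE, ZOFFSET, ZSCALE */
};

/* Build the six consecutive dwords for viewport i; how they reach the GPU
 * (MMIO or a command-stream packet) is outside the header's scope.
 */
static void a6xx_fill_viewport(struct a6xx_viewport_regs *vp, uint32_t i,
			       float x, float y, float w, float h,
			       float zmin, float zmax)
{
	vp->offset   = REG_A6XX_GRAS_CL_VPORT_XOFFSET(i);
	vp->value[0] = A6XX_GRAS_CL_VPORT_XOFFSET(x + w / 2.0f);
	vp->value[1] = A6XX_GRAS_CL_VPORT_XSCALE(w / 2.0f);
	vp->value[2] = A6XX_GRAS_CL_VPORT_YOFFSET(y + h / 2.0f);
	vp->value[3] = A6XX_GRAS_CL_VPORT_YSCALE(h / 2.0f);
	vp->value[4] = A6XX_GRAS_CL_VPORT_ZOFFSET(zmin);
	vp->value[5] = A6XX_GRAS_CL_VPORT_ZSCALE(zmax - zmin);
}
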
+static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP(uint32_t i0) { return 0x00008070 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP_MIN(uint32_t i0) { return 0x00008070 + 0x2*i0; }
+#define A6XX_GRAS_CL_Z_CLAMP_MIN__MASK 0xffffffff
+#define A6XX_GRAS_CL_Z_CLAMP_MIN__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_Z_CLAMP_MIN(float val)
{
- return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+ return ((fui(val)) << A6XX_GRAS_CL_Z_CLAMP_MIN__SHIFT) & A6XX_GRAS_CL_Z_CLAMP_MIN__MASK;
}
-#define REG_A6XX_GRAS_CL_VPORT_ZSCALE_0 0x00008015
-#define A6XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
-#define A6XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
-static inline uint32_t A6XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP_MAX(uint32_t i0) { return 0x00008071 + 0x2*i0; }
+#define A6XX_GRAS_CL_Z_CLAMP_MAX__MASK 0xffffffff
+#define A6XX_GRAS_CL_Z_CLAMP_MAX__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_Z_CLAMP_MAX(float val)
{
- return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+ return ((fui(val)) << A6XX_GRAS_CL_Z_CLAMP_MAX__SHIFT) & A6XX_GRAS_CL_Z_CLAMP_MAX__MASK;
}
#define REG_A6XX_GRAS_SU_CNTL 0x00008090
@@ -2837,7 +2842,19 @@ static inline uint32_t A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
return ((((int32_t)(val * 4.0))) << A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
}
#define A6XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+#define A6XX_GRAS_SU_CNTL_UNK12__MASK 0x00001000
+#define A6XX_GRAS_SU_CNTL_UNK12__SHIFT 12
+static inline uint32_t A6XX_GRAS_SU_CNTL_UNK12(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_CNTL_UNK12__SHIFT) & A6XX_GRAS_SU_CNTL_UNK12__MASK;
+}
#define A6XX_GRAS_SU_CNTL_MSAA_ENABLE 0x00002000
+#define A6XX_GRAS_SU_CNTL_UNK15__MASK 0x007f8000
+#define A6XX_GRAS_SU_CNTL_UNK15__SHIFT 15
+static inline uint32_t A6XX_GRAS_SU_CNTL_UNK15(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_CNTL_UNK15__SHIFT) & A6XX_GRAS_SU_CNTL_UNK15__MASK;
+}
#define REG_A6XX_GRAS_SU_POINT_MINMAX 0x00008091
#define A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
@@ -2854,7 +2871,7 @@ static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MAX(float val)
}
#define REG_A6XX_GRAS_SU_POINT_SIZE 0x00008092
-#define A6XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A6XX_GRAS_SU_POINT_SIZE__MASK 0x0000ffff
#define A6XX_GRAS_SU_POINT_SIZE__SHIFT 0
static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val)
{
@@ -2862,7 +2879,12 @@ static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val)
}
#define REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL 0x00008094
-#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__MASK 0x00000003
+#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE(enum a6xx_ztest_mode val)
+{
+ return ((val) << A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__SHIFT) & A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__MASK;
+}
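
GRAS_SU_DEPTH_PLANE_CNTL grows from the single FRAG_WRITES_Z bit into a two-bit Z_MODE field taking the new a6xx_ztest_mode enum, which also covers the mixed early-LRZ/late-Z case. A sketch of the kind of selection a driver might make; the shader-property flags named here are placeholders and the policy is one plausible choice, not taken from this patch:

#include <stdbool.h>
#include <stdint.h>

/* One plausible policy: a shader that writes depth forces late Z, a shader
 * that can discard keeps late Z but still allows early LRZ, everything else
 * gets full early Z.
 */
static uint32_t a6xx_depth_plane_cntl(bool frag_writes_z, bool has_kill)
{
	enum a6xx_ztest_mode mode;

	if (frag_writes_z)
		mode = A6XX_LATE_Z;
	else if (has_kill)
		mode = A6XX_EARLY_LRZ_LATE_Z;
	else
		mode = A6XX_EARLY_Z;

	return A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE(mode);
}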
#define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE 0x00008095
#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
@@ -2895,13 +2917,65 @@ static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_dep
{
return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
}
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__MASK 0x00000008
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__SHIFT 3
+static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__MASK;
+}
#define REG_A6XX_GRAS_UNKNOWN_8099 0x00008099
-#define REG_A6XX_GRAS_UNKNOWN_809B 0x0000809b
+#define REG_A6XX_GRAS_UNKNOWN_809A 0x0000809a
+
+#define REG_A6XX_GRAS_VS_LAYER_CNTL 0x0000809b
+#define A6XX_GRAS_VS_LAYER_CNTL_WRITES_LAYER 0x00000001
+#define A6XX_GRAS_VS_LAYER_CNTL_WRITES_VIEW 0x00000002
+
+#define REG_A6XX_GRAS_GS_LAYER_CNTL 0x0000809c
+#define A6XX_GRAS_GS_LAYER_CNTL_WRITES_LAYER 0x00000001
+#define A6XX_GRAS_GS_LAYER_CNTL_WRITES_VIEW 0x00000002
+
+#define REG_A6XX_GRAS_DS_LAYER_CNTL 0x0000809d
+#define A6XX_GRAS_DS_LAYER_CNTL_WRITES_LAYER 0x00000001
+#define A6XX_GRAS_DS_LAYER_CNTL_WRITES_VIEW 0x00000002
#define REG_A6XX_GRAS_UNKNOWN_80A0 0x000080a0
+#define REG_A6XX_GRAS_BIN_CONTROL 0x000080a1
+#define A6XX_GRAS_BIN_CONTROL_BINW__MASK 0x0000003f
+#define A6XX_GRAS_BIN_CONTROL_BINW__SHIFT 0
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINW(uint32_t val)
+{
+ return ((val >> 5) << A6XX_GRAS_BIN_CONTROL_BINW__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINW__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_BINH__MASK 0x00007f00
+#define A6XX_GRAS_BIN_CONTROL_BINH__SHIFT 8
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_GRAS_BIN_CONTROL_BINH__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINH__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_BINNING_PASS 0x00040000
+#define A6XX_GRAS_BIN_CONTROL_UNK19__MASK 0x00080000
+#define A6XX_GRAS_BIN_CONTROL_UNK19__SHIFT 19
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_UNK19(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_BIN_CONTROL_UNK19__SHIFT) & A6XX_GRAS_BIN_CONTROL_UNK19__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_UNK20__MASK 0x00100000
+#define A6XX_GRAS_BIN_CONTROL_UNK20__SHIFT 20
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_UNK20(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_BIN_CONTROL_UNK20__SHIFT) & A6XX_GRAS_BIN_CONTROL_UNK20__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_USE_VIZ 0x00200000
+#define A6XX_GRAS_BIN_CONTROL_UNK22__MASK 0x0fc00000
+#define A6XX_GRAS_BIN_CONTROL_UNK22__SHIFT 22
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_UNK22(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_BIN_CONTROL_UNK22__SHIFT) & A6XX_GRAS_BIN_CONTROL_UNK22__MASK;
+}
+
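GRAS_BIN_CONTROL (now documented at 0x80a1) packs the bin size with a pre-shift: width values are shifted right by 5 and height by 4 before masking, so the fields are stored in 32x16 pixel units. A sketch of composing a binning-pass control word; the flag combination is illustrative:

#include <stdint.h>

/* Compose GRAS_BIN_CONTROL for a binning pass.  binw should be a multiple
 * of 32 and binh a multiple of 16, since the generated helpers drop the
 * low bits.
 */
static uint32_t a6xx_gras_bin_control(uint32_t binw, uint32_t binh)
{
	return A6XX_GRAS_BIN_CONTROL_BINW(binw) |
	       A6XX_GRAS_BIN_CONTROL_BINH(binh) |
	       A6XX_GRAS_BIN_CONTROL_BINNING_PASS |
	       A6XX_GRAS_BIN_CONTROL_USE_VIZ;
}
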
#define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2
#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
@@ -2909,6 +2983,18 @@ static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples va
{
return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK;
}
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK2__MASK 0x00000004
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK2__SHIFT 2
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_UNK2(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_UNK2__MASK;
+}
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK3__MASK 0x00000008
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK3__SHIFT 3
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_UNK3__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_UNK3__MASK;
+}
#define REG_A6XX_GRAS_DEST_MSAA_CNTL 0x000080a3
#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
@@ -2919,83 +3005,180 @@ static inline uint32_t A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples v
}
#define A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
-#define REG_A6XX_GRAS_UNKNOWN_80A4 0x000080a4
+#define REG_A6XX_GRAS_SAMPLE_CONFIG 0x000080a4
+#define A6XX_GRAS_SAMPLE_CONFIG_UNK0 0x00000001
+#define A6XX_GRAS_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002
-#define REG_A6XX_GRAS_UNKNOWN_80A5 0x000080a5
+#define REG_A6XX_GRAS_SAMPLE_LOCATION_0 0x000080a5
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+}
-#define REG_A6XX_GRAS_UNKNOWN_80A6 0x000080a6
+#define REG_A6XX_GRAS_SAMPLE_LOCATION_1 0x000080a6
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+}
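
The formerly unknown 0x80a5/0x80a6 registers decode as programmable sample positions: GRAS_SAMPLE_LOCATION_0 and _1 each pack four samples' X and Y into 4-bit nibbles, gated by the LOCATION_ENABLE bit in GRAS_SAMPLE_CONFIG. The generated value helpers take a float but multiply by 1.0, so positions are evidently expected to arrive already encoded. A minimal packing sketch with placeholder positions:

#include <stdint.h>

/* Pack four pre-encoded 4-bit sample positions into GRAS_SAMPLE_LOCATION_0.
 * The position values below are placeholders, not a standard MSAA pattern.
 */
static uint32_t a6xx_sample_location_0(void)
{
	return A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X(0x8) |
	       A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y(0x8) |
	       A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X(0xc) |
	       A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y(0x4) |
	       A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X(0x4) |
	       A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y(0xc) |
	       A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X(0x0) |
	       A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y(0x0);
}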
#define REG_A6XX_GRAS_UNKNOWN_80AF 0x000080af
-#define REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x000080b0
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR(uint32_t i0) { return 0x000080b0 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL(uint32_t i0) { return 0x000080b0 + 0x2*i0; }
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x0000ffff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
{
- return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
}
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0xffff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
{
- return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
}
-#define REG_A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x000080b1
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR_BR(uint32_t i0) { return 0x000080b1 + 0x2*i0; }
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x0000ffff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
{
- return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
}
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
-#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0xffff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
{
- return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
}
-#define REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x000080d0
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR(uint32_t i0) { return 0x000080d0 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL(uint32_t i0) { return 0x000080d0 + 0x2*i0; }
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK 0x0000ffff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X(uint32_t val)
{
- return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK;
}
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__MASK 0xffff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y(uint32_t val)
{
- return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__MASK;
}
-#define REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x000080d1
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR(uint32_t i0) { return 0x000080d1 + 0x2*i0; }
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__MASK 0x0000ffff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X(uint32_t val)
{
- return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__MASK;
}
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
-#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__MASK 0xffff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y(uint32_t val)
{
- return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__MASK;
}
#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL 0x000080f0
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00003fff
#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
{
return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
}
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x3fff0000
#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
{
@@ -3003,14 +3186,13 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
}
#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_BR 0x000080f1
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00003fff
#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
{
return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
}
-#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x3fff0000
#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
{
@@ -3021,15 +3203,21 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
-#define A6XX_GRAS_LRZ_CNTL_UNK3 0x00000008
-#define A6XX_GRAS_LRZ_CNTL_UNK4 0x00000010
+#define A6XX_GRAS_LRZ_CNTL_FC_ENABLE 0x00000008
+#define A6XX_GRAS_LRZ_CNTL_Z_TEST_ENABLE 0x00000010
+#define A6XX_GRAS_LRZ_CNTL_UNK5__MASK 0x000003e0
+#define A6XX_GRAS_LRZ_CNTL_UNK5__SHIFT 5
+static inline uint32_t A6XX_GRAS_LRZ_CNTL_UNK5(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_LRZ_CNTL_UNK5__SHIFT) & A6XX_GRAS_LRZ_CNTL_UNK5__MASK;
+}
#define REG_A6XX_GRAS_UNKNOWN_8101 0x00008101
#define REG_A6XX_GRAS_2D_BLIT_INFO 0x00008102
#define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK 0x000000ff
#define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT(enum a6xx_format val)
{
return ((val) << A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK;
}
@@ -3038,78 +3226,128 @@ static inline uint32_t A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT(enum a6xx_color_fmt v
#define REG_A6XX_GRAS_LRZ_BUFFER_BASE_HI 0x00008104
+#define REG_A6XX_GRAS_LRZ_BUFFER_BASE 0x00008103
+#define A6XX_GRAS_LRZ_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_GRAS_LRZ_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_LRZ_BUFFER_BASE__SHIFT) & A6XX_GRAS_LRZ_BUFFER_BASE__MASK;
+}
+
#define REG_A6XX_GRAS_LRZ_BUFFER_PITCH 0x00008105
-#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK 0x000000ff
#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT 0
static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(uint32_t val)
{
return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK;
}
-#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK 0x003ff800
-#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK 0x1ffffc00
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT 10
static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK;
+ return ((val >> 4) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK;
}
#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x00008106
#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x00008107
-#define REG_A6XX_GRAS_UNKNOWN_8109 0x00008109
+#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE 0x00008106
+#define A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__SHIFT) & A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_GRAS_SAMPLE_CNTL 0x00008109
+#define A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE 0x00000001
+
+#define REG_A6XX_GRAS_UNKNOWN_810A 0x0000810a
+#define A6XX_GRAS_UNKNOWN_810A_UNK0__MASK 0x000007ff
+#define A6XX_GRAS_UNKNOWN_810A_UNK0__SHIFT 0
+static inline uint32_t A6XX_GRAS_UNKNOWN_810A_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_UNKNOWN_810A_UNK0__SHIFT) & A6XX_GRAS_UNKNOWN_810A_UNK0__MASK;
+}
+#define A6XX_GRAS_UNKNOWN_810A_UNK16__MASK 0x07ff0000
+#define A6XX_GRAS_UNKNOWN_810A_UNK16__SHIFT 16
+static inline uint32_t A6XX_GRAS_UNKNOWN_810A_UNK16(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_UNKNOWN_810A_UNK16__SHIFT) & A6XX_GRAS_UNKNOWN_810A_UNK16__MASK;
+}
+#define A6XX_GRAS_UNKNOWN_810A_UNK28__MASK 0xf0000000
+#define A6XX_GRAS_UNKNOWN_810A_UNK28__SHIFT 28
+static inline uint32_t A6XX_GRAS_UNKNOWN_810A_UNK28(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_UNKNOWN_810A_UNK28__SHIFT) & A6XX_GRAS_UNKNOWN_810A_UNK28__MASK;
+}
#define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110
#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
+#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__MASK 0x00000007
+#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_ROTATE(enum a6xx_rotation val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_ROTATE__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_ROTATE__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK3__MASK 0x00000078
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK3__SHIFT 3
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK3__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK3__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_SOLID_COLOR 0x00000080
#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
-static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_format val)
{
return ((val) << A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
}
#define A6XX_GRAS_2D_BLIT_CNTL_SCISSOR 0x00010000
-
-#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
-#define A6XX_GRAS_2D_SRC_TL_X_X__MASK 0x00ffff00
-#define A6XX_GRAS_2D_SRC_TL_X_X__SHIFT 8
-static inline uint32_t A6XX_GRAS_2D_SRC_TL_X_X(uint32_t val)
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK17__MASK 0x00060000
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK17__SHIFT 17
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK17(uint32_t val)
{
- return ((val) << A6XX_GRAS_2D_SRC_TL_X_X__SHIFT) & A6XX_GRAS_2D_SRC_TL_X_X__MASK;
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK17__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK17__MASK;
}
-
-#define REG_A6XX_GRAS_2D_SRC_BR_X 0x00008402
-#define A6XX_GRAS_2D_SRC_BR_X_X__MASK 0x00ffff00
-#define A6XX_GRAS_2D_SRC_BR_X_X__SHIFT 8
-static inline uint32_t A6XX_GRAS_2D_SRC_BR_X_X(uint32_t val)
+#define A6XX_GRAS_2D_BLIT_CNTL_D24S8 0x00080000
+#define A6XX_GRAS_2D_BLIT_CNTL_MASK__MASK 0x00f00000
+#define A6XX_GRAS_2D_BLIT_CNTL_MASK__SHIFT 20
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_MASK(uint32_t val)
{
- return ((val) << A6XX_GRAS_2D_SRC_BR_X_X__SHIFT) & A6XX_GRAS_2D_SRC_BR_X_X__MASK;
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_MASK__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_MASK__MASK;
}
-
-#define REG_A6XX_GRAS_2D_SRC_TL_Y 0x00008403
-#define A6XX_GRAS_2D_SRC_TL_Y_Y__MASK 0x00ffff00
-#define A6XX_GRAS_2D_SRC_TL_Y_Y__SHIFT 8
-static inline uint32_t A6XX_GRAS_2D_SRC_TL_Y_Y(uint32_t val)
+#define A6XX_GRAS_2D_BLIT_CNTL_IFMT__MASK 0x1f000000
+#define A6XX_GRAS_2D_BLIT_CNTL_IFMT__SHIFT 24
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_IFMT(enum a6xx_2d_ifmt val)
{
- return ((val) << A6XX_GRAS_2D_SRC_TL_Y_Y__SHIFT) & A6XX_GRAS_2D_SRC_TL_Y_Y__MASK;
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_IFMT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_IFMT__MASK;
}
-
-#define REG_A6XX_GRAS_2D_SRC_BR_Y 0x00008404
-#define A6XX_GRAS_2D_SRC_BR_Y_Y__MASK 0x00ffff00
-#define A6XX_GRAS_2D_SRC_BR_Y_Y__SHIFT 8
-static inline uint32_t A6XX_GRAS_2D_SRC_BR_Y_Y(uint32_t val)
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK29__MASK 0x20000000
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK29__SHIFT 29
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK29(uint32_t val)
{
- return ((val) << A6XX_GRAS_2D_SRC_BR_Y_Y__SHIFT) & A6XX_GRAS_2D_SRC_BR_Y_Y__MASK;
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK29__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK29__MASK;
}
+#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
+
+#define REG_A6XX_GRAS_2D_SRC_BR_X 0x00008402
+
+#define REG_A6XX_GRAS_2D_SRC_TL_Y 0x00008403
+
+#define REG_A6XX_GRAS_2D_SRC_BR_Y 0x00008404
+
#define REG_A6XX_GRAS_2D_DST_TL 0x00008405
-#define A6XX_GRAS_2D_DST_TL_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_GRAS_2D_DST_TL_X__MASK 0x00007fff
+#define A6XX_GRAS_2D_DST_TL_X__MASK 0x00003fff
#define A6XX_GRAS_2D_DST_TL_X__SHIFT 0
static inline uint32_t A6XX_GRAS_2D_DST_TL_X(uint32_t val)
{
return ((val) << A6XX_GRAS_2D_DST_TL_X__SHIFT) & A6XX_GRAS_2D_DST_TL_X__MASK;
}
-#define A6XX_GRAS_2D_DST_TL_Y__MASK 0x7fff0000
+#define A6XX_GRAS_2D_DST_TL_Y__MASK 0x3fff0000
#define A6XX_GRAS_2D_DST_TL_Y__SHIFT 16
static inline uint32_t A6XX_GRAS_2D_DST_TL_Y(uint32_t val)
{
@@ -3117,71 +3355,131 @@ static inline uint32_t A6XX_GRAS_2D_DST_TL_Y(uint32_t val)
}
#define REG_A6XX_GRAS_2D_DST_BR 0x00008406
-#define A6XX_GRAS_2D_DST_BR_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_GRAS_2D_DST_BR_X__MASK 0x00007fff
+#define A6XX_GRAS_2D_DST_BR_X__MASK 0x00003fff
#define A6XX_GRAS_2D_DST_BR_X__SHIFT 0
static inline uint32_t A6XX_GRAS_2D_DST_BR_X(uint32_t val)
{
return ((val) << A6XX_GRAS_2D_DST_BR_X__SHIFT) & A6XX_GRAS_2D_DST_BR_X__MASK;
}
-#define A6XX_GRAS_2D_DST_BR_Y__MASK 0x7fff0000
+#define A6XX_GRAS_2D_DST_BR_Y__MASK 0x3fff0000
#define A6XX_GRAS_2D_DST_BR_Y__SHIFT 16
static inline uint32_t A6XX_GRAS_2D_DST_BR_Y(uint32_t val)
{
return ((val) << A6XX_GRAS_2D_DST_BR_Y__SHIFT) & A6XX_GRAS_2D_DST_BR_Y__MASK;
}
-#define REG_A6XX_GRAS_RESOLVE_CNTL_1 0x0000840a
-#define A6XX_GRAS_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_GRAS_RESOLVE_CNTL_1_X__MASK 0x00007fff
-#define A6XX_GRAS_RESOLVE_CNTL_1_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_1_X(uint32_t val)
+#define REG_A6XX_GRAS_2D_UNKNOWN_8407 0x00008407
+
+#define REG_A6XX_GRAS_2D_UNKNOWN_8408 0x00008408
+
+#define REG_A6XX_GRAS_2D_UNKNOWN_8409 0x00008409
+
+#define REG_A6XX_GRAS_2D_RESOLVE_CNTL_1 0x0000840a
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_X__MASK 0x00003fff
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_1_X(uint32_t val)
{
- return ((val) << A6XX_GRAS_RESOLVE_CNTL_1_X__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_1_X__MASK;
+ return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_1_X__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_1_X__MASK;
}
-#define A6XX_GRAS_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
-#define A6XX_GRAS_RESOLVE_CNTL_1_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_1_Y(uint32_t val)
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__MASK 0x3fff0000
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_1_Y(uint32_t val)
{
- return ((val) << A6XX_GRAS_RESOLVE_CNTL_1_Y__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_1_Y__MASK;
+ return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__MASK;
}
-#define REG_A6XX_GRAS_RESOLVE_CNTL_2 0x0000840b
-#define A6XX_GRAS_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_GRAS_RESOLVE_CNTL_2_X__MASK 0x00007fff
-#define A6XX_GRAS_RESOLVE_CNTL_2_X__SHIFT 0
-static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_2_X(uint32_t val)
+#define REG_A6XX_GRAS_2D_RESOLVE_CNTL_2 0x0000840b
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_X__MASK 0x00003fff
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_X(uint32_t val)
{
- return ((val) << A6XX_GRAS_RESOLVE_CNTL_2_X__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_2_X__MASK;
+ return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_2_X__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_2_X__MASK;
}
-#define A6XX_GRAS_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
-#define A6XX_GRAS_RESOLVE_CNTL_2_Y__SHIFT 16
-static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_2_Y(uint32_t val)
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__MASK 0x3fff0000
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_Y(uint32_t val)
{
- return ((val) << A6XX_GRAS_RESOLVE_CNTL_2_Y__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_2_Y__MASK;
+ return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__MASK;
}
#define REG_A6XX_GRAS_UNKNOWN_8600 0x00008600
+#define REG_A6XX_GRAS_ADDR_MODE_CNTL 0x00008601
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_0 0x00008610
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_1 0x00008611
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_2 0x00008612
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_3 0x00008613
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_0 0x00008614
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_1 0x00008615
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_2 0x00008616
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_3 0x00008617
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_0 0x00008618
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_1 0x00008619
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_2 0x0000861a
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_3 0x0000861b
+
#define REG_A6XX_RB_BIN_CONTROL 0x00008800
-#define A6XX_RB_BIN_CONTROL_BINW__MASK 0x000000ff
+#define A6XX_RB_BIN_CONTROL_BINW__MASK 0x0000003f
#define A6XX_RB_BIN_CONTROL_BINW__SHIFT 0
static inline uint32_t A6XX_RB_BIN_CONTROL_BINW(uint32_t val)
{
return ((val >> 5) << A6XX_RB_BIN_CONTROL_BINW__SHIFT) & A6XX_RB_BIN_CONTROL_BINW__MASK;
}
-#define A6XX_RB_BIN_CONTROL_BINH__MASK 0x0001ff00
+#define A6XX_RB_BIN_CONTROL_BINH__MASK 0x00007f00
#define A6XX_RB_BIN_CONTROL_BINH__SHIFT 8
static inline uint32_t A6XX_RB_BIN_CONTROL_BINH(uint32_t val)
{
return ((val >> 4) << A6XX_RB_BIN_CONTROL_BINH__SHIFT) & A6XX_RB_BIN_CONTROL_BINH__MASK;
}
#define A6XX_RB_BIN_CONTROL_BINNING_PASS 0x00040000
+#define A6XX_RB_BIN_CONTROL_UNK19__MASK 0x00080000
+#define A6XX_RB_BIN_CONTROL_UNK19__SHIFT 19
+static inline uint32_t A6XX_RB_BIN_CONTROL_UNK19(uint32_t val)
+{
+ return ((val) << A6XX_RB_BIN_CONTROL_UNK19__SHIFT) & A6XX_RB_BIN_CONTROL_UNK19__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_UNK20__MASK 0x00100000
+#define A6XX_RB_BIN_CONTROL_UNK20__SHIFT 20
+static inline uint32_t A6XX_RB_BIN_CONTROL_UNK20(uint32_t val)
+{
+ return ((val) << A6XX_RB_BIN_CONTROL_UNK20__SHIFT) & A6XX_RB_BIN_CONTROL_UNK20__MASK;
+}
#define A6XX_RB_BIN_CONTROL_USE_VIZ 0x00200000
+#define A6XX_RB_BIN_CONTROL_UNK22__MASK 0x07c00000
+#define A6XX_RB_BIN_CONTROL_UNK22__SHIFT 22
+static inline uint32_t A6XX_RB_BIN_CONTROL_UNK22(uint32_t val)
+{
+ return ((val) << A6XX_RB_BIN_CONTROL_UNK22__SHIFT) & A6XX_RB_BIN_CONTROL_UNK22__MASK;
+}
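
The BINW/BINH helpers above take the bin size in pixels and shift it down to the 32x16-pixel granularity the fields store. A minimal, illustrative sketch of packing the register with these helpers; the bin size and the reading of USE_VIZ are assumptions, not taken from this patch:

/* Illustrative sketch: a 96x64-pixel bin.  BINW()/BINH() drop the low
 * 5/4 bits, so the pixel dimensions are expected to be 32/16 aligned. */
uint32_t bin_cntl = A6XX_RB_BIN_CONTROL_BINW(96) |	/* 96 px -> 3 units */
		    A6XX_RB_BIN_CONTROL_BINH(64) |	/* 64 px -> 4 units */
		    A6XX_RB_BIN_CONTROL_USE_VIZ;	/* consume visibility results */
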
#define REG_A6XX_RB_RENDER_CNTL 0x00008801
+#define A6XX_RB_RENDER_CNTL_UNK3 0x00000008
#define A6XX_RB_RENDER_CNTL_UNK4 0x00000010
+#define A6XX_RB_RENDER_CNTL_UNK5__MASK 0x00000060
+#define A6XX_RB_RENDER_CNTL_UNK5__SHIFT 5
+static inline uint32_t A6XX_RB_RENDER_CNTL_UNK5(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_CNTL_UNK5__SHIFT) & A6XX_RB_RENDER_CNTL_UNK5__MASK;
+}
#define A6XX_RB_RENDER_CNTL_BINNING 0x00000080
+#define A6XX_RB_RENDER_CNTL_UNK8__MASK 0x00001f00
+#define A6XX_RB_RENDER_CNTL_UNK8__SHIFT 8
+static inline uint32_t A6XX_RB_RENDER_CNTL_UNK8(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_CNTL_UNK8__SHIFT) & A6XX_RB_RENDER_CNTL_UNK8__MASK;
+}
#define A6XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16
@@ -3197,6 +3495,18 @@ static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
{
return ((val) << A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
}
+#define A6XX_RB_RAS_MSAA_CNTL_UNK2__MASK 0x00000004
+#define A6XX_RB_RAS_MSAA_CNTL_UNK2__SHIFT 2
+static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_UNK2(uint32_t val)
+{
+ return ((val) << A6XX_RB_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_UNK2__MASK;
+}
+#define A6XX_RB_RAS_MSAA_CNTL_UNK3__MASK 0x00000008
+#define A6XX_RB_RAS_MSAA_CNTL_UNK3__SHIFT 3
+static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_RB_RAS_MSAA_CNTL_UNK3__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_UNK3__MASK;
+}
#define REG_A6XX_RB_DEST_MSAA_CNTL 0x00008803
#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
@@ -3207,28 +3517,141 @@ static inline uint32_t A6XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val
}
#define A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
-#define REG_A6XX_RB_UNKNOWN_8804 0x00008804
+#define REG_A6XX_RB_SAMPLE_CONFIG 0x00008804
+#define A6XX_RB_SAMPLE_CONFIG_UNK0 0x00000001
+#define A6XX_RB_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002
-#define REG_A6XX_RB_UNKNOWN_8805 0x00008805
+#define REG_A6XX_RB_SAMPLE_LOCATION_0 0x00008805
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+}
-#define REG_A6XX_RB_UNKNOWN_8806 0x00008806
+#define REG_A6XX_RB_SAMPLE_LOCATION_1 0x00008806
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+}
#define REG_A6XX_RB_RENDER_CONTROL0 0x00008809
-#define A6XX_RB_RENDER_CONTROL0_VARYING 0x00000001
-#define A6XX_RB_RENDER_CONTROL0_UNK3 0x00000008
-#define A6XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
-#define A6XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
-#define A6XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
-#define A6XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
+#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL 0x00000001
+#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID 0x00000002
+#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE 0x00000004
+#define A6XX_RB_RENDER_CONTROL0_SIZE 0x00000008
+#define A6XX_RB_RENDER_CONTROL0_UNK4 0x00000010
+#define A6XX_RB_RENDER_CONTROL0_SIZE_PERSAMP 0x00000020
+#define A6XX_RB_RENDER_CONTROL0_COORD_MASK__MASK 0x000003c0
+#define A6XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT 6
+static inline uint32_t A6XX_RB_RENDER_CONTROL0_COORD_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT) & A6XX_RB_RENDER_CONTROL0_COORD_MASK__MASK;
+}
#define A6XX_RB_RENDER_CONTROL0_UNK10 0x00000400
#define REG_A6XX_RB_RENDER_CONTROL1 0x0000880a
#define A6XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
-#define A6XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+#define A6XX_RB_RENDER_CONTROL1_UNK1 0x00000002
+#define A6XX_RB_RENDER_CONTROL1_FACENESS 0x00000004
#define A6XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000008
+#define A6XX_RB_RENDER_CONTROL1_UNK4 0x00000010
+#define A6XX_RB_RENDER_CONTROL1_UNK5 0x00000020
+#define A6XX_RB_RENDER_CONTROL1_SIZE 0x00000040
+#define A6XX_RB_RENDER_CONTROL1_UNK7 0x00000080
+#define A6XX_RB_RENDER_CONTROL1_UNK8 0x00000100
#define REG_A6XX_RB_FS_OUTPUT_CNTL0 0x0000880b
+#define A6XX_RB_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE 0x00000001
#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z 0x00000002
+#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK 0x00000004
+#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_STENCILREF 0x00000008
#define REG_A6XX_RB_FS_OUTPUT_CNTL1 0x0000880c
#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
@@ -3348,7 +3771,8 @@ static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dithe
#define A6XX_RB_SRGB_CNTL_SRGB_MRT6 0x00000040
#define A6XX_RB_SRGB_CNTL_SRGB_MRT7 0x00000080
-#define REG_A6XX_RB_UNKNOWN_8810 0x00008810
+#define REG_A6XX_RB_SAMPLE_CNTL 0x00008810
+#define A6XX_RB_SAMPLE_CNTL_PER_SAMP_MODE 0x00000001
#define REG_A6XX_RB_UNKNOWN_8811 0x00008811
@@ -3426,7 +3850,7 @@ static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r
static inline uint32_t REG_A6XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x00008822 + 0x8*i0; }
#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_format val)
{
return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
}
@@ -3436,6 +3860,12 @@ static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a6xx_tile_mode
{
return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
}
+#define A6XX_RB_MRT_BUF_INFO_UNK10__MASK 0x00000400
+#define A6XX_RB_MRT_BUF_INFO_UNK10__SHIFT 10
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_UNK10(uint32_t val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_UNK10__SHIFT) & A6XX_RB_MRT_BUF_INFO_UNK10__MASK;
+}
#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
@@ -3444,7 +3874,7 @@ static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
}
static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; }
-#define A6XX_RB_MRT_PITCH__MASK 0xffffffff
+#define A6XX_RB_MRT_PITCH__MASK 0x0000ffff
#define A6XX_RB_MRT_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_MRT_PITCH(uint32_t val)
{
@@ -3452,7 +3882,7 @@ static inline uint32_t A6XX_RB_MRT_PITCH(uint32_t val)
}
static inline uint32_t REG_A6XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x00008824 + 0x8*i0; }
-#define A6XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_MRT_ARRAY_PITCH__MASK 0x1fffffff
#define A6XX_RB_MRT_ARRAY_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_MRT_ARRAY_PITCH(uint32_t val)
{
@@ -3463,7 +3893,21 @@ static inline uint32_t REG_A6XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x00008825
static inline uint32_t REG_A6XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x00008826 + 0x8*i0; }
+static inline uint32_t REG_A6XX_RB_MRT_BASE(uint32_t i0) { return 0x00008825 + 0x8*i0; }
+#define A6XX_RB_MRT_BASE__MASK 0xffffffff
+#define A6XX_RB_MRT_BASE__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_BASE(uint32_t val)
+{
+ return ((val) << A6XX_RB_MRT_BASE__SHIFT) & A6XX_RB_MRT_BASE__MASK;
+}
+
static inline uint32_t REG_A6XX_RB_MRT_BASE_GMEM(uint32_t i0) { return 0x00008827 + 0x8*i0; }
+#define A6XX_RB_MRT_BASE_GMEM__MASK 0xfffff000
+#define A6XX_RB_MRT_BASE_GMEM__SHIFT 12
+static inline uint32_t A6XX_RB_MRT_BASE_GMEM(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_MRT_BASE_GMEM__SHIFT) & A6XX_RB_MRT_BASE_GMEM__MASK;
+}
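
The new *_BASE_GMEM helpers encode a byte offset into on-chip GMEM and discard the low 12 bits, so offsets are expected to be 4 KiB aligned. A small sketch; the particular offsets are placeholders:

/* Illustrative sketch: color attachment 0 at GMEM offset 0, attachment 1
 * at 256 KiB.  The helper clears the low 12 bits of the offset. */
uint32_t mrt0_gmem = A6XX_RB_MRT_BASE_GMEM(0);
uint32_t mrt1_gmem = A6XX_RB_MRT_BASE_GMEM(256 * 1024);
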
#define REG_A6XX_RB_BLEND_RED_F32 0x00008860
#define A6XX_RB_BLEND_RED_F32__MASK 0xffffffff
@@ -3520,7 +3964,9 @@ static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
}
#define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A6XX_RB_BLEND_CNTL_DUAL_COLOR_IN_ENABLE 0x00000200
#define A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
+#define A6XX_RB_BLEND_CNTL_ALPHA_TO_ONE 0x00000800
#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
@@ -3529,7 +3975,12 @@ static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
}
#define REG_A6XX_RB_DEPTH_PLANE_CNTL 0x00008870
-#define A6XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+#define A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__MASK 0x00000003
+#define A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE(enum a6xx_ztest_mode val)
+{
+ return ((val) << A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__SHIFT) & A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__MASK;
+}
#define REG_A6XX_RB_DEPTH_CNTL 0x00008871
#define A6XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
@@ -3540,7 +3991,9 @@ static inline uint32_t A6XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
{
return ((val) << A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A6XX_RB_DEPTH_CNTL_ZFUNC__MASK;
}
+#define A6XX_RB_DEPTH_CNTL_Z_CLAMP_ENABLE 0x00000020
#define A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000040
+#define A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE 0x00000080
#define REG_A6XX_RB_DEPTH_BUFFER_INFO 0x00008872
#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
@@ -3549,9 +4002,15 @@ static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_fo
{
return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
}
+#define A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK 0x00000018
+#define A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT 3
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK;
+}
#define REG_A6XX_RB_DEPTH_BUFFER_PITCH 0x00008873
-#define A6XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_PITCH__MASK 0x00003fff
#define A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
{
@@ -3559,7 +4018,7 @@ static inline uint32_t A6XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
}
#define REG_A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x00008874
-#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0x0fffffff
#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
{
@@ -3570,11 +4029,37 @@ static inline uint32_t A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
#define REG_A6XX_RB_DEPTH_BUFFER_BASE_HI 0x00008876
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE 0x00008875
+#define A6XX_RB_DEPTH_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_RB_DEPTH_BUFFER_BASE__SHIFT) & A6XX_RB_DEPTH_BUFFER_BASE__MASK;
+}
+
#define REG_A6XX_RB_DEPTH_BUFFER_BASE_GMEM 0x00008877
+#define A6XX_RB_DEPTH_BUFFER_BASE_GMEM__MASK 0xfffff000
+#define A6XX_RB_DEPTH_BUFFER_BASE_GMEM__SHIFT 12
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_BASE_GMEM(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_DEPTH_BUFFER_BASE_GMEM__SHIFT) & A6XX_RB_DEPTH_BUFFER_BASE_GMEM__MASK;
+}
-#define REG_A6XX_RB_UNKNOWN_8878 0x00008878
+#define REG_A6XX_RB_Z_BOUNDS_MIN 0x00008878
+#define A6XX_RB_Z_BOUNDS_MIN__MASK 0xffffffff
+#define A6XX_RB_Z_BOUNDS_MIN__SHIFT 0
+static inline uint32_t A6XX_RB_Z_BOUNDS_MIN(float val)
+{
+ return ((fui(val)) << A6XX_RB_Z_BOUNDS_MIN__SHIFT) & A6XX_RB_Z_BOUNDS_MIN__MASK;
+}
-#define REG_A6XX_RB_UNKNOWN_8879 0x00008879
+#define REG_A6XX_RB_Z_BOUNDS_MAX 0x00008879
+#define A6XX_RB_Z_BOUNDS_MAX__MASK 0xffffffff
+#define A6XX_RB_Z_BOUNDS_MAX__SHIFT 0
+static inline uint32_t A6XX_RB_Z_BOUNDS_MAX(float val)
+{
+ return ((fui(val)) << A6XX_RB_Z_BOUNDS_MAX__SHIFT) & A6XX_RB_Z_BOUNDS_MAX__MASK;
+}
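
RB_Z_BOUNDS_MIN/MAX take raw IEEE-754 bit patterns through the fui() float-to-bits helper these macros call, and pair with the new Z_BOUNDS_ENABLE bit in RB_DEPTH_CNTL. A hedged sketch of programming a depth-bounds range; the local fui() below is only a stand-in for whatever helper the including driver provides:

/* Illustrative sketch: depth-bounds range [0.25, 0.75]. */
static inline uint32_t fui(float f)
{
	union { float f; uint32_t u; } v = { .f = f };	/* bit-cast stand-in */
	return v.u;
}

static void example_depth_bounds(uint32_t *z_min, uint32_t *z_max)
{
	*z_min = A6XX_RB_Z_BOUNDS_MIN(0.25f);	/* IEEE bits of 0.25 */
	*z_max = A6XX_RB_Z_BOUNDS_MAX(0.75f);	/* IEEE bits of 0.75 */
}
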
#define REG_A6XX_RB_STENCIL_CONTROL 0x00008880
#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
@@ -3631,9 +4116,10 @@ static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
#define REG_A6XX_RB_STENCIL_INFO 0x00008881
#define A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+#define A6XX_RB_STENCIL_INFO_UNK1 0x00000002
#define REG_A6XX_RB_STENCIL_BUFFER_PITCH 0x00008882
-#define A6XX_RB_STENCIL_BUFFER_PITCH__MASK 0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_PITCH__MASK 0x00000fff
#define A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_STENCIL_BUFFER_PITCH(uint32_t val)
{
@@ -3641,7 +4127,7 @@ static inline uint32_t A6XX_RB_STENCIL_BUFFER_PITCH(uint32_t val)
}
#define REG_A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH 0x00008883
-#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK 0x00ffffff
#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(uint32_t val)
{
@@ -3652,7 +4138,21 @@ static inline uint32_t A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(uint32_t val)
#define REG_A6XX_RB_STENCIL_BUFFER_BASE_HI 0x00008885
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE 0x00008884
+#define A6XX_RB_STENCIL_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCIL_BUFFER_BASE__SHIFT) & A6XX_RB_STENCIL_BUFFER_BASE__MASK;
+}
+
#define REG_A6XX_RB_STENCIL_BUFFER_BASE_GMEM 0x00008886
+#define A6XX_RB_STENCIL_BUFFER_BASE_GMEM__MASK 0xfffff000
+#define A6XX_RB_STENCIL_BUFFER_BASE_GMEM__SHIFT 12
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_BASE_GMEM(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_STENCIL_BUFFER_BASE_GMEM__SHIFT) & A6XX_RB_STENCIL_BUFFER_BASE_GMEM__MASK;
+}
#define REG_A6XX_RB_STENCILREF 0x00008887
#define A6XX_RB_STENCILREF_REF__MASK 0x000000ff
@@ -3697,14 +4197,13 @@ static inline uint32_t A6XX_RB_STENCILWRMASK_BFWRMASK(uint32_t val)
}
#define REG_A6XX_RB_WINDOW_OFFSET 0x00008890
-#define A6XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_RB_WINDOW_OFFSET_X__MASK 0x00003fff
#define A6XX_RB_WINDOW_OFFSET_X__SHIFT 0
static inline uint32_t A6XX_RB_WINDOW_OFFSET_X(uint32_t val)
{
return ((val) << A6XX_RB_WINDOW_OFFSET_X__SHIFT) & A6XX_RB_WINDOW_OFFSET_X__MASK;
}
-#define A6XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_RB_WINDOW_OFFSET_Y__MASK 0x3fff0000
#define A6XX_RB_WINDOW_OFFSET_Y__SHIFT 16
static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
{
@@ -3712,22 +4211,50 @@ static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
}
#define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891
+#define A6XX_RB_SAMPLE_COUNT_CONTROL_UNK0 0x00000001
#define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
#define REG_A6XX_RB_LRZ_CNTL 0x00008898
#define A6XX_RB_LRZ_CNTL_ENABLE 0x00000001
+#define REG_A6XX_RB_Z_CLAMP_MIN 0x000088c0
+#define A6XX_RB_Z_CLAMP_MIN__MASK 0xffffffff
+#define A6XX_RB_Z_CLAMP_MIN__SHIFT 0
+static inline uint32_t A6XX_RB_Z_CLAMP_MIN(float val)
+{
+ return ((fui(val)) << A6XX_RB_Z_CLAMP_MIN__SHIFT) & A6XX_RB_Z_CLAMP_MIN__MASK;
+}
+
+#define REG_A6XX_RB_Z_CLAMP_MAX 0x000088c1
+#define A6XX_RB_Z_CLAMP_MAX__MASK 0xffffffff
+#define A6XX_RB_Z_CLAMP_MAX__SHIFT 0
+static inline uint32_t A6XX_RB_Z_CLAMP_MAX(float val)
+{
+ return ((fui(val)) << A6XX_RB_Z_CLAMP_MAX__SHIFT) & A6XX_RB_Z_CLAMP_MAX__MASK;
+}
+
#define REG_A6XX_RB_UNKNOWN_88D0 0x000088d0
+#define A6XX_RB_UNKNOWN_88D0_UNK0__MASK 0x00001fff
+#define A6XX_RB_UNKNOWN_88D0_UNK0__SHIFT 0
+static inline uint32_t A6XX_RB_UNKNOWN_88D0_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_RB_UNKNOWN_88D0_UNK0__SHIFT) & A6XX_RB_UNKNOWN_88D0_UNK0__MASK;
+}
+#define A6XX_RB_UNKNOWN_88D0_UNK16__MASK 0x07ff0000
+#define A6XX_RB_UNKNOWN_88D0_UNK16__SHIFT 16
+static inline uint32_t A6XX_RB_UNKNOWN_88D0_UNK16(uint32_t val)
+{
+ return ((val) << A6XX_RB_UNKNOWN_88D0_UNK16__SHIFT) & A6XX_RB_UNKNOWN_88D0_UNK16__MASK;
+}
#define REG_A6XX_RB_BLIT_SCISSOR_TL 0x000088d1
-#define A6XX_RB_BLIT_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_RB_BLIT_SCISSOR_TL_X__MASK 0x00007fff
+#define A6XX_RB_BLIT_SCISSOR_TL_X__MASK 0x00003fff
#define A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT 0
static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_X(uint32_t val)
{
return ((val) << A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_X__MASK;
}
-#define A6XX_RB_BLIT_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A6XX_RB_BLIT_SCISSOR_TL_Y__MASK 0x3fff0000
#define A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT 16
static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_Y(uint32_t val)
{
@@ -3735,20 +4262,47 @@ static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_Y(uint32_t val)
}
#define REG_A6XX_RB_BLIT_SCISSOR_BR 0x000088d2
-#define A6XX_RB_BLIT_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_RB_BLIT_SCISSOR_BR_X__MASK 0x00007fff
+#define A6XX_RB_BLIT_SCISSOR_BR_X__MASK 0x00003fff
#define A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT 0
static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_X(uint32_t val)
{
return ((val) << A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_X__MASK;
}
-#define A6XX_RB_BLIT_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A6XX_RB_BLIT_SCISSOR_BR_Y__MASK 0x3fff0000
#define A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT 16
static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
{
return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
}
+#define REG_A6XX_RB_BIN_CONTROL2 0x000088d3
+#define A6XX_RB_BIN_CONTROL2_BINW__MASK 0x0000003f
+#define A6XX_RB_BIN_CONTROL2_BINW__SHIFT 0
+static inline uint32_t A6XX_RB_BIN_CONTROL2_BINW(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_BIN_CONTROL2_BINW__SHIFT) & A6XX_RB_BIN_CONTROL2_BINW__MASK;
+}
+#define A6XX_RB_BIN_CONTROL2_BINH__MASK 0x00007f00
+#define A6XX_RB_BIN_CONTROL2_BINH__SHIFT 8
+static inline uint32_t A6XX_RB_BIN_CONTROL2_BINH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_RB_BIN_CONTROL2_BINH__SHIFT) & A6XX_RB_BIN_CONTROL2_BINH__MASK;
+}
+
+#define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4
+#define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00003fff
+#define A6XX_RB_WINDOW_OFFSET2_X__SHIFT 0
+static inline uint32_t A6XX_RB_WINDOW_OFFSET2_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET2_X__SHIFT) & A6XX_RB_WINDOW_OFFSET2_X__MASK;
+}
+#define A6XX_RB_WINDOW_OFFSET2_Y__MASK 0x3fff0000
+#define A6XX_RB_WINDOW_OFFSET2_Y__SHIFT 16
+static inline uint32_t A6XX_RB_WINDOW_OFFSET2_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET2_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET2_Y__MASK;
+}
+
#define REG_A6XX_RB_MSAA_CNTL 0x000088d5
#define A6XX_RB_MSAA_CNTL_SAMPLES__MASK 0x00000018
#define A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT 3
@@ -3758,6 +4312,12 @@ static inline uint32_t A6XX_RB_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
}
#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6
+#define A6XX_RB_BLIT_BASE_GMEM__MASK 0xfffff000
+#define A6XX_RB_BLIT_BASE_GMEM__SHIFT 12
+static inline uint32_t A6XX_RB_BLIT_BASE_GMEM(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_BLIT_BASE_GMEM__SHIFT) & A6XX_RB_BLIT_BASE_GMEM__MASK;
+}
#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7
#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK 0x00000003
@@ -3773,17 +4333,26 @@ static inline uint32_t A6XX_RB_BLIT_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
{
return ((val) << A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK;
}
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK 0x00000060
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT 5
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK;
+}
#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK 0x00007f80
#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT 7
-static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_format val)
{
return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK;
}
-#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK 0x00000060
-#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT 5
-static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+#define A6XX_RB_BLIT_DST_INFO_UNK15 0x00008000
+
+#define REG_A6XX_RB_BLIT_DST 0x000088d8
+#define A6XX_RB_BLIT_DST__MASK 0xffffffff
+#define A6XX_RB_BLIT_DST__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST(uint32_t val)
{
- return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK;
+ return ((val) << A6XX_RB_BLIT_DST__SHIFT) & A6XX_RB_BLIT_DST__MASK;
}
#define REG_A6XX_RB_BLIT_DST_LO 0x000088d8
@@ -3791,7 +4360,7 @@ static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val
#define REG_A6XX_RB_BLIT_DST_HI 0x000088d9
#define REG_A6XX_RB_BLIT_DST_PITCH 0x000088da
-#define A6XX_RB_BLIT_DST_PITCH__MASK 0xffffffff
+#define A6XX_RB_BLIT_DST_PITCH__MASK 0x0000ffff
#define A6XX_RB_BLIT_DST_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_BLIT_DST_PITCH(uint32_t val)
{
@@ -3799,17 +4368,39 @@ static inline uint32_t A6XX_RB_BLIT_DST_PITCH(uint32_t val)
}
#define REG_A6XX_RB_BLIT_DST_ARRAY_PITCH 0x000088db
-#define A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0x1fffffff
#define A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
{
return ((val >> 6) << A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
}
+#define REG_A6XX_RB_BLIT_FLAG_DST 0x000088dc
+#define A6XX_RB_BLIT_FLAG_DST__MASK 0xffffffff
+#define A6XX_RB_BLIT_FLAG_DST__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_FLAG_DST(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_FLAG_DST__SHIFT) & A6XX_RB_BLIT_FLAG_DST__MASK;
+}
+
#define REG_A6XX_RB_BLIT_FLAG_DST_LO 0x000088dc
#define REG_A6XX_RB_BLIT_FLAG_DST_HI 0x000088dd
+#define REG_A6XX_RB_BLIT_FLAG_DST_PITCH 0x000088de
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK 0x0ffff800
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 7) << A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK;
+}
+
#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0 0x000088df
#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW1 0x000088e0
@@ -3829,14 +4420,76 @@ static inline uint32_t A6XX_RB_BLIT_INFO_CLEAR_MASK(uint32_t val)
{
return ((val) << A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT) & A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK;
}
+#define A6XX_RB_BLIT_INFO_UNK8__MASK 0x00000300
+#define A6XX_RB_BLIT_INFO_UNK8__SHIFT 8
+static inline uint32_t A6XX_RB_BLIT_INFO_UNK8(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_INFO_UNK8__SHIFT) & A6XX_RB_BLIT_INFO_UNK8__MASK;
+}
+#define A6XX_RB_BLIT_INFO_UNK12__MASK 0x0000f000
+#define A6XX_RB_BLIT_INFO_UNK12__SHIFT 12
+static inline uint32_t A6XX_RB_BLIT_INFO_UNK12(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_INFO_UNK12__SHIFT) & A6XX_RB_BLIT_INFO_UNK12__MASK;
+}
#define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0
+#define REG_A6XX_RB_UNK_FLAG_BUFFER_BASE 0x000088f1
+#define A6XX_RB_UNK_FLAG_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_RB_UNK_FLAG_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_RB_UNK_FLAG_BUFFER_BASE__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_RB_UNK_FLAG_BUFFER_PITCH 0x000088f3
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x00fff800
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 7) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_UNKNOWN_88F4 0x000088f4
+
#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x00008900
#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x00008901
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE 0x00008900
+#define A6XX_RB_DEPTH_FLAG_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_RB_DEPTH_FLAG_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_RB_DEPTH_FLAG_BUFFER_BASE__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_BASE__MASK;
+}
+
#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x00008902
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK 0x0000007f
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK 0x00000700
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT 8
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8(uint32_t val)
+{
+ return ((val) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK;
+}
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x0ffff800
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 7) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x00008903 + 0x3*i0; }
@@ -3844,39 +4497,93 @@ static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return
static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x00008904 + 0x3*i0; }
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR(uint32_t i0) { return 0x00008903 + 0x3*i0; }
+#define A6XX_RB_MRT_FLAG_BUFFER_ADDR__MASK 0xffffffff
+#define A6XX_RB_MRT_FLAG_BUFFER_ADDR__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_RB_MRT_FLAG_BUFFER_ADDR__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_ADDR__MASK;
+}
+
static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x00008905 + 0x3*i0; }
#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff
#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
{
- return ((val >> 5) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK;
+ return ((val >> 6) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK;
}
-#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x003ff800
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x1ffff800
#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
{
- return ((val >> 5) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+ return ((val >> 7) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
}
#define REG_A6XX_RB_SAMPLE_COUNT_ADDR_LO 0x00008927
#define REG_A6XX_RB_SAMPLE_COUNT_ADDR_HI 0x00008928
+#define REG_A6XX_RB_SAMPLE_COUNT_ADDR 0x00008927
+#define A6XX_RB_SAMPLE_COUNT_ADDR__MASK 0xffffffff
+#define A6XX_RB_SAMPLE_COUNT_ADDR__SHIFT 0
+static inline uint32_t A6XX_RB_SAMPLE_COUNT_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_RB_SAMPLE_COUNT_ADDR__SHIFT) & A6XX_RB_SAMPLE_COUNT_ADDR__MASK;
+}
+
#define REG_A6XX_RB_2D_BLIT_CNTL 0x00008c00
+#define A6XX_RB_2D_BLIT_CNTL_ROTATE__MASK 0x00000007
+#define A6XX_RB_2D_BLIT_CNTL_ROTATE__SHIFT 0
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_ROTATE(enum a6xx_rotation val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_ROTATE__SHIFT) & A6XX_RB_2D_BLIT_CNTL_ROTATE__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_UNK3__MASK 0x00000078
+#define A6XX_RB_2D_BLIT_CNTL_UNK3__SHIFT 3
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK3__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK3__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_SOLID_COLOR 0x00000080
#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
-static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_format val)
{
return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
}
#define A6XX_RB_2D_BLIT_CNTL_SCISSOR 0x00010000
+#define A6XX_RB_2D_BLIT_CNTL_UNK17__MASK 0x00060000
+#define A6XX_RB_2D_BLIT_CNTL_UNK17__SHIFT 17
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK17(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK17__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK17__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_D24S8 0x00080000
+#define A6XX_RB_2D_BLIT_CNTL_MASK__MASK 0x00f00000
+#define A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT 20
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT) & A6XX_RB_2D_BLIT_CNTL_MASK__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_IFMT__MASK 0x1f000000
+#define A6XX_RB_2D_BLIT_CNTL_IFMT__SHIFT 24
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_IFMT(enum a6xx_2d_ifmt val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_IFMT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_IFMT__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_UNK29__MASK 0x20000000
+#define A6XX_RB_2D_BLIT_CNTL_UNK29__SHIFT 29
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK29(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK29__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK29__MASK;
+}
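
RB_2D_BLIT_CNTL gains the same MASK/D24S8/IFMT layout as GRAS_2D_BLIT_CNTL above. A hedged sketch of a solid-fill blit control word; FMT6_8_8_8_8_UNORM, ROTATE_0 and R2D_UNORM8 stand for enumerators of a6xx_format, a6xx_rotation and a6xx_2d_ifmt defined elsewhere in this header, and their exact spellings are assumptions here:

/* Illustrative sketch: solid-color fill of an 8-bit UNORM destination,
 * writing all four components.  Enumerator names assumed, see above. */
uint32_t blit_cntl = A6XX_RB_2D_BLIT_CNTL_ROTATE(ROTATE_0) |
		     A6XX_RB_2D_BLIT_CNTL_SOLID_COLOR |	/* color from RB_2D_SRC_SOLID_Cn */
		     A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_8_8_8_8_UNORM) |
		     A6XX_RB_2D_BLIT_CNTL_MASK(0xf) |	/* write all components */
		     A6XX_RB_2D_BLIT_CNTL_IFMT(R2D_UNORM8);
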
-#define REG_A6XX_RB_UNKNOWN_8C01 0x00008c01
+#define REG_A6XX_RB_2D_UNKNOWN_8C01 0x00008c01
#define REG_A6XX_RB_2D_DST_INFO 0x00008c17
#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a6xx_format val)
{
return ((val) << A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
}
@@ -3893,23 +4600,98 @@ static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
return ((val) << A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
}
#define A6XX_RB_2D_DST_INFO_FLAGS 0x00001000
+#define A6XX_RB_2D_DST_INFO_SRGB 0x00002000
+#define A6XX_RB_2D_DST_INFO_SAMPLES__MASK 0x0000c000
+#define A6XX_RB_2D_DST_INFO_SAMPLES__SHIFT 14
+static inline uint32_t A6XX_RB_2D_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_2D_DST_INFO_SAMPLES__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_FILTER 0x00010000
+#define A6XX_RB_2D_DST_INFO_SAMPLES_AVERAGE 0x00040000
+#define A6XX_RB_2D_DST_INFO_UNK20 0x00100000
+#define A6XX_RB_2D_DST_INFO_UNK22 0x00400000
#define REG_A6XX_RB_2D_DST_LO 0x00008c18
#define REG_A6XX_RB_2D_DST_HI 0x00008c19
-#define REG_A6XX_RB_2D_DST_SIZE 0x00008c1a
-#define A6XX_RB_2D_DST_SIZE_PITCH__MASK 0x0000ffff
-#define A6XX_RB_2D_DST_SIZE_PITCH__SHIFT 0
-static inline uint32_t A6XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
+#define REG_A6XX_RB_2D_DST 0x00008c18
+#define A6XX_RB_2D_DST__MASK 0xffffffff
+#define A6XX_RB_2D_DST__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST__SHIFT) & A6XX_RB_2D_DST__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PITCH 0x00008c1a
+#define A6XX_RB_2D_DST_PITCH__MASK 0x0000ffff
+#define A6XX_RB_2D_DST_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_PITCH__SHIFT) & A6XX_RB_2D_DST_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PLANE1 0x00008c1b
+#define A6XX_RB_2D_DST_PLANE1__MASK 0xffffffff
+#define A6XX_RB_2D_DST_PLANE1__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_PLANE1(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST_PLANE1__SHIFT) & A6XX_RB_2D_DST_PLANE1__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PLANE_PITCH 0x00008c1d
+#define A6XX_RB_2D_DST_PLANE_PITCH__MASK 0x0000ffff
+#define A6XX_RB_2D_DST_PLANE_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_PLANE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_PLANE_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PLANE2 0x00008c1e
+#define A6XX_RB_2D_DST_PLANE2__MASK 0xffffffff
+#define A6XX_RB_2D_DST_PLANE2__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_PLANE2(uint32_t val)
{
- return ((val >> 6) << A6XX_RB_2D_DST_SIZE_PITCH__SHIFT) & A6XX_RB_2D_DST_SIZE_PITCH__MASK;
+ return ((val) << A6XX_RB_2D_DST_PLANE2__SHIFT) & A6XX_RB_2D_DST_PLANE2__MASK;
}
#define REG_A6XX_RB_2D_DST_FLAGS_LO 0x00008c20
#define REG_A6XX_RB_2D_DST_FLAGS_HI 0x00008c21
+#define REG_A6XX_RB_2D_DST_FLAGS 0x00008c20
+#define A6XX_RB_2D_DST_FLAGS__MASK 0xffffffff
+#define A6XX_RB_2D_DST_FLAGS__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST_FLAGS__SHIFT) & A6XX_RB_2D_DST_FLAGS__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_PITCH 0x00008c22
+#define A6XX_RB_2D_DST_FLAGS_PITCH__MASK 0x000000ff
+#define A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_PLANE 0x00008c23
+#define A6XX_RB_2D_DST_FLAGS_PLANE__MASK 0xffffffff
+#define A6XX_RB_2D_DST_FLAGS_PLANE__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST_FLAGS_PLANE__SHIFT) & A6XX_RB_2D_DST_FLAGS_PLANE__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_PLANE_PITCH 0x00008c25
+#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK 0x000000ff
+#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK;
+}
+
#define REG_A6XX_RB_2D_SRC_SOLID_C0 0x00008c2c
#define REG_A6XX_RB_2D_SRC_SOLID_C1 0x00008c2d
@@ -3922,15 +4704,205 @@ static inline uint32_t A6XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
#define REG_A6XX_RB_UNKNOWN_8E04 0x00008e04
+#define REG_A6XX_RB_ADDR_MODE_CNTL 0x00008e05
+
#define REG_A6XX_RB_CCU_CNTL 0x00008e07
+#define A6XX_RB_CCU_CNTL_OFFSET__MASK 0xff800000
+#define A6XX_RB_CCU_CNTL_OFFSET__SHIFT 23
+static inline uint32_t A6XX_RB_CCU_CNTL_OFFSET(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_CCU_CNTL_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_OFFSET__MASK;
+}
+#define A6XX_RB_CCU_CNTL_GMEM 0x00400000
+#define A6XX_RB_CCU_CNTL_UNK2 0x00000004
+
+#define REG_A6XX_RB_NC_MODE_CNTL 0x00008e08
+#define A6XX_RB_NC_MODE_CNTL_MODE 0x00000001
+#define A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK 0x00000006
+#define A6XX_RB_NC_MODE_CNTL_LOWER_BIT__SHIFT 1
+static inline uint32_t A6XX_RB_NC_MODE_CNTL_LOWER_BIT(uint32_t val)
+{
+ return ((val) << A6XX_RB_NC_MODE_CNTL_LOWER_BIT__SHIFT) & A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK;
+}
+#define A6XX_RB_NC_MODE_CNTL_MIN_ACCESS_LENGTH 0x00000008
+#define A6XX_RB_NC_MODE_CNTL_AMSBC 0x00000010
+#define A6XX_RB_NC_MODE_CNTL_UPPER_BIT__MASK 0x00000400
+#define A6XX_RB_NC_MODE_CNTL_UPPER_BIT__SHIFT 10
+static inline uint32_t A6XX_RB_NC_MODE_CNTL_UPPER_BIT(uint32_t val)
+{
+ return ((val) << A6XX_RB_NC_MODE_CNTL_UPPER_BIT__SHIFT) & A6XX_RB_NC_MODE_CNTL_UPPER_BIT__MASK;
+}
+#define A6XX_RB_NC_MODE_CNTL_RGB565_PREDICATOR 0x00000800
+#define A6XX_RB_NC_MODE_CNTL_UNK12__MASK 0x00003000
+#define A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT 12
+static inline uint32_t A6XX_RB_NC_MODE_CNTL_UNK12(uint32_t val)
+{
+ return ((val) << A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT) & A6XX_RB_NC_MODE_CNTL_UNK12__MASK;
+}
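
RB_NC_MODE_CNTL collects the bandwidth-compression addressing knobs: a 2-bit LOWER_BIT, a 1-bit UPPER_BIT, plus the MIN_ACCESS_LENGTH and AMSBC enables. A sketch with placeholder values; the real values are per-SKU and are not taken from this patch:

/* Illustrative sketch: placeholder compression configuration.  LOWER_BIT /
 * UPPER_BIT vary between GPU SKUs; 2 and 1 are only examples. */
uint32_t nc_mode = A6XX_RB_NC_MODE_CNTL_LOWER_BIT(2) |
		   A6XX_RB_NC_MODE_CNTL_UPPER_BIT(1) |
		   A6XX_RB_NC_MODE_CNTL_AMSBC |
		   A6XX_RB_NC_MODE_CNTL_MIN_ACCESS_LENGTH;
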
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_0 0x00008e10
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_1 0x00008e11
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_2 0x00008e12
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_3 0x00008e13
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_4 0x00008e14
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_5 0x00008e15
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_6 0x00008e16
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_7 0x00008e17
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_0 0x00008e18
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_1 0x00008e19
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_2 0x00008e1a
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_3 0x00008e1b
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_4 0x00008e1c
+
+#define REG_A6XX_RB_UNKNOWN_8E28 0x00008e28
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_0 0x00008e2c
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_1 0x00008e2d
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_2 0x00008e2e
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_3 0x00008e2f
+
+#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST 0x00008e3b
+
+#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD 0x00008e3d
+
+#define REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE 0x00008e50
+
+#define REG_A6XX_RB_UNKNOWN_8E51 0x00008e51
+#define A6XX_RB_UNKNOWN_8E51__MASK 0xffffffff
+#define A6XX_RB_UNKNOWN_8E51__SHIFT 0
+static inline uint32_t A6XX_RB_UNKNOWN_8E51(uint32_t val)
+{
+ return ((val) << A6XX_RB_UNKNOWN_8E51__SHIFT) & A6XX_RB_UNKNOWN_8E51__MASK;
+}
+
+#define REG_A6XX_VPC_UNKNOWN_9100 0x00009100
+
+#define REG_A6XX_VPC_VS_CLIP_CNTL 0x00009101
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
+
+#define REG_A6XX_VPC_GS_CLIP_CNTL 0x00009102
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
+
+#define REG_A6XX_VPC_DS_CLIP_CNTL 0x00009103
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
-#define REG_A6XX_VPC_UNKNOWN_9101 0x00009101
+#define REG_A6XX_VPC_VS_LAYER_CNTL 0x00009104
+#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff
+#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__SHIFT 0
+static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_LAYERLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__MASK;
+}
+#define A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00
+#define A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_VIEWLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__MASK;
+}
-#define REG_A6XX_VPC_GS_SIV_CNTL 0x00009104
+#define REG_A6XX_VPC_GS_LAYER_CNTL 0x00009105
+#define A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff
+#define A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__SHIFT 0
+static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_LAYERLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__MASK;
+}
+#define A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00
+#define A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_VIEWLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__MASK;
+}
+
+#define REG_A6XX_VPC_DS_LAYER_CNTL 0x00009106
+#define A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff
+#define A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__SHIFT 0
+static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_LAYERLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__MASK;
+}
+#define A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00
+#define A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_VIEWLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__MASK;
+}
#define REG_A6XX_VPC_UNKNOWN_9107 0x00009107
-#define REG_A6XX_VPC_UNKNOWN_9108 0x00009108
+#define REG_A6XX_VPC_POLYGON_MODE 0x00009108
+#define A6XX_VPC_POLYGON_MODE_MODE__MASK 0x00000003
+#define A6XX_VPC_POLYGON_MODE_MODE__SHIFT 0
+static inline uint32_t A6XX_VPC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val)
+{
+ return ((val) << A6XX_VPC_POLYGON_MODE_MODE__SHIFT) & A6XX_VPC_POLYGON_MODE_MODE__MASK;
+}
static inline uint32_t REG_A6XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00009200 + 0x1*i0; }
@@ -3949,6 +4921,12 @@ static inline uint32_t REG_A6XX_VPC_VAR(uint32_t i0) { return 0x00009212 + 0x1*i
static inline uint32_t REG_A6XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x00009212 + 0x1*i0; }
#define REG_A6XX_VPC_SO_CNTL 0x00009216
+#define A6XX_VPC_SO_CNTL_UNK0__MASK 0x000000ff
+#define A6XX_VPC_SO_CNTL_UNK0__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_CNTL_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_CNTL_UNK0__SHIFT) & A6XX_VPC_SO_CNTL_UNK0__MASK;
+}
#define A6XX_VPC_SO_CNTL_ENABLE 0x00010000
#define REG_A6XX_VPC_SO_PROG 0x00009217
@@ -3979,44 +4957,143 @@ static inline uint32_t A6XX_VPC_SO_PROG_B_OFF(uint32_t val)
}
#define A6XX_VPC_SO_PROG_B_EN 0x00800000
+#define REG_A6XX_VPC_SO_STREAM_COUNTS_LO 0x00009218
+
+#define REG_A6XX_VPC_SO_STREAM_COUNTS_HI 0x00009219
+
+#define REG_A6XX_VPC_SO_STREAM_COUNTS 0x00009218
+#define A6XX_VPC_SO_STREAM_COUNTS__MASK 0xffffffff
+#define A6XX_VPC_SO_STREAM_COUNTS__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_STREAM_COUNTS(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_COUNTS__SHIFT) & A6XX_VPC_SO_STREAM_COUNTS__MASK;
+}
+
static inline uint32_t REG_A6XX_VPC_SO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+#define A6XX_VPC_SO_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_VPC_SO_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_BUFFER_BASE__SHIFT) & A6XX_VPC_SO_BUFFER_BASE__MASK;
+}
+
static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE_LO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE_HI(uint32_t i0) { return 0x0000921b + 0x7*i0; }
static inline uint32_t REG_A6XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000921c + 0x7*i0; }
+#define A6XX_VPC_SO_BUFFER_SIZE__MASK 0xfffffffc
+#define A6XX_VPC_SO_BUFFER_SIZE__SHIFT 2
+static inline uint32_t A6XX_VPC_SO_BUFFER_SIZE(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_BUFFER_SIZE__SHIFT) & A6XX_VPC_SO_BUFFER_SIZE__MASK;
+}
static inline uint32_t REG_A6XX_VPC_SO_NCOMP(uint32_t i0) { return 0x0000921d + 0x7*i0; }
static inline uint32_t REG_A6XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000921e + 0x7*i0; }
+#define A6XX_VPC_SO_BUFFER_OFFSET__MASK 0xfffffffc
+#define A6XX_VPC_SO_BUFFER_OFFSET__SHIFT 2
+static inline uint32_t A6XX_VPC_SO_BUFFER_OFFSET(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_BUFFER_OFFSET__SHIFT) & A6XX_VPC_SO_BUFFER_OFFSET__MASK;
+}
+
+static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE(uint32_t i0) { return 0x0000921f + 0x7*i0; }
+#define A6XX_VPC_SO_FLUSH_BASE__MASK 0xffffffff
+#define A6XX_VPC_SO_FLUSH_BASE__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_FLUSH_BASE(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_FLUSH_BASE__SHIFT) & A6XX_VPC_SO_FLUSH_BASE__MASK;
+}
static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE_LO(uint32_t i0) { return 0x0000921f + 0x7*i0; }
static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE_HI(uint32_t i0) { return 0x00009220 + 0x7*i0; }
-#define REG_A6XX_VPC_UNKNOWN_9236 0x00009236
+#define REG_A6XX_VPC_POINT_COORD_INVERT 0x00009236
+#define A6XX_VPC_POINT_COORD_INVERT_INVERT 0x00000001
#define REG_A6XX_VPC_UNKNOWN_9300 0x00009300
-#define REG_A6XX_VPC_PACK 0x00009301
-#define A6XX_VPC_PACK_STRIDE_IN_VPC__MASK 0x000000ff
-#define A6XX_VPC_PACK_STRIDE_IN_VPC__SHIFT 0
-static inline uint32_t A6XX_VPC_PACK_STRIDE_IN_VPC(uint32_t val)
+#define REG_A6XX_VPC_VS_PACK 0x00009301
+#define A6XX_VPC_VS_PACK_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_VPC_VS_PACK_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_VPC_VS_PACK_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_VS_PACK_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_VPC_VS_PACK_POSITIONLOC__MASK 0x0000ff00
+#define A6XX_VPC_VS_PACK_POSITIONLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_VS_PACK_POSITIONLOC(uint32_t val)
{
- return ((val) << A6XX_VPC_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_PACK_STRIDE_IN_VPC__MASK;
+ return ((val) << A6XX_VPC_VS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_VS_PACK_POSITIONLOC__MASK;
}
-#define A6XX_VPC_PACK_NUMNONPOSVAR__MASK 0x0000ff00
-#define A6XX_VPC_PACK_NUMNONPOSVAR__SHIFT 8
-static inline uint32_t A6XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+#define A6XX_VPC_VS_PACK_PSIZELOC__MASK 0x00ff0000
+#define A6XX_VPC_VS_PACK_PSIZELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_VS_PACK_PSIZELOC(uint32_t val)
{
- return ((val) << A6XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A6XX_VPC_PACK_NUMNONPOSVAR__MASK;
+ return ((val) << A6XX_VPC_VS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_VS_PACK_PSIZELOC__MASK;
}
-#define A6XX_VPC_PACK_PSIZELOC__MASK 0x00ff0000
-#define A6XX_VPC_PACK_PSIZELOC__SHIFT 16
-static inline uint32_t A6XX_VPC_PACK_PSIZELOC(uint32_t val)
+#define A6XX_VPC_VS_PACK_UNK24__MASK 0x0f000000
+#define A6XX_VPC_VS_PACK_UNK24__SHIFT 24
+static inline uint32_t A6XX_VPC_VS_PACK_UNK24(uint32_t val)
{
- return ((val) << A6XX_VPC_PACK_PSIZELOC__SHIFT) & A6XX_VPC_PACK_PSIZELOC__MASK;
+ return ((val) << A6XX_VPC_VS_PACK_UNK24__SHIFT) & A6XX_VPC_VS_PACK_UNK24__MASK;
+}
+
+#define REG_A6XX_VPC_GS_PACK 0x00009302
+#define A6XX_VPC_GS_PACK_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_VPC_GS_PACK_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_VPC_GS_PACK_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_GS_PACK_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_VPC_GS_PACK_POSITIONLOC__MASK 0x0000ff00
+#define A6XX_VPC_GS_PACK_POSITIONLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_GS_PACK_POSITIONLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_GS_PACK_POSITIONLOC__MASK;
+}
+#define A6XX_VPC_GS_PACK_PSIZELOC__MASK 0x00ff0000
+#define A6XX_VPC_GS_PACK_PSIZELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_GS_PACK_PSIZELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_GS_PACK_PSIZELOC__MASK;
+}
+#define A6XX_VPC_GS_PACK_UNK24__MASK 0x0f000000
+#define A6XX_VPC_GS_PACK_UNK24__SHIFT 24
+static inline uint32_t A6XX_VPC_GS_PACK_UNK24(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_PACK_UNK24__SHIFT) & A6XX_VPC_GS_PACK_UNK24__MASK;
+}
+
+#define REG_A6XX_VPC_DS_PACK 0x00009303
+#define A6XX_VPC_DS_PACK_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_VPC_DS_PACK_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_VPC_DS_PACK_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_DS_PACK_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_VPC_DS_PACK_POSITIONLOC__MASK 0x0000ff00
+#define A6XX_VPC_DS_PACK_POSITIONLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_DS_PACK_POSITIONLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_DS_PACK_POSITIONLOC__MASK;
+}
+#define A6XX_VPC_DS_PACK_PSIZELOC__MASK 0x00ff0000
+#define A6XX_VPC_DS_PACK_PSIZELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_DS_PACK_PSIZELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_DS_PACK_PSIZELOC__MASK;
+}
+#define A6XX_VPC_DS_PACK_UNK24__MASK 0x0f000000
+#define A6XX_VPC_DS_PACK_UNK24__SHIFT 24
+static inline uint32_t A6XX_VPC_DS_PACK_UNK24(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_PACK_UNK24__SHIFT) & A6XX_VPC_DS_PACK_UNK24__MASK;
}
#define REG_A6XX_VPC_CNTL_0 0x00009304
@@ -4026,7 +5103,19 @@ static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
{
return ((val) << A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT) & A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK;
}
+#define A6XX_VPC_CNTL_0_PRIMIDLOC__MASK 0x0000ff00
+#define A6XX_VPC_CNTL_0_PRIMIDLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_CNTL_0_PRIMIDLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_CNTL_0_PRIMIDLOC__SHIFT) & A6XX_VPC_CNTL_0_PRIMIDLOC__MASK;
+}
#define A6XX_VPC_CNTL_0_VARYING 0x00010000
+#define A6XX_VPC_CNTL_0_UNKLOC__MASK 0xff000000
+#define A6XX_VPC_CNTL_0_UNKLOC__SHIFT 24
+static inline uint32_t A6XX_VPC_CNTL_0_UNKLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_CNTL_0_UNKLOC__SHIFT) & A6XX_VPC_CNTL_0_UNKLOC__MASK;
+}
#define REG_A6XX_VPC_SO_BUF_CNTL 0x00009305
#define A6XX_VPC_SO_BUF_CNTL_BUF0 0x00000001
@@ -4034,15 +5123,65 @@ static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
#define A6XX_VPC_SO_BUF_CNTL_BUF2 0x00000040
#define A6XX_VPC_SO_BUF_CNTL_BUF3 0x00000200
#define A6XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000
+#define A6XX_VPC_SO_BUF_CNTL_UNK16__MASK 0x000f0000
+#define A6XX_VPC_SO_BUF_CNTL_UNK16__SHIFT 16
+static inline uint32_t A6XX_VPC_SO_BUF_CNTL_UNK16(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_BUF_CNTL_UNK16__SHIFT) & A6XX_VPC_SO_BUF_CNTL_UNK16__MASK;
+}
-#define REG_A6XX_VPC_SO_OVERRIDE 0x00009306
-#define A6XX_VPC_SO_OVERRIDE_SO_DISABLE 0x00000001
+#define REG_A6XX_VPC_SO_DISABLE 0x00009306
+#define A6XX_VPC_SO_DISABLE_DISABLE 0x00000001
#define REG_A6XX_VPC_UNKNOWN_9600 0x00009600
+#define REG_A6XX_VPC_ADDR_MODE_CNTL 0x00009601
+
#define REG_A6XX_VPC_UNKNOWN_9602 0x00009602
+#define REG_A6XX_VPC_UNKNOWN_9603 0x00009603
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_0 0x00009604
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_1 0x00009605
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_2 0x00009606
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_3 0x00009607
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_4 0x00009608
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_5 0x00009609
+
+#define REG_A6XX_PC_TESS_NUM_VERTEX 0x00009800
+
#define REG_A6XX_PC_UNKNOWN_9801 0x00009801
+#define A6XX_PC_UNKNOWN_9801_UNK0__MASK 0x000007ff
+#define A6XX_PC_UNKNOWN_9801_UNK0__SHIFT 0
+static inline uint32_t A6XX_PC_UNKNOWN_9801_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_PC_UNKNOWN_9801_UNK0__SHIFT) & A6XX_PC_UNKNOWN_9801_UNK0__MASK;
+}
+#define A6XX_PC_UNKNOWN_9801_UNK13__MASK 0x00002000
+#define A6XX_PC_UNKNOWN_9801_UNK13__SHIFT 13
+static inline uint32_t A6XX_PC_UNKNOWN_9801_UNK13(uint32_t val)
+{
+ return ((val) << A6XX_PC_UNKNOWN_9801_UNK13__SHIFT) & A6XX_PC_UNKNOWN_9801_UNK13__MASK;
+}
+
+#define REG_A6XX_PC_TESS_CNTL 0x00009802
+#define A6XX_PC_TESS_CNTL_SPACING__MASK 0x00000003
+#define A6XX_PC_TESS_CNTL_SPACING__SHIFT 0
+static inline uint32_t A6XX_PC_TESS_CNTL_SPACING(enum a6xx_tess_spacing val)
+{
+ return ((val) << A6XX_PC_TESS_CNTL_SPACING__SHIFT) & A6XX_PC_TESS_CNTL_SPACING__MASK;
+}
+#define A6XX_PC_TESS_CNTL_OUTPUT__MASK 0x0000000c
+#define A6XX_PC_TESS_CNTL_OUTPUT__SHIFT 2
+static inline uint32_t A6XX_PC_TESS_CNTL_OUTPUT(enum a6xx_tess_output val)
+{
+ return ((val) << A6XX_PC_TESS_CNTL_OUTPUT__SHIFT) & A6XX_PC_TESS_CNTL_OUTPUT__MASK;
+}
#define REG_A6XX_PC_RESTART_INDEX 0x00009803
@@ -4050,43 +5189,244 @@ static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
#define REG_A6XX_PC_UNKNOWN_9805 0x00009805
-#define REG_A6XX_PC_UNKNOWN_9806 0x00009806
+#define REG_A6XX_PC_PRIMID_PASSTHRU 0x00009806
-#define REG_A6XX_PC_UNKNOWN_9980 0x00009980
+#define REG_A6XX_PC_DRAW_CMD 0x00009840
+#define A6XX_PC_DRAW_CMD_STATE_ID__MASK 0x000000ff
+#define A6XX_PC_DRAW_CMD_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_PC_DRAW_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_PC_DRAW_CMD_STATE_ID__SHIFT) & A6XX_PC_DRAW_CMD_STATE_ID__MASK;
+}
-#define REG_A6XX_PC_UNKNOWN_9981 0x00009981
+#define REG_A6XX_PC_DISPATCH_CMD 0x00009841
+#define A6XX_PC_DISPATCH_CMD_STATE_ID__MASK 0x000000ff
+#define A6XX_PC_DISPATCH_CMD_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_PC_DISPATCH_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_PC_DISPATCH_CMD_STATE_ID__SHIFT) & A6XX_PC_DISPATCH_CMD_STATE_ID__MASK;
+}
-#define REG_A6XX_PC_UNKNOWN_9990 0x00009990
+#define REG_A6XX_PC_EVENT_CMD 0x00009842
+#define A6XX_PC_EVENT_CMD_STATE_ID__MASK 0x00ff0000
+#define A6XX_PC_EVENT_CMD_STATE_ID__SHIFT 16
+static inline uint32_t A6XX_PC_EVENT_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_PC_EVENT_CMD_STATE_ID__SHIFT) & A6XX_PC_EVENT_CMD_STATE_ID__MASK;
+}
+#define A6XX_PC_EVENT_CMD_EVENT__MASK 0x0000007f
+#define A6XX_PC_EVENT_CMD_EVENT__SHIFT 0
+static inline uint32_t A6XX_PC_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+ return ((val) << A6XX_PC_EVENT_CMD_EVENT__SHIFT) & A6XX_PC_EVENT_CMD_EVENT__MASK;
+}
+
+#define REG_A6XX_PC_POLYGON_MODE 0x00009981
+#define A6XX_PC_POLYGON_MODE_MODE__MASK 0x00000003
+#define A6XX_PC_POLYGON_MODE_MODE__SHIFT 0
+static inline uint32_t A6XX_PC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val)
+{
+ return ((val) << A6XX_PC_POLYGON_MODE_MODE__SHIFT) & A6XX_PC_POLYGON_MODE_MODE__MASK;
+}
+
+#define REG_A6XX_PC_UNKNOWN_9980 0x00009980
#define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00
#define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001
#define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002
+#define A6XX_PC_PRIMITIVE_CNTL_0_TESS_UPPER_LEFT_DOMAIN_ORIGIN 0x00000004
+#define A6XX_PC_PRIMITIVE_CNTL_0_UNK3 0x00000008
+
+#define REG_A6XX_PC_VS_OUT_CNTL 0x00009b01
+#define A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_PC_VS_OUT_CNTL_PSIZE 0x00000100
+#define A6XX_PC_VS_OUT_CNTL_LAYER 0x00000200
+#define A6XX_PC_VS_OUT_CNTL_VIEW 0x00000400
+#define A6XX_PC_VS_OUT_CNTL_PRIMITIVE_ID 0x00000800
+#define A6XX_PC_VS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000
+#define A6XX_PC_VS_OUT_CNTL_CLIP_MASK__SHIFT 16
+static inline uint32_t A6XX_PC_VS_OUT_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_PC_VS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_VS_OUT_CNTL_CLIP_MASK__MASK;
+}
-#define REG_A6XX_PC_PRIMITIVE_CNTL_1 0x00009b01
-#define A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK 0x0000007f
-#define A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT 0
-static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(uint32_t val)
+#define REG_A6XX_PC_GS_OUT_CNTL 0x00009b02
+#define A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
{
- return ((val) << A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK;
+ return ((val) << A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_PC_GS_OUT_CNTL_PSIZE 0x00000100
+#define A6XX_PC_GS_OUT_CNTL_LAYER 0x00000200
+#define A6XX_PC_GS_OUT_CNTL_VIEW 0x00000400
+#define A6XX_PC_GS_OUT_CNTL_PRIMITIVE_ID 0x00000800
+#define A6XX_PC_GS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000
+#define A6XX_PC_GS_OUT_CNTL_CLIP_MASK__SHIFT 16
+static inline uint32_t A6XX_PC_GS_OUT_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_PC_GS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_GS_OUT_CNTL_CLIP_MASK__MASK;
}
-#define A6XX_PC_PRIMITIVE_CNTL_1_PSIZE 0x00000100
-#define REG_A6XX_PC_UNKNOWN_9B06 0x00009b06
+#define REG_A6XX_PC_PRIMITIVE_CNTL_3 0x00009b03
+
+#define REG_A6XX_PC_DS_OUT_CNTL 0x00009b04
+#define A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_PC_DS_OUT_CNTL_PSIZE 0x00000100
+#define A6XX_PC_DS_OUT_CNTL_LAYER 0x00000200
+#define A6XX_PC_DS_OUT_CNTL_VIEW 0x00000400
+#define A6XX_PC_DS_OUT_CNTL_PRIMITIVE_ID 0x00000800
+#define A6XX_PC_DS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000
+#define A6XX_PC_DS_OUT_CNTL_CLIP_MASK__SHIFT 16
+static inline uint32_t A6XX_PC_DS_OUT_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_PC_DS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_DS_OUT_CNTL_CLIP_MASK__MASK;
+}
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_5 0x00009b05
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK 0x000000ff
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT 0
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK;
+}
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK 0x00007c00
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT 10
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK;
+}
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK 0x00030000
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT 16
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(enum a6xx_tess_output val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK;
+}
+#define A6XX_PC_PRIMITIVE_CNTL_5_UNK18__MASK 0x00040000
+#define A6XX_PC_PRIMITIVE_CNTL_5_UNK18__SHIFT 18
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_UNK18(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_UNK18__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_UNK18__MASK;
+}
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_6 0x00009b06
+#define A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK 0x000007ff
+#define A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK;
+}
#define REG_A6XX_PC_UNKNOWN_9B07 0x00009b07
+#define REG_A6XX_PC_UNKNOWN_9B08 0x00009b08
+
+#define REG_A6XX_PC_2D_EVENT_CMD 0x00009c00
+#define A6XX_PC_2D_EVENT_CMD_EVENT__MASK 0x0000007f
+#define A6XX_PC_2D_EVENT_CMD_EVENT__SHIFT 0
+static inline uint32_t A6XX_PC_2D_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+ return ((val) << A6XX_PC_2D_EVENT_CMD_EVENT__SHIFT) & A6XX_PC_2D_EVENT_CMD_EVENT__MASK;
+}
+#define A6XX_PC_2D_EVENT_CMD_STATE_ID__MASK 0x0000ff00
+#define A6XX_PC_2D_EVENT_CMD_STATE_ID__SHIFT 8
+static inline uint32_t A6XX_PC_2D_EVENT_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_PC_2D_EVENT_CMD_STATE_ID__SHIFT) & A6XX_PC_2D_EVENT_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_PC_DBG_ECO_CNTL 0x00009e00
+
+#define REG_A6XX_PC_ADDR_MODE_CNTL 0x00009e01
+
#define REG_A6XX_PC_TESSFACTOR_ADDR_LO 0x00009e08
#define REG_A6XX_PC_TESSFACTOR_ADDR_HI 0x00009e09
+#define REG_A6XX_PC_TESSFACTOR_ADDR 0x00009e08
+#define A6XX_PC_TESSFACTOR_ADDR__MASK 0xffffffff
+#define A6XX_PC_TESSFACTOR_ADDR__SHIFT 0
+static inline uint32_t A6XX_PC_TESSFACTOR_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_PC_TESSFACTOR_ADDR__SHIFT) & A6XX_PC_TESSFACTOR_ADDR__MASK;
+}
+
+#define REG_A6XX_PC_VSTREAM_CONTROL 0x00009e11
+#define A6XX_PC_VSTREAM_CONTROL_UNK0__MASK 0x0000ffff
+#define A6XX_PC_VSTREAM_CONTROL_UNK0__SHIFT 0
+static inline uint32_t A6XX_PC_VSTREAM_CONTROL_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_PC_VSTREAM_CONTROL_UNK0__SHIFT) & A6XX_PC_VSTREAM_CONTROL_UNK0__MASK;
+}
+#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK 0x003f0000
+#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT 16
+static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_SIZE(uint32_t val)
+{
+ return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT) & A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK;
+}
+#define A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK 0x07c00000
+#define A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT 22
+static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_N(uint32_t val)
+{
+ return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT) & A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK;
+}
+
+#define REG_A6XX_PC_BIN_PRIM_STRM 0x00009e12
+#define A6XX_PC_BIN_PRIM_STRM__MASK 0xffffffff
+#define A6XX_PC_BIN_PRIM_STRM__SHIFT 0
+static inline uint32_t A6XX_PC_BIN_PRIM_STRM(uint32_t val)
+{
+ return ((val) << A6XX_PC_BIN_PRIM_STRM__SHIFT) & A6XX_PC_BIN_PRIM_STRM__MASK;
+}
+
+#define REG_A6XX_PC_BIN_DRAW_STRM 0x00009e14
+#define A6XX_PC_BIN_DRAW_STRM__MASK 0xffffffff
+#define A6XX_PC_BIN_DRAW_STRM__SHIFT 0
+static inline uint32_t A6XX_PC_BIN_DRAW_STRM(uint32_t val)
+{
+ return ((val) << A6XX_PC_BIN_DRAW_STRM__SHIFT) & A6XX_PC_BIN_DRAW_STRM__MASK;
+}
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_0 0x00009e34
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_1 0x00009e35
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_2 0x00009e36
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_3 0x00009e37
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_4 0x00009e38
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_5 0x00009e39
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_6 0x00009e3a
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_7 0x00009e3b
+
#define REG_A6XX_PC_UNKNOWN_9E72 0x00009e72
#define REG_A6XX_VFD_CONTROL_0 0x0000a000
-#define A6XX_VFD_CONTROL_0_VTXCNT__MASK 0x0000003f
-#define A6XX_VFD_CONTROL_0_VTXCNT__SHIFT 0
-static inline uint32_t A6XX_VFD_CONTROL_0_VTXCNT(uint32_t val)
+#define A6XX_VFD_CONTROL_0_FETCH_CNT__MASK 0x0000003f
+#define A6XX_VFD_CONTROL_0_FETCH_CNT__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_0_FETCH_CNT(uint32_t val)
{
- return ((val) << A6XX_VFD_CONTROL_0_VTXCNT__SHIFT) & A6XX_VFD_CONTROL_0_VTXCNT__MASK;
+ return ((val) << A6XX_VFD_CONTROL_0_FETCH_CNT__SHIFT) & A6XX_VFD_CONTROL_0_FETCH_CNT__MASK;
+}
+#define A6XX_VFD_CONTROL_0_DECODE_CNT__MASK 0x00003f00
+#define A6XX_VFD_CONTROL_0_DECODE_CNT__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_0_DECODE_CNT(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_0_DECODE_CNT__SHIFT) & A6XX_VFD_CONTROL_0_DECODE_CNT__MASK;
}
#define REG_A6XX_VFD_CONTROL_1 0x0000a001
@@ -4110,19 +5450,25 @@ static inline uint32_t A6XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
}
#define REG_A6XX_VFD_CONTROL_2 0x0000a002
-#define A6XX_VFD_CONTROL_2_REGID_PATCHID__MASK 0x000000ff
-#define A6XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT 0
-static inline uint32_t A6XX_VFD_CONTROL_2_REGID_PATCHID(uint32_t val)
+#define A6XX_VFD_CONTROL_2_REGID_HSPATCHID__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_2_REGID_HSPATCHID__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_2_REGID_HSPATCHID(uint32_t val)
{
- return ((val) << A6XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_PATCHID__MASK;
+ return ((val) << A6XX_VFD_CONTROL_2_REGID_HSPATCHID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_HSPATCHID__MASK;
+}
+#define A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_2_REGID_INVOCATIONID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__MASK;
}
#define REG_A6XX_VFD_CONTROL_3 0x0000a003
-#define A6XX_VFD_CONTROL_3_REGID_PATCHID__MASK 0x0000ff00
-#define A6XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT 8
-static inline uint32_t A6XX_VFD_CONTROL_3_REGID_PATCHID(uint32_t val)
+#define A6XX_VFD_CONTROL_3_REGID_DSPATCHID__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_3_REGID_DSPATCHID__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_DSPATCHID(uint32_t val)
{
- return ((val) << A6XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_PATCHID__MASK;
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_DSPATCHID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_DSPATCHID__MASK;
}
#define A6XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
#define A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
@@ -4140,15 +5486,24 @@ static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
#define REG_A6XX_VFD_CONTROL_4 0x0000a004
#define REG_A6XX_VFD_CONTROL_5 0x0000a005
+#define A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_5_REGID_GSHEADER__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_5_REGID_GSHEADER(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_5_REGID_GSHEADER__SHIFT) & A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK;
+}
#define REG_A6XX_VFD_CONTROL_6 0x0000a006
+#define A6XX_VFD_CONTROL_6_PRIMID_PASSTHRU 0x00000001
#define REG_A6XX_VFD_MODE_CNTL 0x0000a007
#define A6XX_VFD_MODE_CNTL_BINNING_PASS 0x00000001
#define REG_A6XX_VFD_UNKNOWN_A008 0x0000a008
-#define REG_A6XX_VFD_UNKNOWN_A009 0x0000a009
+#define REG_A6XX_VFD_ADD_OFFSET 0x0000a009
+#define A6XX_VFD_ADD_OFFSET_VERTEX 0x00000001
+#define A6XX_VFD_ADD_OFFSET_INSTANCE 0x00000002
#define REG_A6XX_VFD_INDEX_OFFSET 0x0000a00e
@@ -4156,6 +5511,8 @@ static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
static inline uint32_t REG_A6XX_VFD_FETCH(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+static inline uint32_t REG_A6XX_VFD_FETCH_BASE(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+
static inline uint32_t REG_A6XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
static inline uint32_t REG_A6XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000a011 + 0x4*i0; }
@@ -4173,10 +5530,16 @@ static inline uint32_t A6XX_VFD_DECODE_INSTR_IDX(uint32_t val)
{
return ((val) << A6XX_VFD_DECODE_INSTR_IDX__SHIFT) & A6XX_VFD_DECODE_INSTR_IDX__MASK;
}
+#define A6XX_VFD_DECODE_INSTR_OFFSET__MASK 0x0001ffe0
+#define A6XX_VFD_DECODE_INSTR_OFFSET__SHIFT 5
+static inline uint32_t A6XX_VFD_DECODE_INSTR_OFFSET(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_OFFSET__SHIFT) & A6XX_VFD_DECODE_INSTR_OFFSET__MASK;
+}
#define A6XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
#define A6XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000
#define A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
-static inline uint32_t A6XX_VFD_DECODE_INSTR_FORMAT(enum a6xx_vtx_fmt val)
+static inline uint32_t A6XX_VFD_DECODE_INSTR_FORMAT(enum a6xx_format val)
{
return ((val) << A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A6XX_VFD_DECODE_INSTR_FORMAT__MASK;
}
@@ -4209,12 +5572,44 @@ static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
#define REG_A6XX_SP_UNKNOWN_A0F8 0x0000a0f8
-#define REG_A6XX_SP_PRIMITIVE_CNTL 0x0000a802
-#define A6XX_SP_PRIMITIVE_CNTL_VSOUT__MASK 0x0000001f
-#define A6XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT 0
-static inline uint32_t A6XX_SP_PRIMITIVE_CNTL_VSOUT(uint32_t val)
+#define REG_A6XX_SP_VS_CTRL_REG0 0x0000a800
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_VS_CTRL_REG0_DIFF_FINE 0x00800000
+#define A6XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_VS_BRANCH_COND 0x0000a801
+
+#define REG_A6XX_SP_VS_PRIMITIVE_CNTL 0x0000a802
+#define A6XX_SP_VS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
+#define A6XX_SP_VS_PRIMITIVE_CNTL_OUT__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PRIMITIVE_CNTL_OUT(uint32_t val)
{
- return ((val) << A6XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT) & A6XX_SP_PRIMITIVE_CNTL_VSOUT__MASK;
+ return ((val) << A6XX_SP_VS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_VS_PRIMITIVE_CNTL_OUT__MASK;
}
static inline uint32_t REG_A6XX_SP_VS_OUT(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
@@ -4273,35 +5668,6 @@ static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
}
-#define REG_A6XX_SP_VS_CTRL_REG0 0x0000a800
-#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
-#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
-static inline uint32_t A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
-#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
-static inline uint32_t A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
-}
-#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
-#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 14
-static inline uint32_t A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
-{
- return ((val) << A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
-}
-#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
-#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
-static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
-{
- return ((val) << A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
-}
-#define A6XX_SP_VS_CTRL_REG0_VARYING 0x00400000
-#define A6XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x04000000
-#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x80000000
-
#define REG_A6XX_SP_UNKNOWN_A81B 0x0000a81b
#define REG_A6XX_SP_VS_OBJ_START_LO 0x0000a81c
@@ -4311,6 +5677,10 @@ static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
#define REG_A6XX_SP_VS_TEX_COUNT 0x0000a822
#define REG_A6XX_SP_VS_CONFIG 0x0000a823
+#define A6XX_SP_VS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_VS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_VS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_VS_CONFIG_BINDLESS_UBO 0x00000008
#define A6XX_SP_VS_CONFIG_ENABLED 0x00000100
#define A6XX_SP_VS_CONFIG_NTEX__MASK 0x0001fe00
#define A6XX_SP_VS_CONFIG_NTEX__SHIFT 9
@@ -4318,12 +5688,18 @@ static inline uint32_t A6XX_SP_VS_CONFIG_NTEX(uint32_t val)
{
return ((val) << A6XX_SP_VS_CONFIG_NTEX__SHIFT) & A6XX_SP_VS_CONFIG_NTEX__MASK;
}
-#define A6XX_SP_VS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_VS_CONFIG_NSAMP__MASK 0x003e0000
#define A6XX_SP_VS_CONFIG_NSAMP__SHIFT 17
static inline uint32_t A6XX_SP_VS_CONFIG_NSAMP(uint32_t val)
{
return ((val) << A6XX_SP_VS_CONFIG_NSAMP__SHIFT) & A6XX_SP_VS_CONFIG_NSAMP__MASK;
}
+#define A6XX_SP_VS_CONFIG_NIBO__MASK 0x3fc00000
+#define A6XX_SP_VS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_VS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CONFIG_NIBO__SHIFT) & A6XX_SP_VS_CONFIG_NIBO__MASK;
+}
#define REG_A6XX_SP_VS_INSTRLEN 0x0000a824
@@ -4353,11 +5729,14 @@ static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
return ((val) << A6XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_HS_CTRL_REG0_THREADSIZE__MASK;
}
#define A6XX_SP_HS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_HS_CTRL_REG0_DIFF_FINE 0x00800000
#define A6XX_SP_HS_CTRL_REG0_PIXLODENABLE 0x04000000
#define A6XX_SP_HS_CTRL_REG0_MERGEDREGS 0x80000000
#define REG_A6XX_SP_HS_UNKNOWN_A831 0x0000a831
+#define REG_A6XX_SP_HS_UNKNOWN_A833 0x0000a833
+
#define REG_A6XX_SP_HS_OBJ_START_LO 0x0000a834
#define REG_A6XX_SP_HS_OBJ_START_HI 0x0000a835
@@ -4365,6 +5744,10 @@ static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
#define REG_A6XX_SP_HS_TEX_COUNT 0x0000a83a
#define REG_A6XX_SP_HS_CONFIG 0x0000a83b
+#define A6XX_SP_HS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_HS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_HS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_HS_CONFIG_BINDLESS_UBO 0x00000008
#define A6XX_SP_HS_CONFIG_ENABLED 0x00000100
#define A6XX_SP_HS_CONFIG_NTEX__MASK 0x0001fe00
#define A6XX_SP_HS_CONFIG_NTEX__SHIFT 9
@@ -4372,12 +5755,18 @@ static inline uint32_t A6XX_SP_HS_CONFIG_NTEX(uint32_t val)
{
return ((val) << A6XX_SP_HS_CONFIG_NTEX__SHIFT) & A6XX_SP_HS_CONFIG_NTEX__MASK;
}
-#define A6XX_SP_HS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_HS_CONFIG_NSAMP__MASK 0x003e0000
#define A6XX_SP_HS_CONFIG_NSAMP__SHIFT 17
static inline uint32_t A6XX_SP_HS_CONFIG_NSAMP(uint32_t val)
{
return ((val) << A6XX_SP_HS_CONFIG_NSAMP__SHIFT) & A6XX_SP_HS_CONFIG_NSAMP__MASK;
}
+#define A6XX_SP_HS_CONFIG_NIBO__MASK 0x3fc00000
+#define A6XX_SP_HS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_HS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CONFIG_NIBO__SHIFT) & A6XX_SP_HS_CONFIG_NIBO__MASK;
+}
#define REG_A6XX_SP_HS_INSTRLEN 0x0000a83c
@@ -4407,9 +5796,76 @@ static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
return ((val) << A6XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_DS_CTRL_REG0_THREADSIZE__MASK;
}
#define A6XX_SP_DS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_DS_CTRL_REG0_DIFF_FINE 0x00800000
#define A6XX_SP_DS_CTRL_REG0_PIXLODENABLE 0x04000000
#define A6XX_SP_DS_CTRL_REG0_MERGEDREGS 0x80000000
+#define REG_A6XX_SP_DS_PRIMITIVE_CNTL 0x0000a842
+#define A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
+#define A6XX_SP_DS_PRIMITIVE_CNTL_OUT__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PRIMITIVE_CNTL_OUT(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_DS_OUT(uint32_t i0) { return 0x0000a843 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000a843 + 0x1*i0; }
+#define A6XX_SP_DS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A6XX_SP_DS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_DS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A6XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A6XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_DS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A6XX_SP_DS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_DS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A6XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A6XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000a853 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000a853 + 0x1*i0; }
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A6XX_SP_DS_UNKNOWN_A85B 0x0000a85b
+
#define REG_A6XX_SP_DS_OBJ_START_LO 0x0000a85c
#define REG_A6XX_SP_DS_OBJ_START_HI 0x0000a85d
@@ -4417,6 +5873,10 @@ static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
#define REG_A6XX_SP_DS_TEX_COUNT 0x0000a862
#define REG_A6XX_SP_DS_CONFIG 0x0000a863
+#define A6XX_SP_DS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_DS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_DS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_DS_CONFIG_BINDLESS_UBO 0x00000008
#define A6XX_SP_DS_CONFIG_ENABLED 0x00000100
#define A6XX_SP_DS_CONFIG_NTEX__MASK 0x0001fe00
#define A6XX_SP_DS_CONFIG_NTEX__SHIFT 9
@@ -4424,12 +5884,18 @@ static inline uint32_t A6XX_SP_DS_CONFIG_NTEX(uint32_t val)
{
return ((val) << A6XX_SP_DS_CONFIG_NTEX__SHIFT) & A6XX_SP_DS_CONFIG_NTEX__MASK;
}
-#define A6XX_SP_DS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_DS_CONFIG_NSAMP__MASK 0x003e0000
#define A6XX_SP_DS_CONFIG_NSAMP__SHIFT 17
static inline uint32_t A6XX_SP_DS_CONFIG_NSAMP(uint32_t val)
{
return ((val) << A6XX_SP_DS_CONFIG_NSAMP__SHIFT) & A6XX_SP_DS_CONFIG_NSAMP__MASK;
}
+#define A6XX_SP_DS_CONFIG_NIBO__MASK 0x3fc00000
+#define A6XX_SP_DS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_DS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CONFIG_NIBO__SHIFT) & A6XX_SP_DS_CONFIG_NIBO__MASK;
+}
#define REG_A6XX_SP_DS_INSTRLEN 0x0000a864
@@ -4459,10 +5925,83 @@ static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
return ((val) << A6XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_GS_CTRL_REG0_THREADSIZE__MASK;
}
#define A6XX_SP_GS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_GS_CTRL_REG0_DIFF_FINE 0x00800000
#define A6XX_SP_GS_CTRL_REG0_PIXLODENABLE 0x04000000
#define A6XX_SP_GS_CTRL_REG0_MERGEDREGS 0x80000000
-#define REG_A6XX_SP_GS_UNKNOWN_A871 0x0000a871
+#define REG_A6XX_SP_GS_PRIM_SIZE 0x0000a871
+
+#define REG_A6XX_SP_GS_BRANCH_COND 0x0000a872
+
+#define REG_A6XX_SP_GS_PRIMITIVE_CNTL 0x0000a873
+#define A6XX_SP_GS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
+#define A6XX_SP_GS_PRIMITIVE_CNTL_OUT__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PRIMITIVE_CNTL_OUT(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_GS_PRIMITIVE_CNTL_OUT__MASK;
+}
+#define A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0
+#define A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6
+static inline uint32_t A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_GS_OUT(uint32_t i0) { return 0x0000a874 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_GS_OUT_REG(uint32_t i0) { return 0x0000a874 + 0x1*i0; }
+#define A6XX_SP_GS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A6XX_SP_GS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_GS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A6XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A6XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_GS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A6XX_SP_GS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_GS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A6XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A6XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_GS_VPC_DST(uint32_t i0) { return 0x0000a884 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x0000a884 + 0x1*i0; }
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
+}
#define REG_A6XX_SP_GS_OBJ_START_LO 0x0000a88d
@@ -4471,6 +6010,10 @@ static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
#define REG_A6XX_SP_GS_TEX_COUNT 0x0000a893
#define REG_A6XX_SP_GS_CONFIG 0x0000a894
+#define A6XX_SP_GS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_GS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_GS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_GS_CONFIG_BINDLESS_UBO 0x00000008
#define A6XX_SP_GS_CONFIG_ENABLED 0x00000100
#define A6XX_SP_GS_CONFIG_NTEX__MASK 0x0001fe00
#define A6XX_SP_GS_CONFIG_NTEX__SHIFT 9
@@ -4478,12 +6021,18 @@ static inline uint32_t A6XX_SP_GS_CONFIG_NTEX(uint32_t val)
{
return ((val) << A6XX_SP_GS_CONFIG_NTEX__SHIFT) & A6XX_SP_GS_CONFIG_NTEX__MASK;
}
-#define A6XX_SP_GS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_GS_CONFIG_NSAMP__MASK 0x003e0000
#define A6XX_SP_GS_CONFIG_NSAMP__SHIFT 17
static inline uint32_t A6XX_SP_GS_CONFIG_NSAMP(uint32_t val)
{
return ((val) << A6XX_SP_GS_CONFIG_NSAMP__SHIFT) & A6XX_SP_GS_CONFIG_NSAMP__MASK;
}
+#define A6XX_SP_GS_CONFIG_NIBO__MASK 0x3fc00000
+#define A6XX_SP_GS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_GS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CONFIG_NIBO__SHIFT) & A6XX_SP_GS_CONFIG_NIBO__MASK;
+}
#define REG_A6XX_SP_GS_INSTRLEN 0x0000a895
@@ -4545,9 +6094,12 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
return ((val) << A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
}
#define A6XX_SP_FS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_FS_CTRL_REG0_DIFF_FINE 0x00800000
#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
+#define REG_A6XX_SP_FS_BRANCH_COND 0x0000a981
+
#define REG_A6XX_SP_UNKNOWN_A982 0x0000a982
#define REG_A6XX_SP_FS_OBJ_START_LO 0x0000a983
@@ -4557,6 +6109,7 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
#define REG_A6XX_SP_BLEND_CNTL 0x0000a989
#define A6XX_SP_BLEND_CNTL_ENABLED 0x00000001
#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100
+#define A6XX_SP_BLEND_CNTL_DUAL_COLOR_IN_ENABLE 0x00000200
#define A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define REG_A6XX_SP_SRGB_CNTL 0x0000a98a
@@ -4620,12 +6173,25 @@ static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT7(uint32_t val)
}
#define REG_A6XX_SP_FS_OUTPUT_CNTL0 0x0000a98c
+#define A6XX_SP_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE 0x00000001
#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK 0x0000ff00
#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT 8
static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(uint32_t val)
{
return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK;
}
+#define A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__MASK 0x00ff0000
+#define A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__MASK 0xff000000
+#define A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__SHIFT 24
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__MASK;
+}
#define REG_A6XX_SP_FS_OUTPUT_CNTL1 0x0000a98d
#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
@@ -4640,19 +6206,101 @@ static inline uint32_t REG_A6XX_SP_FS_MRT(uint32_t i0) { return 0x0000a996 + 0x1
static inline uint32_t REG_A6XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_format val)
{
return ((val) << A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
}
#define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
#define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
-#define REG_A6XX_SP_UNKNOWN_A99E 0x0000a99e
+#define REG_A6XX_SP_FS_PREFETCH_CNTL 0x0000a99e
+#define A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK 0x00000007
+#define A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_COUNT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK3 0x00000008
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK4__MASK 0x00000ff0
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK4__SHIFT 4
+static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_UNK4(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CNTL_UNK4__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_UNK4__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_FS_PREFETCH(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_PREFETCH_CMD(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
+#define A6XX_SP_FS_PREFETCH_CMD_SRC__MASK 0x0000007f
+#define A6XX_SP_FS_PREFETCH_CMD_SRC__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_SRC(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_SRC__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_SRC__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK 0x00000780
+#define A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT 7
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK 0x0000f800
+#define A6XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT 11
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_TEX_ID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_DST__MASK 0x003f0000
+#define A6XX_SP_FS_PREFETCH_CMD_DST__SHIFT 16
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_DST(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_DST__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_DST__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_WRMASK__MASK 0x03c00000
+#define A6XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT 22
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_WRMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_WRMASK__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_HALF 0x04000000
+#define A6XX_SP_FS_PREFETCH_CMD_CMD__MASK 0xf8000000
+#define A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT 27
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_CMD(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_CMD__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH_CMD(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; }
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK 0x000000ff
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT 0
+static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT) & A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK;
+}
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK 0x00ff0000
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__SHIFT 16
+static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__SHIFT) & A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK;
+}
#define REG_A6XX_SP_FS_TEX_COUNT 0x0000a9a7
#define REG_A6XX_SP_UNKNOWN_A9A8 0x0000a9a8
+#define REG_A6XX_SP_CS_UNKNOWN_A9B1 0x0000a9b1
+#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE_2K__MASK 0x00000001
+#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE_2K__SHIFT 0
+static inline uint32_t A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE_2K(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE_2K__SHIFT) & A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE_2K__MASK;
+}
+
+#define REG_A6XX_SP_CS_UNKNOWN_A9B3 0x0000a9b3
+
+#define REG_A6XX_SP_CS_TEX_COUNT 0x0000a9ba
+
#define REG_A6XX_SP_FS_TEX_SAMP_LO 0x0000a9e0
#define REG_A6XX_SP_FS_TEX_SAMP_HI 0x0000a9e1
@@ -4669,6 +6317,10 @@ static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_color_fmt val)
#define REG_A6XX_SP_CS_TEX_CONST_HI 0x0000a9e7
+static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
+
static inline uint32_t REG_A6XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
static inline uint32_t REG_A6XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
@@ -4706,6 +6358,7 @@ static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
return ((val) << A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
}
#define A6XX_SP_CS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_CS_CTRL_REG0_DIFF_FINE 0x00800000
#define A6XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x04000000
#define A6XX_SP_CS_CTRL_REG0_MERGEDREGS 0x80000000
@@ -4713,11 +6366,46 @@ static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
#define REG_A6XX_SP_CS_OBJ_START_HI 0x0000a9b5
+#define REG_A6XX_SP_CS_CONFIG 0x0000a9bb
+#define A6XX_SP_CS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_CS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_CS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_CS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_CS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_CS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_CS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_CS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CONFIG_NTEX__SHIFT) & A6XX_SP_CS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_CS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_CS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_CS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CONFIG_NSAMP__SHIFT) & A6XX_SP_CS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_CS_CONFIG_NIBO__MASK 0x3fc00000
+#define A6XX_SP_CS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_CS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CONFIG_NIBO__SHIFT) & A6XX_SP_CS_CONFIG_NIBO__MASK;
+}
+
#define REG_A6XX_SP_CS_INSTRLEN 0x0000a9bc
+#define REG_A6XX_SP_CS_IBO_LO 0x0000a9f2
+
+#define REG_A6XX_SP_CS_IBO_HI 0x0000a9f3
+
+#define REG_A6XX_SP_CS_IBO_COUNT 0x0000aa00
+
#define REG_A6XX_SP_UNKNOWN_AB00 0x0000ab00
#define REG_A6XX_SP_FS_CONFIG 0x0000ab04
+#define A6XX_SP_FS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_FS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_FS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_FS_CONFIG_BINDLESS_UBO 0x00000008
#define A6XX_SP_FS_CONFIG_ENABLED 0x00000100
#define A6XX_SP_FS_CONFIG_NTEX__MASK 0x0001fe00
#define A6XX_SP_FS_CONFIG_NTEX__SHIFT 9
@@ -4725,18 +6413,48 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NTEX(uint32_t val)
{
return ((val) << A6XX_SP_FS_CONFIG_NTEX__SHIFT) & A6XX_SP_FS_CONFIG_NTEX__MASK;
}
-#define A6XX_SP_FS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_FS_CONFIG_NSAMP__MASK 0x003e0000
#define A6XX_SP_FS_CONFIG_NSAMP__SHIFT 17
static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
{
return ((val) << A6XX_SP_FS_CONFIG_NSAMP__SHIFT) & A6XX_SP_FS_CONFIG_NSAMP__MASK;
}
+#define A6XX_SP_FS_CONFIG_NIBO__MASK 0x3fc00000
+#define A6XX_SP_FS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_FS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CONFIG_NIBO__SHIFT) & A6XX_SP_FS_CONFIG_NIBO__MASK;
+}
#define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05
-#define REG_A6XX_SP_UNKNOWN_AB20 0x0000ab20
+static inline uint32_t REG_A6XX_SP_BINDLESS_BASE(uint32_t i0) { return 0x0000ab10 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_SP_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000ab10 + 0x2*i0; }
-#define REG_A6XX_SP_UNKNOWN_ACC0 0x0000acc0
+#define REG_A6XX_SP_IBO_LO 0x0000ab1a
+
+#define REG_A6XX_SP_IBO_HI 0x0000ab1b
+
+#define REG_A6XX_SP_IBO_COUNT 0x0000ab20
+
+#define REG_A6XX_SP_2D_DST_FORMAT 0x0000acc0
+#define A6XX_SP_2D_DST_FORMAT_NORM 0x00000001
+#define A6XX_SP_2D_DST_FORMAT_SINT 0x00000002
+#define A6XX_SP_2D_DST_FORMAT_UINT 0x00000004
+#define A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK 0x000007f8
+#define A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT 3
+static inline uint32_t A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT) & A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_2D_DST_FORMAT_SRGB 0x00000800
+#define A6XX_SP_2D_DST_FORMAT_MASK__MASK 0x0000f000
+#define A6XX_SP_2D_DST_FORMAT_MASK__SHIFT 12
+static inline uint32_t A6XX_SP_2D_DST_FORMAT_MASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_2D_DST_FORMAT_MASK__SHIFT) & A6XX_SP_2D_DST_FORMAT_MASK__MASK;
+}
#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00
@@ -4746,6 +6464,8 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
#define REG_A6XX_SP_UNKNOWN_AE0F 0x0000ae0f
+#define REG_A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR 0x0000b180
+
#define REG_A6XX_SP_UNKNOWN_B182 0x0000b182
#define REG_A6XX_SP_UNKNOWN_B183 0x0000b183
@@ -4767,18 +6487,122 @@ static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples
}
#define A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR 0x0000b302
+
#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO 0x0000b302
#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_HI 0x0000b303
-#define REG_A6XX_SP_TP_UNKNOWN_B304 0x0000b304
+#define REG_A6XX_SP_TP_SAMPLE_CONFIG 0x0000b304
+#define A6XX_SP_TP_SAMPLE_CONFIG_UNK0 0x00000001
+#define A6XX_SP_TP_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002
+
+#define REG_A6XX_SP_TP_SAMPLE_LOCATION_0 0x0000b305
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_SP_TP_SAMPLE_LOCATION_1 0x0000b306
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+}
#define REG_A6XX_SP_TP_UNKNOWN_B309 0x0000b309
#define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0
#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
-static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_format val)
{
return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK;
}
@@ -4795,7 +6619,17 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap va
return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
}
#define A6XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
+#define A6XX_SP_PS_2D_SRC_INFO_SRGB 0x00002000
+#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK 0x0000c000
+#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT 14
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK;
+}
#define A6XX_SP_PS_2D_SRC_INFO_FILTER 0x00010000
+#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES_AVERAGE 0x00040000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK20 0x00100000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK22 0x00400000
#define REG_A6XX_SP_PS_2D_SRC_SIZE 0x0000b4c1
#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK 0x00007fff
@@ -4815,6 +6649,8 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
#define REG_A6XX_SP_PS_2D_SRC_HI 0x0000b4c3
+#define REG_A6XX_SP_PS_2D_SRC 0x0000b4c2
+
#define REG_A6XX_SP_PS_2D_SRC_PITCH 0x0000b4c4
#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x01fffe00
#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9
@@ -4827,6 +6663,22 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
#define REG_A6XX_SP_PS_2D_SRC_FLAGS_HI 0x0000b4cb
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS 0x0000b4ca
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS_PITCH 0x0000b4cc
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_SP_PS_2D_SRC_FLAGS_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS_PITCH_PITCH__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH_ARRAY_PITCH__MASK 0x003ff800
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 7) << A6XX_SP_PS_2D_SRC_FLAGS_PITCH_ARRAY_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS_PITCH_ARRAY_PITCH__MASK;
+}
+
#define REG_A6XX_SP_UNKNOWN_B600 0x0000b600
#define REG_A6XX_SP_UNKNOWN_B605 0x0000b605
@@ -4838,6 +6690,7 @@ static inline uint32_t A6XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val)
{
return ((val >> 2) << A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK;
}
+#define A6XX_HLSQ_VS_CNTL_ENABLED 0x00000100
#define REG_A6XX_HLSQ_HS_CNTL 0x0000b801
#define A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK 0x000000ff
@@ -4846,6 +6699,7 @@ static inline uint32_t A6XX_HLSQ_HS_CNTL_CONSTLEN(uint32_t val)
{
return ((val >> 2) << A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK;
}
+#define A6XX_HLSQ_HS_CNTL_ENABLED 0x00000100
#define REG_A6XX_HLSQ_DS_CNTL 0x0000b802
#define A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK 0x000000ff
@@ -4854,6 +6708,7 @@ static inline uint32_t A6XX_HLSQ_DS_CNTL_CONSTLEN(uint32_t val)
{
return ((val >> 2) << A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK;
}
+#define A6XX_HLSQ_DS_CNTL_ENABLED 0x00000100
#define REG_A6XX_HLSQ_GS_CNTL 0x0000b803
#define A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK 0x000000ff
@@ -4862,6 +6717,13 @@ static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val)
{
return ((val >> 2) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
}
+#define A6XX_HLSQ_GS_CNTL_ENABLED 0x00000100
+
+#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_CMD 0x0000b820
+
+#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR 0x0000b821
+
+#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_DATA 0x0000b823
#define REG_A6XX_HLSQ_UNKNOWN_B980 0x0000b980
@@ -4886,16 +6748,52 @@ static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
{
return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
}
+#define A6XX_HLSQ_CONTROL_2_REG_SIZE__MASK 0xff000000
+#define A6XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SIZE(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SIZE__MASK;
+}
#define REG_A6XX_HLSQ_CONTROL_3_REG 0x0000b984
-#define A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK 0x000000ff
-#define A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT 0
-static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID(uint32_t val)
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
+}
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
{
- return ((val) << A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK;
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
+}
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
}
#define REG_A6XX_HLSQ_CONTROL_4_REG 0x0000b985
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
+}
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
+}
#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
@@ -4911,6 +6809,15 @@ static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
#define REG_A6XX_HLSQ_CONTROL_5_REG 0x0000b986
+#define REG_A6XX_HLSQ_CS_CNTL 0x0000b987
+#define A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_ENABLED 0x00000100
+
#define REG_A6XX_HLSQ_CS_NDRANGE_0 0x0000b990
#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003
#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0
@@ -5011,13 +6918,77 @@ static inline uint32_t A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
return ((val) << A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK;
}
+#define REG_A6XX_HLSQ_CS_UNKNOWN_B998 0x0000b998
+
#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_X 0x0000b999
#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000b99a
#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000b99b
-#define REG_A6XX_HLSQ_UPDATE_CNTL 0x0000bb08
+#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_CMD 0x0000b9a0
+
+#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR 0x0000b9a1
+
+#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_DATA 0x0000b9a3
+
+static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; }
+
+#define REG_A6XX_HLSQ_DRAW_CMD 0x0000bb00
+#define A6XX_HLSQ_DRAW_CMD_STATE_ID__MASK 0x000000ff
+#define A6XX_HLSQ_DRAW_CMD_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_DRAW_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_DRAW_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_DRAW_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_HLSQ_DISPATCH_CMD 0x0000bb01
+#define A6XX_HLSQ_DISPATCH_CMD_STATE_ID__MASK 0x000000ff
+#define A6XX_HLSQ_DISPATCH_CMD_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_DISPATCH_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_DISPATCH_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_DISPATCH_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_HLSQ_EVENT_CMD 0x0000bb02
+#define A6XX_HLSQ_EVENT_CMD_STATE_ID__MASK 0x00ff0000
+#define A6XX_HLSQ_EVENT_CMD_STATE_ID__SHIFT 16
+static inline uint32_t A6XX_HLSQ_EVENT_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_EVENT_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_EVENT_CMD_STATE_ID__MASK;
+}
+#define A6XX_HLSQ_EVENT_CMD_EVENT__MASK 0x0000007f
+#define A6XX_HLSQ_EVENT_CMD_EVENT__SHIFT 0
+static inline uint32_t A6XX_HLSQ_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+ return ((val) << A6XX_HLSQ_EVENT_CMD_EVENT__SHIFT) & A6XX_HLSQ_EVENT_CMD_EVENT__MASK;
+}
+
+#define REG_A6XX_HLSQ_INVALIDATE_CMD 0x0000bb08
+#define A6XX_HLSQ_INVALIDATE_CMD_VS_STATE 0x00000001
+#define A6XX_HLSQ_INVALIDATE_CMD_HS_STATE 0x00000002
+#define A6XX_HLSQ_INVALIDATE_CMD_DS_STATE 0x00000004
+#define A6XX_HLSQ_INVALIDATE_CMD_GS_STATE 0x00000008
+#define A6XX_HLSQ_INVALIDATE_CMD_FS_STATE 0x00000010
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_STATE 0x00000020
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_IBO 0x00000040
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_IBO 0x00000080
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_SHARED_CONST 0x00080000
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_SHARED_CONST 0x00000100
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK 0x00003e00
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT 9
+static inline uint32_t A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT) & A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK;
+}
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK 0x0007c000
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT 14
+static inline uint32_t A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT) & A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK;
+}
#define REG_A6XX_HLSQ_FS_CNTL 0x0000bb10
#define A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK 0x000000ff
@@ -5026,8 +6997,28 @@ static inline uint32_t A6XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val)
{
return ((val >> 2) << A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK;
}
+#define A6XX_HLSQ_FS_CNTL_ENABLED 0x00000100
-#define REG_A6XX_HLSQ_UNKNOWN_BB11 0x0000bb11
+#define REG_A6XX_HLSQ_SHARED_CONSTS 0x0000bb11
+#define A6XX_HLSQ_SHARED_CONSTS_ENABLE 0x00000001
+
+static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE(uint32_t i0) { return 0x0000bb20 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000bb20 + 0x2*i0; }
+
+#define REG_A6XX_HLSQ_2D_EVENT_CMD 0x0000bd80
+#define A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK 0x0000ff00
+#define A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__SHIFT 8
+static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK;
+}
+#define A6XX_HLSQ_2D_EVENT_CMD_EVENT__MASK 0x0000007f
+#define A6XX_HLSQ_2D_EVENT_CMD_EVENT__SHIFT 0
+static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+ return ((val) << A6XX_HLSQ_2D_EVENT_CMD_EVENT__SHIFT) & A6XX_HLSQ_2D_EVENT_CMD_EVENT__MASK;
+}
#define REG_A6XX_HLSQ_UNKNOWN_BE00 0x0000be00
@@ -5035,6 +7026,38 @@ static inline uint32_t A6XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val)
#define REG_A6XX_HLSQ_UNKNOWN_BE04 0x0000be04
+#define REG_A6XX_CP_EVENT_START 0x0000d600
+#define A6XX_CP_EVENT_START_STATE_ID__MASK 0x000000ff
+#define A6XX_CP_EVENT_START_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_CP_EVENT_START_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_CP_EVENT_START_STATE_ID__SHIFT) & A6XX_CP_EVENT_START_STATE_ID__MASK;
+}
+
+#define REG_A6XX_CP_EVENT_END 0x0000d601
+#define A6XX_CP_EVENT_END_STATE_ID__MASK 0x000000ff
+#define A6XX_CP_EVENT_END_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_CP_EVENT_END_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_CP_EVENT_END_STATE_ID__SHIFT) & A6XX_CP_EVENT_END_STATE_ID__MASK;
+}
+
+#define REG_A6XX_CP_2D_EVENT_START 0x0000d700
+#define A6XX_CP_2D_EVENT_START_STATE_ID__MASK 0x000000ff
+#define A6XX_CP_2D_EVENT_START_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_CP_2D_EVENT_START_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_CP_2D_EVENT_START_STATE_ID__SHIFT) & A6XX_CP_2D_EVENT_START_STATE_ID__MASK;
+}
+
+#define REG_A6XX_CP_2D_EVENT_END 0x0000d701
+#define A6XX_CP_2D_EVENT_END_STATE_ID__MASK 0x000000ff
+#define A6XX_CP_2D_EVENT_END_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_CP_2D_EVENT_END_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_CP_2D_EVENT_END_STATE_ID__SHIFT) & A6XX_CP_2D_EVENT_END_STATE_ID__MASK;
+}
+
#define REG_A6XX_TEX_SAMP_0 0x00000000
#define A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
#define A6XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
@@ -5081,6 +7104,7 @@ static inline uint32_t A6XX_TEX_SAMP_0_LOD_BIAS(float val)
}
#define REG_A6XX_TEX_SAMP_1 0x00000001
+#define A6XX_TEX_SAMP_1_UNK0 0x00000001
#define A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
#define A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
static inline uint32_t A6XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
@@ -5104,11 +7128,18 @@ static inline uint32_t A6XX_TEX_SAMP_1_MIN_LOD(float val)
}
#define REG_A6XX_TEX_SAMP_2 0x00000002
-#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK 0xfffffff0
-#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT 4
+#define A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK 0x00000003
+#define A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT 0
+static inline uint32_t A6XX_TEX_SAMP_2_REDUCTION_MODE(enum a6xx_reduction_mode val)
+{
+ return ((val) << A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT) & A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK;
+}
+#define A6XX_TEX_SAMP_2_CHROMA_LINEAR 0x00000020
+#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK 0xffffff80
+#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT 7
static inline uint32_t A6XX_TEX_SAMP_2_BCOLOR_OFFSET(uint32_t val)
{
- return ((val) << A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
+ return ((val >> 7) << A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
}
#define REG_A6XX_TEX_SAMP_3 0x00000003
@@ -5151,6 +7182,8 @@ static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val)
{
return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
}
+#define A6XX_TEX_CONST_0_CHROMA_MIDPOINT_X 0x00010000
+#define A6XX_TEX_CONST_0_CHROMA_MIDPOINT_Y 0x00040000
#define A6XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
#define A6XX_TEX_CONST_0_SAMPLES__SHIFT 20
static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
@@ -5159,7 +7192,7 @@ static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
}
#define A6XX_TEX_CONST_0_FMT__MASK 0x3fc00000
#define A6XX_TEX_CONST_0_FMT__SHIFT 22
-static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_tex_fmt val)
+static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_format val)
{
return ((val) << A6XX_TEX_CONST_0_FMT__SHIFT) & A6XX_TEX_CONST_0_FMT__MASK;
}
@@ -5185,11 +7218,12 @@ static inline uint32_t A6XX_TEX_CONST_1_HEIGHT(uint32_t val)
}
#define REG_A6XX_TEX_CONST_2 0x00000002
-#define A6XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f
-#define A6XX_TEX_CONST_2_FETCHSIZE__SHIFT 0
-static inline uint32_t A6XX_TEX_CONST_2_FETCHSIZE(enum a6xx_tex_fetchsize val)
+#define A6XX_TEX_CONST_2_UNK4 0x00000010
+#define A6XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f
+#define A6XX_TEX_CONST_2_PITCHALIGN__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
{
- return ((val) << A6XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A6XX_TEX_CONST_2_FETCHSIZE__MASK;
+ return ((val) << A6XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A6XX_TEX_CONST_2_PITCHALIGN__MASK;
}
#define A6XX_TEX_CONST_2_PITCH__MASK 0x1fffff80
#define A6XX_TEX_CONST_2_PITCH__SHIFT 7
@@ -5203,6 +7237,7 @@ static inline uint32_t A6XX_TEX_CONST_2_TYPE(enum a6xx_tex_type val)
{
return ((val) << A6XX_TEX_CONST_2_TYPE__SHIFT) & A6XX_TEX_CONST_2_TYPE__MASK;
}
+#define A6XX_TEX_CONST_2_UNK31 0x80000000
#define REG_A6XX_TEX_CONST_3 0x00000003
#define A6XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff
@@ -5211,6 +7246,13 @@ static inline uint32_t A6XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
{
return ((val >> 12) << A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_3_ARRAY_PITCH__MASK;
}
+#define A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK 0x07800000
+#define A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT 23
+static inline uint32_t A6XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK;
+}
+#define A6XX_TEX_CONST_3_TILE_ALL 0x08000000
#define A6XX_TEX_CONST_3_FLAG 0x10000000
#define REG_A6XX_TEX_CONST_4 0x00000004
@@ -5236,6 +7278,12 @@ static inline uint32_t A6XX_TEX_CONST_5_DEPTH(uint32_t val)
}
#define REG_A6XX_TEX_CONST_6 0x00000006
+#define A6XX_TEX_CONST_6_PLANE_PITCH__MASK 0xffffff00
+#define A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT 8
+static inline uint32_t A6XX_TEX_CONST_6_PLANE_PITCH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT) & A6XX_TEX_CONST_6_PLANE_PITCH__MASK;
+}
#define REG_A6XX_TEX_CONST_7 0x00000007
#define A6XX_TEX_CONST_7_FLAG_LO__MASK 0xffffffe0
@@ -5254,8 +7302,32 @@ static inline uint32_t A6XX_TEX_CONST_8_FLAG_HI(uint32_t val)
}
#define REG_A6XX_TEX_CONST_9 0x00000009
+#define A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK 0x0001ffff
+#define A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK;
+}
#define REG_A6XX_TEX_CONST_10 0x0000000a
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK 0x0000007f
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK 0x00000f00
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT 8
+static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK;
+}
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__MASK 0x0000f000
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__SHIFT 12
+static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__MASK;
+}
#define REG_A6XX_TEX_CONST_11 0x0000000b
@@ -5267,6 +7339,126 @@ static inline uint32_t A6XX_TEX_CONST_8_FLAG_HI(uint32_t val)
#define REG_A6XX_TEX_CONST_15 0x0000000f
+#define REG_A6XX_IBO_0 0x00000000
+#define A6XX_IBO_0_TILE_MODE__MASK 0x00000003
+#define A6XX_IBO_0_TILE_MODE__SHIFT 0
+static inline uint32_t A6XX_IBO_0_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_IBO_0_TILE_MODE__SHIFT) & A6XX_IBO_0_TILE_MODE__MASK;
+}
+#define A6XX_IBO_0_FMT__MASK 0x3fc00000
+#define A6XX_IBO_0_FMT__SHIFT 22
+static inline uint32_t A6XX_IBO_0_FMT(enum a6xx_format val)
+{
+ return ((val) << A6XX_IBO_0_FMT__SHIFT) & A6XX_IBO_0_FMT__MASK;
+}
+
+#define REG_A6XX_IBO_1 0x00000001
+#define A6XX_IBO_1_WIDTH__MASK 0x00007fff
+#define A6XX_IBO_1_WIDTH__SHIFT 0
+static inline uint32_t A6XX_IBO_1_WIDTH(uint32_t val)
+{
+ return ((val) << A6XX_IBO_1_WIDTH__SHIFT) & A6XX_IBO_1_WIDTH__MASK;
+}
+#define A6XX_IBO_1_HEIGHT__MASK 0x3fff8000
+#define A6XX_IBO_1_HEIGHT__SHIFT 15
+static inline uint32_t A6XX_IBO_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A6XX_IBO_1_HEIGHT__SHIFT) & A6XX_IBO_1_HEIGHT__MASK;
+}
+
+#define REG_A6XX_IBO_2 0x00000002
+#define A6XX_IBO_2_UNK4 0x00000010
+#define A6XX_IBO_2_PITCH__MASK 0x1fffff80
+#define A6XX_IBO_2_PITCH__SHIFT 7
+static inline uint32_t A6XX_IBO_2_PITCH(uint32_t val)
+{
+ return ((val) << A6XX_IBO_2_PITCH__SHIFT) & A6XX_IBO_2_PITCH__MASK;
+}
+#define A6XX_IBO_2_TYPE__MASK 0x60000000
+#define A6XX_IBO_2_TYPE__SHIFT 29
+static inline uint32_t A6XX_IBO_2_TYPE(enum a6xx_tex_type val)
+{
+ return ((val) << A6XX_IBO_2_TYPE__SHIFT) & A6XX_IBO_2_TYPE__MASK;
+}
+#define A6XX_IBO_2_UNK31 0x80000000
+
+#define REG_A6XX_IBO_3 0x00000003
+#define A6XX_IBO_3_ARRAY_PITCH__MASK 0x00003fff
+#define A6XX_IBO_3_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_IBO_3_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A6XX_IBO_3_ARRAY_PITCH__SHIFT) & A6XX_IBO_3_ARRAY_PITCH__MASK;
+}
+#define A6XX_IBO_3_UNK27 0x08000000
+#define A6XX_IBO_3_FLAG 0x10000000
+
+#define REG_A6XX_IBO_4 0x00000004
+#define A6XX_IBO_4_BASE_LO__MASK 0xffffffff
+#define A6XX_IBO_4_BASE_LO__SHIFT 0
+static inline uint32_t A6XX_IBO_4_BASE_LO(uint32_t val)
+{
+ return ((val) << A6XX_IBO_4_BASE_LO__SHIFT) & A6XX_IBO_4_BASE_LO__MASK;
+}
+
+#define REG_A6XX_IBO_5 0x00000005
+#define A6XX_IBO_5_BASE_HI__MASK 0x0001ffff
+#define A6XX_IBO_5_BASE_HI__SHIFT 0
+static inline uint32_t A6XX_IBO_5_BASE_HI(uint32_t val)
+{
+ return ((val) << A6XX_IBO_5_BASE_HI__SHIFT) & A6XX_IBO_5_BASE_HI__MASK;
+}
+#define A6XX_IBO_5_DEPTH__MASK 0x3ffe0000
+#define A6XX_IBO_5_DEPTH__SHIFT 17
+static inline uint32_t A6XX_IBO_5_DEPTH(uint32_t val)
+{
+ return ((val) << A6XX_IBO_5_DEPTH__SHIFT) & A6XX_IBO_5_DEPTH__MASK;
+}
+
+#define REG_A6XX_IBO_6 0x00000006
+
+#define REG_A6XX_IBO_7 0x00000007
+
+#define REG_A6XX_IBO_8 0x00000008
+
+#define REG_A6XX_IBO_9 0x00000009
+#define A6XX_IBO_9_FLAG_BUFFER_ARRAY_PITCH__MASK 0x0001ffff
+#define A6XX_IBO_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_IBO_9_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_IBO_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_IBO_9_FLAG_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_IBO_10 0x0000000a
+#define A6XX_IBO_10_FLAG_BUFFER_PITCH__MASK 0x0000007f
+#define A6XX_IBO_10_FLAG_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_IBO_10_FLAG_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_IBO_10_FLAG_BUFFER_PITCH__SHIFT) & A6XX_IBO_10_FLAG_BUFFER_PITCH__MASK;
+}
+
+#define REG_A6XX_UBO_0 0x00000000
+#define A6XX_UBO_0_BASE_LO__MASK 0xffffffff
+#define A6XX_UBO_0_BASE_LO__SHIFT 0
+static inline uint32_t A6XX_UBO_0_BASE_LO(uint32_t val)
+{
+ return ((val) << A6XX_UBO_0_BASE_LO__SHIFT) & A6XX_UBO_0_BASE_LO__MASK;
+}
+
+#define REG_A6XX_UBO_1 0x00000001
+#define A6XX_UBO_1_BASE_HI__MASK 0x0001ffff
+#define A6XX_UBO_1_BASE_HI__SHIFT 0
+static inline uint32_t A6XX_UBO_1_BASE_HI(uint32_t val)
+{
+ return ((val) << A6XX_UBO_1_BASE_HI__SHIFT) & A6XX_UBO_1_BASE_HI__MASK;
+}
+#define A6XX_UBO_1_SIZE__MASK 0xfffe0000
+#define A6XX_UBO_1_SIZE__SHIFT 17
+static inline uint32_t A6XX_UBO_1_SIZE(uint32_t val)
+{
+ return ((val) << A6XX_UBO_1_SIZE__SHIFT) & A6XX_UBO_1_SIZE__MASK;
+}
+
#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00001140
#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00001148
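All of the register helpers in this regenerated header follow the same pattern: a __MASK/__SHIFT pair plus a static inline packer that shifts the value into position and masks off anything out of range, with some packers pre-shifting their argument (the val >> 7 in A6XX_TEX_SAMP_2_BCOLOR_OFFSET, for example, suggests the field is stored at 128-byte granularity). The short user-space sketch below shows how they evaluate; the two helpers are copied from the header above and the example values are invented.

#include <stdint.h>
#include <stdio.h>

/* Copied from the generated header above.  PITCH stores a byte pitch in
 * bits 7..28; BCOLOR_OFFSET pre-shifts its argument (the >> 7 suggests
 * the offset is kept at 128-byte granularity). */
#define A6XX_TEX_CONST_2_PITCH__MASK  0x1fffff80
#define A6XX_TEX_CONST_2_PITCH__SHIFT 7
static inline uint32_t A6XX_TEX_CONST_2_PITCH(uint32_t val)
{
	return ((val) << A6XX_TEX_CONST_2_PITCH__SHIFT) & A6XX_TEX_CONST_2_PITCH__MASK;
}

#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK  0xffffff80
#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT 7
static inline uint32_t A6XX_TEX_SAMP_2_BCOLOR_OFFSET(uint32_t val)
{
	return ((val >> 7) << A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
}

int main(void)
{
	/* Invented example values: a 256-byte pitch and a 384-byte offset. */
	printf("TEX_CONST_2 pitch field:  0x%08x\n", A6XX_TEX_CONST_2_PITCH(256));        /* 0x00008000 */
	printf("TEX_SAMP_2 bcolor field:  0x%08x\n", A6XX_TEX_SAMP_2_BCOLOR_OFFSET(384)); /* 0x00000180 */
	return 0;
}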
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 096be97ce9f9..b67b38c8fadf 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -103,17 +103,45 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
-static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
{
- struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
- struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
- struct msm_gpu *gpu = &adreno_gpu->base;
- int ret;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 perf_index;
+ unsigned long gpu_freq;
+ int ret = 0;
+
+ gpu_freq = dev_pm_opp_get_freq(opp);
+
+ if (gpu_freq == gmu->freq)
+ return;
+
+ for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
+ if (gpu_freq == gmu->gpu_freqs[perf_index])
+ break;
+
+ gmu->current_perf_index = perf_index;
+ gmu->freq = gmu->gpu_freqs[perf_index];
+
+ /*
+ * This can get called from devfreq while the hardware is idle. Don't
+ * bring up the power if it isn't already active
+ */
+ if (pm_runtime_get_if_in_use(gmu->dev) == 0)
+ return;
+
+ if (!gmu->legacy) {
+ a6xx_hfi_set_freq(gmu, perf_index);
+ icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
+ pm_runtime_put(gmu->dev);
+ return;
+ }
gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
- ((3 & 0xf) << 28) | index);
+ ((3 & 0xf) << 28) | perf_index);
/*
* Send an invalid index as a vote for the bus bandwidth and let the
@@ -134,37 +162,6 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
* for now leave it at max so that the performance is nominal.
*/
icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
-}
-
-void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
-{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
- u32 perf_index = 0;
-
- if (freq == gmu->freq)
- return;
-
- for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
- if (freq == gmu->gpu_freqs[perf_index])
- break;
-
- gmu->current_perf_index = perf_index;
- gmu->freq = gmu->gpu_freqs[perf_index];
-
- /*
- * This can get called from devfreq while the hardware is idle. Don't
- * bring up the power if it isn't already active
- */
- if (pm_runtime_get_if_in_use(gmu->dev) == 0)
- return;
-
- if (gmu->legacy)
- __a6xx_gmu_set_freq(gmu, perf_index);
- else
- a6xx_hfi_set_freq(gmu, perf_index);
-
pm_runtime_put(gmu->dev);
}
@@ -839,6 +836,19 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
a6xx_gmu_rpmh_off(gmu);
}
+static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
+{
+ struct dev_pm_opp *gpu_opp;
+ unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
+
+ gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
+ if (IS_ERR_OR_NULL(gpu_opp))
+ return;
+
+ a6xx_gmu_set_freq(gpu, gpu_opp);
+ dev_pm_opp_put(gpu_opp);
+}
+
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
@@ -854,10 +864,19 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
+ /*
+ * "enable" the GX power domain which won't actually do anything but it
+ * will make sure that the refcounting is correct in case we need to
+ * bring down the GX after a GMU failure
+ */
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ pm_runtime_get_sync(gmu->gxpd);
+
/* Use a known rate to bring up the GMU */
clk_set_rate(gmu->core_clk, 200000000);
ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
if (ret) {
+ pm_runtime_put(gmu->gxpd);
pm_runtime_put(gmu->dev);
return ret;
}
@@ -898,24 +917,14 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
enable_irq(gmu->hfi_irq);
/* Set the GPU to the current freq */
- if (gmu->legacy)
- __a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
- else
- a6xx_hfi_set_freq(gmu, gmu->current_perf_index);
-
- /*
- * "enable" the GX power domain which won't actually do anything but it
- * will make sure that the refcounting is correct in case we need to
- * bring down the GX after a GMU failure
- */
- if (!IS_ERR_OR_NULL(gmu->gxpd))
- pm_runtime_get(gmu->gxpd);
+ a6xx_gmu_set_initial_freq(gpu, gmu);
out:
/* On failure, shut down the GMU to leave it in a good state */
if (ret) {
disable_irq(gmu->gmu_irq);
a6xx_rpmh_stop(gmu);
+ pm_runtime_put(gmu->gxpd);
pm_runtime_put(gmu->dev);
}
@@ -1121,7 +1130,7 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
return -ENODEV;
mmu = msm_iommu_new(gmu->dev, domain);
- gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff);
+ gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
if (IS_ERR(gmu->aspace)) {
iommu_domain_free(domain);
return PTR_ERR(gmu->aspace);
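The reworked a6xx_gmu_set_freq() above resolves the dev_pm_opp to a frequency and then to a GMU perf index by scanning gmu->gpu_freqs[]; if no entry matches, the loop runs off the end and the last (highest) level is used. The sketch below isolates just that lookup in user space; the frequency table is made up and the OPP/pm_runtime/HFI plumbing is omitted.

#include <stdio.h>

/* Mirror of the lookup in a6xx_gmu_set_freq(): scan the GMU frequency
 * table for an exact match; if none is found the loop runs off the end
 * and the last (highest) perf level is used.  Table values are made up. */
static const unsigned long gpu_freqs[] = {
	180000000, 267000000, 430000000, 710000000,
};
#define NR_GPU_FREQS (sizeof(gpu_freqs) / sizeof(gpu_freqs[0]))

static unsigned int freq_to_perf_index(unsigned long gpu_freq)
{
	unsigned int perf_index;

	for (perf_index = 0; perf_index < NR_GPU_FREQS - 1; perf_index++)
		if (gpu_freq == gpu_freqs[perf_index])
			break;

	return perf_index;
}

int main(void)
{
	printf("267 MHz -> index %u\n", freq_to_perf_index(267000000)); /* 1 */
	printf("999 MHz -> index %u\n", freq_to_perf_index(999000000)); /* 3, clamped to the top level */
	return 0;
}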
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 47df4745db50..c6d2bced8e5d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -127,6 +127,11 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
interval, timeout)
+static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
+{
+ return msm_readl(gmu->rscc + (offset << 2));
+}
+
static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
{
return msm_writel(value, gmu->rscc + (offset << 2));
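The new gmu_read_rscc() mirrors gmu_write_rscc(): register offsets are in 32-bit units and are converted to byte offsets with << 2 before dereferencing the mapped RSCC region. A toy user-space model of that addressing is sketched below; the backing "mapping" is a plain array and the register offset is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Toy model of gmu_read_rscc()/gmu_write_rscc(): register offsets are in
 * 32-bit units, so the accessors shift them left by 2 to form the byte
 * offset into the mapped RSCC region. */
static uint8_t rscc_space[0x600 * 4];

static void write_rscc(uint32_t offset, uint32_t value)
{
	*(volatile uint32_t *)(rscc_space + (offset << 2)) = value;
}

static uint32_t read_rscc(uint32_t offset)
{
	return *(volatile uint32_t *)(rscc_space + (offset << 2));
}

int main(void)
{
	write_rscc(0x008c, 0x12345678);
	printf("RSCC reg 0x008c (byte offset 0x%x) = 0x%08x\n",
	       0x008c << 2, read_rscc(0x008c));
	return 0;
}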
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
index 176ae94d9fe6..5a43d3090b0c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -46,24 +48,109 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB 0x00800000
-#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB 0x40000000
-#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK 0x00400000
-#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK 0x40000000
-#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK 0x40000000
-#define A6XX_GMU_OOB_DCVS_SET_MASK 0x00800000
-#define A6XX_GMU_OOB_DCVS_CHECK_MASK 0x80000000
-#define A6XX_GMU_OOB_DCVS_CLEAR_MASK 0x80000000
-#define A6XX_GMU_OOB_GPU_SET_MASK 0x00040000
-#define A6XX_GMU_OOB_GPU_CHECK_MASK 0x04000000
-#define A6XX_GMU_OOB_GPU_CLEAR_MASK 0x04000000
-#define A6XX_GMU_OOB_PERFCNTR_SET_MASK 0x00020000
-#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK 0x02000000
-#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK 0x02000000
+#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__MASK 0x00800000
+#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__SHIFT 23
+static inline uint32_t A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB(uint32_t val)
+{
+ return ((val) << A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__SHIFT) & A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__MASK;
+}
+#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__MASK 0x40000000
+#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__SHIFT 30
+static inline uint32_t A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB(uint32_t val)
+{
+ return ((val) << A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__SHIFT) & A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__MASK;
+}
+#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__MASK 0x00400000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__SHIFT 22
+static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__MASK 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__SHIFT 30
+static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__MASK 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__SHIFT 30
+static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__MASK;
+}
+#define A6XX_GMU_OOB_DCVS_SET_MASK__MASK 0x00800000
+#define A6XX_GMU_OOB_DCVS_SET_MASK__SHIFT 23
+static inline uint32_t A6XX_GMU_OOB_DCVS_SET_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_DCVS_SET_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_DCVS_CHECK_MASK__MASK 0x80000000
+#define A6XX_GMU_OOB_DCVS_CHECK_MASK__SHIFT 31
+static inline uint32_t A6XX_GMU_OOB_DCVS_CHECK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_DCVS_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_DCVS_CLEAR_MASK__MASK 0x80000000
+#define A6XX_GMU_OOB_DCVS_CLEAR_MASK__SHIFT 31
+static inline uint32_t A6XX_GMU_OOB_DCVS_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_DCVS_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_CLEAR_MASK__MASK;
+}
+#define A6XX_GMU_OOB_GPU_SET_MASK__MASK 0x00040000
+#define A6XX_GMU_OOB_GPU_SET_MASK__SHIFT 18
+static inline uint32_t A6XX_GMU_OOB_GPU_SET_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_GPU_SET_MASK__SHIFT) & A6XX_GMU_OOB_GPU_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_GPU_CHECK_MASK__MASK 0x04000000
+#define A6XX_GMU_OOB_GPU_CHECK_MASK__SHIFT 26
+static inline uint32_t A6XX_GMU_OOB_GPU_CHECK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_GPU_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_GPU_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_GPU_CLEAR_MASK__MASK 0x04000000
+#define A6XX_GMU_OOB_GPU_CLEAR_MASK__SHIFT 26
+static inline uint32_t A6XX_GMU_OOB_GPU_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_GPU_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_GPU_CLEAR_MASK__MASK;
+}
+#define A6XX_GMU_OOB_PERFCNTR_SET_MASK__MASK 0x00020000
+#define A6XX_GMU_OOB_PERFCNTR_SET_MASK__SHIFT 17
+static inline uint32_t A6XX_GMU_OOB_PERFCNTR_SET_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_PERFCNTR_SET_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__MASK 0x02000000
+#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__SHIFT 25
+static inline uint32_t A6XX_GMU_OOB_PERFCNTR_CHECK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__MASK 0x02000000
+#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__SHIFT 25
+static inline uint32_t A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__MASK;
+}
#define A6XX_HFI_IRQ_MSGQ_MASK 0x00000001
-#define A6XX_HFI_IRQ_DSGQ_MASK 0x00000002
-#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK 0x00000004
-#define A6XX_HFI_IRQ_CM3_FAULT_MASK 0x00800000
+#define A6XX_HFI_IRQ_DSGQ_MASK__MASK 0x00000002
+#define A6XX_HFI_IRQ_DSGQ_MASK__SHIFT 1
+static inline uint32_t A6XX_HFI_IRQ_DSGQ_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_DSGQ_MASK__SHIFT) & A6XX_HFI_IRQ_DSGQ_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK__MASK 0x00000004
+#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK__SHIFT 2
+static inline uint32_t A6XX_HFI_IRQ_BLOCKED_MSG_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_BLOCKED_MSG_MASK__SHIFT) & A6XX_HFI_IRQ_BLOCKED_MSG_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_CM3_FAULT_MASK__MASK 0x00800000
+#define A6XX_HFI_IRQ_CM3_FAULT_MASK__SHIFT 23
+static inline uint32_t A6XX_HFI_IRQ_CM3_FAULT_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_CM3_FAULT_MASK__SHIFT) & A6XX_HFI_IRQ_CM3_FAULT_MASK__MASK;
+}
#define A6XX_HFI_IRQ_GMU_ERR_MASK__MASK 0x007f0000
#define A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT 16
static inline uint32_t A6XX_HFI_IRQ_GMU_ERR_MASK(uint32_t val)
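The regenerated GMU header turns the old single-bit defines into __MASK/__SHIFT pairs with inline packers, so callers that used to OR in, say, A6XX_GMU_OOB_GPU_SET_MASK (0x00040000) now get the same value from A6XX_GMU_OOB_GPU_SET_MASK(1). A quick user-space check of that equivalence, with the helper copied from the header above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Copied from the regenerated header: the former single-bit constant
 * 0x00040000 becomes a mask/shift pair, so passing 1 to the packer
 * reproduces the old value. */
#define A6XX_GMU_OOB_GPU_SET_MASK__MASK  0x00040000
#define A6XX_GMU_OOB_GPU_SET_MASK__SHIFT 18
static inline uint32_t A6XX_GMU_OOB_GPU_SET_MASK(uint32_t val)
{
	return ((val) << A6XX_GMU_OOB_GPU_SET_MASK__SHIFT) & A6XX_GMU_OOB_GPU_SET_MASK__MASK;
}

int main(void)
{
	assert(A6XX_GMU_OOB_GPU_SET_MASK(1) == 0x00040000);
	printf("A6XX_GMU_OOB_GPU_SET_MASK(1) = 0x%08x\n", A6XX_GMU_OOB_GPU_SET_MASK(1));
	return 0;
}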
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 68314dcfce18..c5a3e4d4c007 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -74,7 +74,9 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
u64 iova)
{
OUT_PKT7(ring, CP_REG_TO_MEM, 3);
- OUT_RING(ring, counter | (1 << 30) | (2 << 18));
+ OUT_RING(ring, CP_REG_TO_MEM_0_REG(counter) |
+ CP_REG_TO_MEM_0_CNT(2) |
+ CP_REG_TO_MEM_0_64B);
OUT_RING(ring, lower_32_bits(iova));
OUT_RING(ring, upper_32_bits(iova));
}
@@ -102,10 +104,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* Invalidate CCU depth and color */
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
- OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
+ OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_DEPTH));
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
- OUT_RING(ring, PC_CCU_INVALIDATE_COLOR);
+ OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_COLOR));
/* Submit the commands */
for (i = 0; i < submit->nr_cmds; i++) {
@@ -139,7 +141,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
* timestamp is written to the memory and then triggers the interrupt
*/
OUT_PKT7(ring, CP_EVENT_WRITE, 4);
- OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+ OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
+ CP_EVENT_WRITE_0_IRQ);
OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno);
@@ -151,10 +154,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
a6xx_flush(gpu, ring);
}
-static const struct {
- u32 offset;
- u32 value;
-} a6xx_hwcg[] = {
+const struct adreno_reglist a630_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
@@ -259,7 +259,114 @@ static const struct {
{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
- {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
+const struct adreno_reglist a640_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05222022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
+const struct adreno_reglist a650_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
};
static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
@@ -267,26 +374,65 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ const struct adreno_reglist *reg;
unsigned int i;
- u32 val;
+ u32 val, clock_cntl_on;
+
+ if (!adreno_gpu->info->hwcg)
+ return;
+
+ if (adreno_is_a630(adreno_gpu))
+ clock_cntl_on = 0x8aa8aa02;
+ else
+ clock_cntl_on = 0x8aa8aa82;
val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
/* Don't re-program the registers if they are already correct */
- if ((!state && !val) || (state && (val == 0x8aa8aa02)))
+ if ((!state && !val) || (state && (val == clock_cntl_on)))
return;
/* Disable SP clock before programming HWCG registers */
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
- for (i = 0; i < ARRAY_SIZE(a6xx_hwcg); i++)
- gpu_write(gpu, a6xx_hwcg[i].offset,
- state ? a6xx_hwcg[i].value : 0);
+ for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
+ gpu_write(gpu, reg->offset, state ? reg->value : 0);
/* Enable SP clock */
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
- gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? 0x8aa8aa02 : 0);
+ gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
+}
+
+static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ u32 lower_bit = 2;
+ u32 amsbc = 0;
+ u32 rgb565_predicator = 0;
+ u32 uavflagprd_inv = 0;
+
+ /* a618 is using the hw default values */
+ if (adreno_is_a618(adreno_gpu))
+ return;
+
+ if (adreno_is_a640(adreno_gpu))
+ amsbc = 1;
+
+ if (adreno_is_a650(adreno_gpu)) {
+ /* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
+ lower_bit = 3;
+ amsbc = 1;
+ rgb565_predicator = 1;
+ uavflagprd_inv = 2;
+ }
+
+ gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
+ rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
+ uavflagprd_inv >> 4 | lower_bit << 1);
+ gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
}
static int a6xx_cp_init(struct msm_gpu *gpu)
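a6xx_set_ubwc_config() above packs the per-target UBWC knobs into REG_A6XX_RB_NC_MODE_CNTL and friends with plain shifts. The sketch below only recomputes that dword for the a650 settings shown (rgb565_predicator = 1, amsbc = 1, lower_bit = 3); it is a bit-packing illustration, not a register specification.

#include <stdint.h>
#include <stdio.h>

/* Recompute the RB_NC_MODE_CNTL value a6xx_set_ubwc_config() would
 * program for the a650 settings, using the same shift positions. */
int main(void)
{
	uint32_t rgb565_predicator = 1;
	uint32_t amsbc = 1;
	uint32_t lower_bit = 3;

	uint32_t rb_nc_mode_cntl = rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1;

	printf("RB_NC_MODE_CNTL = 0x%08x\n", rb_nc_mode_cntl); /* 0x00000816 */
	return 0;
}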
@@ -406,12 +552,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
- /*
- * enable hardware clockgating
- * For now enable clock gating only for a630
- */
- if (adreno_is_a630(adreno_gpu))
- a6xx_set_hwcg(gpu, true);
+ /* enable hardware clockgating */
+ a6xx_set_hwcg(gpu, true);
/* VBIF/GBIF start*/
if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu)) {
@@ -478,12 +620,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
/* Select CP0 to always count cycles */
gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
- if (adreno_is_a630(adreno_gpu)) {
- gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
- gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
- gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
- gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
- }
+ a6xx_set_ubwc_config(gpu);
/* Enable fault detection */
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
@@ -893,8 +1030,8 @@ static const struct adreno_gpu_funcs funcs = {
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
- .create_address_space = adreno_iommu_create_address_space,
#endif
+ .create_address_space = adreno_iommu_create_address_space,
},
.get_timestamp = a6xx_get_timestamp,
};
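The hwcg tables above are now plain adreno_reglist arrays terminated by an empty {} entry, and a6xx_set_hwcg() walks them until reg->offset reads back as zero. A standalone sketch of that sentinel-terminated loop, with invented offsets and values:

#include <stdint.h>
#include <stdio.h>

/* Walk a sentinel-terminated reglist the way a6xx_set_hwcg() does: the
 * table ends with an all-zero entry and the loop stops when reg->offset
 * is 0.  The offsets and values here are made up. */
struct adreno_reglist {
	uint32_t offset;
	uint32_t value;
};

static const struct adreno_reglist demo_hwcg[] = {
	{ 0x0100, 0x22222222 },
	{ 0x0101, 0x00002220 },
	{ 0x0102, 0x00000555 },
	{},                      /* terminator */
};

int main(void)
{
	const struct adreno_reglist *reg;
	unsigned int i;

	for (i = 0; (reg = &demo_hwcg[i], reg->offset); i++)
		printf("write 0x%04x <- 0x%08x\n", reg->offset, reg->value);

	return 0;
}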
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 7239b8b60939..03ba60d5b07f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -63,7 +63,7 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
-void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp);
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index d6023ba8033c..959656ad6987 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -736,7 +736,8 @@ static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
const struct a6xx_registers *regs,
- struct a6xx_gpu_state_obj *obj)
+ struct a6xx_gpu_state_obj *obj,
+ bool rscc)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -755,9 +756,17 @@ static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
u32 count = RANGE(regs->registers, i);
int j;
- for (j = 0; j < count; j++)
- obj->data[index++] = gmu_read(gmu,
- regs->registers[i] + j);
+ for (j = 0; j < count; j++) {
+ u32 offset = regs->registers[i] + j;
+ u32 val;
+
+ if (rscc)
+ val = gmu_read_rscc(gmu, offset);
+ else
+ val = gmu_read(gmu, offset);
+
+ obj->data[index++] = val;
+ }
}
}
@@ -777,7 +786,9 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
/* Get the CX GMU registers from AHB */
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
- &a6xx_state->gmu_registers[0]);
+ &a6xx_state->gmu_registers[0], false);
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
+ &a6xx_state->gmu_registers[1], true);
if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
return;
@@ -785,8 +796,8 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
/* Set the fence to ALLOW mode so we can access the registers */
gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
- _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
- &a6xx_state->gmu_registers[1]);
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2],
+ &a6xx_state->gmu_registers[2], false);
}
#define A6XX_GBIF_REGLIST_SIZE 1
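_a6xx_get_gmu_registers() above walks a list of start/end register pairs and reads every offset in each range, now through either gmu_read() or gmu_read_rscc() depending on the new rscc flag. The sketch below mirrors that double loop in user space; RANGE() is assumed to expand to "end - start + 1", which is what the paired entries suggest, and the register read is stubbed out.

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
/* Assumed expansion of the kernel's RANGE() helper: registers come in
 * (first, last) pairs, so the count is last - first + 1. */
#define RANGE(reg, a) ((reg)[(a) + 1] - (reg)[(a)] + 1)

static const uint32_t demo_registers[] = {
	0x008c, 0x008c,   /* a single register */
	0x0101, 0x0102,   /* a two-register range */
};

static uint32_t read_reg(uint32_t offset)
{
	return offset;    /* stand-in for gmu_read()/gmu_read_rscc() */
}

int main(void)
{
	unsigned int i, j, index = 0;

	for (i = 0; i < ARRAY_SIZE(demo_registers); i += 2) {
		uint32_t count = RANGE(demo_registers, i);

		for (j = 0; j < count; j++)
			printf("data[%u] = 0x%08x\n", index++,
			       read_reg(demo_registers[i] + j));
	}
	return 0;
}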
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 24c974c293e5..846fd5b54c23 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -341,10 +341,6 @@ static const u32 a6xx_gmu_cx_registers[] = {
0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165,
0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc,
0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
- /* GPU RSCC */
- 0x8c8c, 0x8c8c, 0x8d01, 0x8d02, 0x8f40, 0x8f42, 0x8f44, 0x8f47,
- 0x8f4c, 0x8f87, 0x8fec, 0x8fef, 0x8ff4, 0x902f, 0x9094, 0x9097,
- 0x909c, 0x90d7, 0x913c, 0x913f, 0x9144, 0x917f,
/* GMU AO */
0x9300, 0x9316, 0x9400, 0x9400,
/* GPU CC */
@@ -357,8 +353,16 @@ static const u32 a6xx_gmu_cx_registers[] = {
0xbc00, 0xbc16, 0xbc20, 0xbc27,
};
+static const u32 a6xx_gmu_cx_rscc_registers[] = {
+ /* GPU RSCC */
+ 0x008c, 0x008c, 0x0101, 0x0102, 0x0340, 0x0342, 0x0344, 0x0347,
+ 0x034c, 0x0387, 0x03ec, 0x03ef, 0x03f4, 0x042f, 0x0494, 0x0497,
+ 0x049c, 0x04d7, 0x053c, 0x053f, 0x0544, 0x057f,
+};
+
static const struct a6xx_registers a6xx_gmu_reglist[] = {
REGS(a6xx_gmu_cx_registers, 0, 0),
+ REGS(a6xx_gmu_cx_rscc_registers, 0, 0),
REGS(a6xx_gmu_gx_registers, 0, 0),
};
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 9921e632f1ca..ccd44d0418f8 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -281,6 +281,76 @@ static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
+static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x01;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x5003c;
+ msg->ddr_cmds_addrs[2] = 0x5000c;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 3;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x50034;
+ msg->cnoc_cmds_addrs[1] = 0x5007c;
+ msg->cnoc_cmds_addrs[2] = 0x5004c;
+
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[0][1] = 0x00000000;
+ msg->cnoc_cmds_data[0][2] = 0x40000000;
+
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+ msg->cnoc_cmds_data[1][1] = 0x20000001;
+ msg->cnoc_cmds_data[1][2] = 0x60000001;
+}
+
+static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x01;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x50004;
+ msg->ddr_cmds_addrs[2] = 0x5007c;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x500a4;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
@@ -327,6 +397,10 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
if (adreno_is_a618(adreno_gpu))
a618_build_bw_table(&msg);
+ else if (adreno_is_a640(adreno_gpu))
+ a640_build_bw_table(&msg);
+ else if (adreno_is_a650(adreno_gpu))
+ a650_build_bw_table(&msg);
else
a6xx_build_bw_table(&msg);
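The a640/a650 bandwidth tables above follow the a618 pattern: a single "off" level whose DDR command addresses and data are fixed for the target, with real bus scaling left as a TODO. The sketch below fills a reduced mock of that table with the a640 DDR values; the struct models only the fields used here, not the real a6xx_hfi_msg_bw_table layout.

#include <stdint.h>
#include <stdio.h>

/* Reduced mock of the single-level bandwidth table built by
 * a640_build_bw_table(): one "off" vote per DDR command. */
struct demo_bw_table {
	uint32_t bw_level_num;
	uint32_t ddr_cmds_num;
	uint32_t ddr_wait_bitmask;
	uint32_t ddr_cmds_addrs[3];
	uint32_t ddr_cmds_data[1][3];
};

int main(void)
{
	struct demo_bw_table msg = {
		.bw_level_num = 1,
		.ddr_cmds_num = 3,
		.ddr_wait_bitmask = 0x01,
		.ddr_cmds_addrs = { 0x50000, 0x5003c, 0x5000c },
		.ddr_cmds_data = { { 0x40000000, 0x40000000, 0x40000000 } },
	};

	for (unsigned int i = 0; i < msg.ddr_cmds_num; i++)
		printf("ddr cmd %u: addr 0x%05x data 0x%08x\n",
		       i, msg.ddr_cmds_addrs[i], msg.ddr_cmds_data[0][i]);
	return 0;
}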
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 641d3ba477b6..548f532417d1 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -159,6 +161,7 @@ enum a3xx_msaa_samples {
MSAA_ONE = 0,
MSAA_TWO = 1,
MSAA_FOUR = 2,
+ MSAA_EIGHT = 3,
};
enum a3xx_threadmode {
@@ -197,6 +200,11 @@ enum a4xx_tess_spacing {
EVEN_SPACING = 3,
};
+enum a5xx_address_mode {
+ ADDR_32B = 0,
+ ADDR_64B = 1,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
@@ -446,34 +454,174 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
#define REG_AXXX_CP_IB2_BUFSZ 0x0000045b
#define REG_AXXX_CP_STAT 0x0000047f
-#define AXXX_CP_STAT_CP_BUSY 0x80000000
-#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY 0x40000000
-#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY 0x20000000
-#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY 0x10000000
-#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY 0x08000000
-#define AXXX_CP_STAT_ME_BUSY 0x04000000
-#define AXXX_CP_STAT_MIU_WR_C_BUSY 0x02000000
-#define AXXX_CP_STAT_CP_3D_BUSY 0x00800000
-#define AXXX_CP_STAT_CP_NRT_BUSY 0x00400000
-#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY 0x00200000
-#define AXXX_CP_STAT_RCIU_ME_BUSY 0x00100000
-#define AXXX_CP_STAT_RCIU_PFP_BUSY 0x00080000
-#define AXXX_CP_STAT_MEQ_RING_BUSY 0x00040000
-#define AXXX_CP_STAT_PFP_BUSY 0x00020000
-#define AXXX_CP_STAT_ST_QUEUE_BUSY 0x00010000
-#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY 0x00002000
-#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY 0x00001000
-#define AXXX_CP_STAT_RING_QUEUE_BUSY 0x00000800
-#define AXXX_CP_STAT_CSF_BUSY 0x00000400
-#define AXXX_CP_STAT_CSF_ST_BUSY 0x00000200
-#define AXXX_CP_STAT_EVENT_BUSY 0x00000100
-#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY 0x00000080
-#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY 0x00000040
-#define AXXX_CP_STAT_CSF_RING_BUSY 0x00000020
-#define AXXX_CP_STAT_RCIU_BUSY 0x00000010
-#define AXXX_CP_STAT_RBIU_BUSY 0x00000008
-#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY 0x00000004
-#define AXXX_CP_STAT_MIU_RD_REQ_BUSY 0x00000002
+#define AXXX_CP_STAT_CP_BUSY__MASK 0x80000000
+#define AXXX_CP_STAT_CP_BUSY__SHIFT 31
+static inline uint32_t AXXX_CP_STAT_CP_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CP_BUSY__SHIFT) & AXXX_CP_STAT_CP_BUSY__MASK;
+}
+#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__MASK 0x40000000
+#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__SHIFT 30
+static inline uint32_t AXXX_CP_STAT_VS_EVENT_FIFO_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__MASK 0x20000000
+#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__SHIFT 29
+static inline uint32_t AXXX_CP_STAT_PS_EVENT_FIFO_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__MASK 0x10000000
+#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__SHIFT 28
+static inline uint32_t AXXX_CP_STAT_CF_EVENT_FIFO_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__MASK 0x08000000
+#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__SHIFT 27
+static inline uint32_t AXXX_CP_STAT_RB_EVENT_FIFO_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_ME_BUSY__MASK 0x04000000
+#define AXXX_CP_STAT_ME_BUSY__SHIFT 26
+static inline uint32_t AXXX_CP_STAT_ME_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_ME_BUSY__SHIFT) & AXXX_CP_STAT_ME_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MIU_WR_C_BUSY__MASK 0x02000000
+#define AXXX_CP_STAT_MIU_WR_C_BUSY__SHIFT 25
+static inline uint32_t AXXX_CP_STAT_MIU_WR_C_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_MIU_WR_C_BUSY__SHIFT) & AXXX_CP_STAT_MIU_WR_C_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CP_3D_BUSY__MASK 0x00800000
+#define AXXX_CP_STAT_CP_3D_BUSY__SHIFT 23
+static inline uint32_t AXXX_CP_STAT_CP_3D_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CP_3D_BUSY__SHIFT) & AXXX_CP_STAT_CP_3D_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CP_NRT_BUSY__MASK 0x00400000
+#define AXXX_CP_STAT_CP_NRT_BUSY__SHIFT 22
+static inline uint32_t AXXX_CP_STAT_CP_NRT_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CP_NRT_BUSY__SHIFT) & AXXX_CP_STAT_CP_NRT_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY__MASK 0x00200000
+#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY__SHIFT 21
+static inline uint32_t AXXX_CP_STAT_RBIU_SCRATCH_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RBIU_SCRATCH_BUSY__SHIFT) & AXXX_CP_STAT_RBIU_SCRATCH_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RCIU_ME_BUSY__MASK 0x00100000
+#define AXXX_CP_STAT_RCIU_ME_BUSY__SHIFT 20
+static inline uint32_t AXXX_CP_STAT_RCIU_ME_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RCIU_ME_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_ME_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RCIU_PFP_BUSY__MASK 0x00080000
+#define AXXX_CP_STAT_RCIU_PFP_BUSY__SHIFT 19
+static inline uint32_t AXXX_CP_STAT_RCIU_PFP_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RCIU_PFP_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_PFP_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MEQ_RING_BUSY__MASK 0x00040000
+#define AXXX_CP_STAT_MEQ_RING_BUSY__SHIFT 18
+static inline uint32_t AXXX_CP_STAT_MEQ_RING_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_MEQ_RING_BUSY__SHIFT) & AXXX_CP_STAT_MEQ_RING_BUSY__MASK;
+}
+#define AXXX_CP_STAT_PFP_BUSY__MASK 0x00020000
+#define AXXX_CP_STAT_PFP_BUSY__SHIFT 17
+static inline uint32_t AXXX_CP_STAT_PFP_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_PFP_BUSY__SHIFT) & AXXX_CP_STAT_PFP_BUSY__MASK;
+}
+#define AXXX_CP_STAT_ST_QUEUE_BUSY__MASK 0x00010000
+#define AXXX_CP_STAT_ST_QUEUE_BUSY__SHIFT 16
+static inline uint32_t AXXX_CP_STAT_ST_QUEUE_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_ST_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_ST_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__MASK 0x00002000
+#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__SHIFT 13
+static inline uint32_t AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__MASK 0x00001000
+#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__SHIFT 12
+static inline uint32_t AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RING_QUEUE_BUSY__MASK 0x00000800
+#define AXXX_CP_STAT_RING_QUEUE_BUSY__SHIFT 11
+static inline uint32_t AXXX_CP_STAT_RING_QUEUE_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RING_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_RING_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_BUSY__MASK 0x00000400
+#define AXXX_CP_STAT_CSF_BUSY__SHIFT 10
+static inline uint32_t AXXX_CP_STAT_CSF_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CSF_BUSY__SHIFT) & AXXX_CP_STAT_CSF_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_ST_BUSY__MASK 0x00000200
+#define AXXX_CP_STAT_CSF_ST_BUSY__SHIFT 9
+static inline uint32_t AXXX_CP_STAT_CSF_ST_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CSF_ST_BUSY__SHIFT) & AXXX_CP_STAT_CSF_ST_BUSY__MASK;
+}
+#define AXXX_CP_STAT_EVENT_BUSY__MASK 0x00000100
+#define AXXX_CP_STAT_EVENT_BUSY__SHIFT 8
+static inline uint32_t AXXX_CP_STAT_EVENT_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_EVENT_BUSY__SHIFT) & AXXX_CP_STAT_EVENT_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY__MASK 0x00000080
+#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY__SHIFT 7
+static inline uint32_t AXXX_CP_STAT_CSF_INDIRECT2_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CSF_INDIRECT2_BUSY__SHIFT) & AXXX_CP_STAT_CSF_INDIRECT2_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY__MASK 0x00000040
+#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY__SHIFT 6
+static inline uint32_t AXXX_CP_STAT_CSF_INDIRECTS_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CSF_INDIRECTS_BUSY__SHIFT) & AXXX_CP_STAT_CSF_INDIRECTS_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_RING_BUSY__MASK 0x00000020
+#define AXXX_CP_STAT_CSF_RING_BUSY__SHIFT 5
+static inline uint32_t AXXX_CP_STAT_CSF_RING_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CSF_RING_BUSY__SHIFT) & AXXX_CP_STAT_CSF_RING_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RCIU_BUSY__MASK 0x00000010
+#define AXXX_CP_STAT_RCIU_BUSY__SHIFT 4
+static inline uint32_t AXXX_CP_STAT_RCIU_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RCIU_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RBIU_BUSY__MASK 0x00000008
+#define AXXX_CP_STAT_RBIU_BUSY__SHIFT 3
+static inline uint32_t AXXX_CP_STAT_RBIU_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RBIU_BUSY__SHIFT) & AXXX_CP_STAT_RBIU_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY__MASK 0x00000004
+#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY__SHIFT 2
+static inline uint32_t AXXX_CP_STAT_MIU_RD_RETURN_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_MIU_RD_RETURN_BUSY__SHIFT) & AXXX_CP_STAT_MIU_RD_RETURN_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MIU_RD_REQ_BUSY__MASK 0x00000002
+#define AXXX_CP_STAT_MIU_RD_REQ_BUSY__SHIFT 1
+static inline uint32_t AXXX_CP_STAT_MIU_RD_REQ_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_MIU_RD_REQ_BUSY__SHIFT) & AXXX_CP_STAT_MIU_RD_REQ_BUSY__MASK;
+}
#define AXXX_CP_STAT_MIU_WR_BUSY 0x00000001
#define REG_AXXX_CP_SCRATCH_REG0 0x00000578
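
The hunk above replaces the single-bit AXXX_CP_STAT defines with the generated __MASK/__SHIFT pair plus an inline packing helper. A minimal usage sketch of that pattern follows, written against the ME_BUSY field defined above; the wrapper function names are made up for illustration.

/* Illustrative only: packing and extracting a field with the generated helpers. */
#include <stdbool.h>
#include <stdint.h>

/* Pack: the generated inline helper shifts the value into place and masks it. */
static inline uint32_t sketch_pack_me_busy(bool busy)
{
	return AXXX_CP_STAT_ME_BUSY(busy ? 1 : 0);
}

/* Extract: mask the register value, then shift the field back down. */
static inline bool sketch_me_busy(uint32_t cp_stat)
{
	return (cp_stat & AXXX_CP_STAT_ME_BUSY__MASK) >> AXXX_CP_STAT_ME_BUSY__SHIFT;
}
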
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 7732f03d9e3a..4e84f3c76f4f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -200,6 +200,7 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a630_zap.mdt",
+ .hwcg = a630_hwcg,
}, {
.rev = ADRENO_REV(6, 4, 0, ANY_ID),
.revn = 640,
@@ -212,6 +213,7 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
+ .hwcg = a640_hwcg,
}, {
.rev = ADRENO_REV(6, 5, 0, ANY_ID),
.revn = 650,
@@ -224,6 +226,7 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a650_zap.mdt",
+ .hwcg = a650_hwcg,
},
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 89673c7ed473..e23641a5ec84 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -194,7 +194,7 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct msm_gem_address_space *aspace;
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
- 0xfffffff);
+ 0xffffffff - SZ_16M);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
@@ -895,7 +895,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
return 0;
}
-static int adreno_get_pwrlevels(struct device *dev,
+static void adreno_get_pwrlevels(struct device *dev,
struct msm_gpu *gpu)
{
unsigned long freq = ULONG_MAX;
@@ -930,24 +930,6 @@ static int adreno_get_pwrlevels(struct device *dev,
}
DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
-
- /* Check for an interconnect path for the bus */
- gpu->icc_path = of_icc_get(dev, "gfx-mem");
- if (!gpu->icc_path) {
- /*
- * Keep compatbility with device trees that don't have an
- * interconnect-names property.
- */
- gpu->icc_path = of_icc_get(dev, NULL);
- }
- if (IS_ERR(gpu->icc_path))
- gpu->icc_path = NULL;
-
- gpu->ocmem_icc_path = of_icc_get(dev, "ocmem");
- if (IS_ERR(gpu->ocmem_icc_path))
- gpu->ocmem_icc_path = NULL;
-
- return 0;
}
int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
@@ -993,9 +975,11 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu,
const struct adreno_gpu_funcs *funcs, int nr_rings)
{
- struct adreno_platform_config *config = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct adreno_platform_config *config = dev->platform_data;
struct msm_gpu_config adreno_gpu_config = { 0 };
struct msm_gpu *gpu = &adreno_gpu->base;
+ int ret;
adreno_gpu->funcs = funcs;
adreno_gpu->info = adreno_info(config->rev);
@@ -1007,27 +991,59 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.nr_rings = nr_rings;
- adreno_get_pwrlevels(&pdev->dev, gpu);
+ adreno_get_pwrlevels(dev, gpu);
- pm_runtime_set_autosuspend_delay(&pdev->dev,
+ pm_runtime_set_autosuspend_delay(dev,
adreno_gpu->info->inactive_period);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
- return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
+ ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, &adreno_gpu_config);
+ if (ret)
+ return ret;
+
+ /*
+ * The legacy case, before "interconnect-names", only has a
+ * single interconnect path, which is equivalent to "gfx-mem"
+ */
+ if (!of_find_property(dev->of_node, "interconnect-names", NULL)) {
+ gpu->icc_path = of_icc_get(dev, NULL);
+ } else {
+ gpu->icc_path = of_icc_get(dev, "gfx-mem");
+ gpu->ocmem_icc_path = of_icc_get(dev, "ocmem");
+ }
+
+ if (IS_ERR(gpu->icc_path)) {
+ ret = PTR_ERR(gpu->icc_path);
+ gpu->icc_path = NULL;
+ return ret;
+ }
+
+ if (IS_ERR(gpu->ocmem_icc_path)) {
+ ret = PTR_ERR(gpu->ocmem_icc_path);
+ gpu->ocmem_icc_path = NULL;
+ /* allow -ENODATA, ocmem icc is optional */
+ if (ret != -ENODATA)
+ return ret;
+ }
+
+ return 0;
}
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
struct msm_gpu *gpu = &adreno_gpu->base;
+ struct msm_drm_private *priv = gpu->dev->dev_private;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
release_firmware(adreno_gpu->fw[i]);
- icc_put(gpu->icc_path);
- icc_put(gpu->ocmem_icc_path);
+ pm_runtime_disable(&priv->gpu_pdev->dev);
msm_gpu_cleanup(&adreno_gpu->base);
+
+ icc_put(gpu->icc_path);
+ icc_put(gpu->ocmem_icc_path);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 2f5d2c3acc3a..99bb468f5f24 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -68,6 +68,13 @@ struct adreno_gpu_funcs {
int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};
+struct adreno_reglist {
+ u32 offset;
+ u32 value;
+};
+
+extern const struct adreno_reglist a630_hwcg[], a640_hwcg[], a650_hwcg[];
+
struct adreno_info {
struct adreno_rev rev;
uint32_t revn;
@@ -78,6 +85,7 @@ struct adreno_info {
struct msm_gpu *(*init)(struct drm_device *dev);
const char *zapfw;
u32 inactive_period;
+ const struct adreno_reglist *hwcg;
};
const struct adreno_info *adreno_info(struct adreno_rev rev);
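
The adreno_reglist addition above lets each adreno_info entry carry a per-GPU hardware clock-gating (hwcg) register table. A minimal sketch of how such a table might look and be walked is below; the example offsets and values, the zeroed-sentinel convention, and the write_reg callback are assumptions for illustration, not the driver's actual tables or writer.

/* Illustrative only: a hypothetical hwcg table and a consumer loop. */
#include <stdint.h>

struct adreno_reglist_sketch {
	uint32_t offset;
	uint32_t value;
};

/* Hypothetical offset/value pairs; a zeroed entry marks the end. */
static const struct adreno_reglist_sketch example_hwcg[] = {
	{ 0x0ae00, 0x02222222 },
	{ 0x0ae01, 0x00222222 },
	{ 0, 0 },
};

/* Walk the table and program each register via a caller-supplied writer. */
static void sketch_apply_hwcg(void (*write_reg)(uint32_t offset, uint32_t value))
{
	const struct adreno_reglist_sketch *r;

	for (r = example_hwcg; r->offset; r++)
		write_reg(r->offset, r->value);
}
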
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 79b907ac0b4b..3931eecadaff 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,19 +8,21 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 594 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 90159 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14386 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 65048 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 84226 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112556 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 149461 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 184695 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 11218 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_control_regs.xml ( 4559 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pipe_regs.xml ( 2872 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -54,10 +56,13 @@ enum vgt_event_type {
CACHE_FLUSH_TS = 4,
CONTEXT_DONE = 5,
CACHE_FLUSH = 6,
- HLSQ_FLUSH = 7,
VIZQUERY_START = 7,
+ HLSQ_FLUSH = 7,
VIZQUERY_END = 8,
SC_WAIT_WC = 9,
+ WRITE_PRIMITIVE_COUNTS = 9,
+ START_PRIMITIVE_CTRS = 11,
+ STOP_PRIMITIVE_CTRS = 12,
RST_PIX_CNT = 13,
RST_VTX_CNT = 14,
TILE_FLUSH = 15,
@@ -65,23 +70,31 @@ enum vgt_event_type {
CACHE_FLUSH_AND_INV_TS_EVENT = 20,
ZPASS_DONE = 21,
CACHE_FLUSH_AND_INV_EVENT = 22,
+ RB_DONE_TS = 22,
PERFCOUNTER_START = 23,
PERFCOUNTER_STOP = 24,
VS_FETCH_DONE = 27,
FACENESS_FLUSH = 28,
+ WT_DONE_TS = 8,
FLUSH_SO_0 = 17,
FLUSH_SO_1 = 18,
FLUSH_SO_2 = 19,
FLUSH_SO_3 = 20,
PC_CCU_INVALIDATE_DEPTH = 24,
PC_CCU_INVALIDATE_COLOR = 25,
- UNK_1C = 28,
- UNK_1D = 29,
+ PC_CCU_RESOLVE_TS = 26,
+ PC_CCU_FLUSH_DEPTH_TS = 28,
+ PC_CCU_FLUSH_COLOR_TS = 29,
BLIT = 30,
UNK_25 = 37,
LRZ_FLUSH = 38,
+ BLIT_OP_FILL_2D = 39,
+ BLIT_OP_COPY_2D = 40,
+ BLIT_OP_SCALE_2D = 42,
+ CONTEXT_DONE_2D = 43,
UNK_2C = 44,
UNK_2D = 45,
+ CACHE_INVALIDATE = 49,
};
enum pc_di_primtype {
@@ -99,13 +112,45 @@ enum pc_di_primtype {
DI_PT_LINESTRIP_ADJ = 11,
DI_PT_TRI_ADJ = 12,
DI_PT_TRISTRIP_ADJ = 13,
+ DI_PT_PATCHES0 = 31,
+ DI_PT_PATCHES1 = 32,
+ DI_PT_PATCHES2 = 33,
+ DI_PT_PATCHES3 = 34,
+ DI_PT_PATCHES4 = 35,
+ DI_PT_PATCHES5 = 36,
+ DI_PT_PATCHES6 = 37,
+ DI_PT_PATCHES7 = 38,
+ DI_PT_PATCHES8 = 39,
+ DI_PT_PATCHES9 = 40,
+ DI_PT_PATCHES10 = 41,
+ DI_PT_PATCHES11 = 42,
+ DI_PT_PATCHES12 = 43,
+ DI_PT_PATCHES13 = 44,
+ DI_PT_PATCHES14 = 45,
+ DI_PT_PATCHES15 = 46,
+ DI_PT_PATCHES16 = 47,
+ DI_PT_PATCHES17 = 48,
+ DI_PT_PATCHES18 = 49,
+ DI_PT_PATCHES19 = 50,
+ DI_PT_PATCHES20 = 51,
+ DI_PT_PATCHES21 = 52,
+ DI_PT_PATCHES22 = 53,
+ DI_PT_PATCHES23 = 54,
+ DI_PT_PATCHES24 = 55,
+ DI_PT_PATCHES25 = 56,
+ DI_PT_PATCHES26 = 57,
+ DI_PT_PATCHES27 = 58,
+ DI_PT_PATCHES28 = 59,
+ DI_PT_PATCHES29 = 60,
+ DI_PT_PATCHES30 = 61,
+ DI_PT_PATCHES31 = 62,
};
enum pc_di_src_sel {
DI_SRC_SEL_DMA = 0,
DI_SRC_SEL_IMMEDIATE = 1,
DI_SRC_SEL_AUTO_INDEX = 2,
- DI_SRC_SEL_RESERVED = 3,
+ DI_SRC_SEL_AUTO_XFB = 3,
};
enum pc_di_face_cull_sel {
@@ -143,6 +188,7 @@ enum adreno_pm4_type3_packets {
CP_PREEMPT_ENABLE = 28,
CP_PREEMPT_TOKEN = 30,
CP_INDIRECT_BUFFER = 63,
+ CP_INDIRECT_BUFFER_CHAIN = 87,
CP_INDIRECT_BUFFER_PFD = 55,
CP_WAIT_FOR_IDLE = 38,
CP_WAIT_REG_MEM = 60,
@@ -199,6 +245,7 @@ enum adreno_pm4_type3_packets {
CP_DRAW_INDX_OFFSET = 56,
CP_DRAW_INDIRECT = 40,
CP_DRAW_INDX_INDIRECT = 41,
+ CP_DRAW_INDIRECT_MULTI = 42,
CP_DRAW_AUTO = 36,
CP_UNKNOWN_19 = 25,
CP_UNKNOWN_1A = 26,
@@ -232,6 +279,7 @@ enum adreno_pm4_type3_packets {
CP_SET_MODE = 99,
CP_LOAD_STATE6_GEOM = 50,
CP_LOAD_STATE6_FRAG = 52,
+ CP_LOAD_STATE6 = 54,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -241,9 +289,14 @@ enum adreno_pm4_type3_packets {
IN_INCR_UPDT_CONST = 86,
IN_INCR_UPDT_INSTR = 87,
PKT4 = 4,
- CP_UNK_A6XX_14 = 20,
- CP_UNK_A6XX_36 = 54,
- CP_UNK_A6XX_55 = 85,
+ CP_SCRATCH_WRITE = 76,
+ CP_REG_TO_MEM_OFFSET_MEM = 116,
+ CP_REG_TO_MEM_OFFSET_REG = 114,
+ CP_WAIT_MEM_GTE = 20,
+ CP_WAIT_TWO_REGS = 112,
+ CP_MEMCPY = 117,
+ CP_SET_BIN_DATA5_OFFSET = 46,
+ CP_SET_CTXSWITCH_IB = 85,
CP_REG_WRITE = 109,
};
@@ -292,6 +345,7 @@ enum a4xx_state_block {
enum a4xx_state_type {
ST4_SHADER = 0,
ST4_CONSTANTS = 1,
+ ST4_UBO = 2,
};
enum a4xx_state_src {
@@ -312,18 +366,22 @@ enum a6xx_state_block {
SB6_GS_SHADER = 11,
SB6_FS_SHADER = 12,
SB6_CS_SHADER = 13,
- SB6_SSBO = 14,
- SB6_CS_SSBO = 15,
+ SB6_IBO = 14,
+ SB6_CS_IBO = 15,
};
enum a6xx_state_type {
ST6_SHADER = 0,
ST6_CONSTANTS = 1,
+ ST6_UBO = 2,
+ ST6_IBO = 3,
};
enum a6xx_state_src {
SS6_DIRECT = 0,
+ SS6_BINDLESS = 1,
SS6_INDIRECT = 2,
+ SS6_UBO = 3,
};
enum a4xx_index_size {
@@ -332,6 +390,17 @@ enum a4xx_index_size {
INDEX4_SIZE_32_BIT = 2,
};
+enum a6xx_patch_type {
+ TESS_QUADS = 0,
+ TESS_TRIANGLES = 1,
+ TESS_ISOLINES = 2,
+};
+
+enum a6xx_draw_indirect_opcode {
+ INDIRECT_OP_NORMAL = 2,
+ INDIRECT_OP_INDEXED = 4,
+};
+
enum cp_cond_function {
WRITE_ALWAYS = 0,
WRITE_LT = 1,
@@ -361,9 +430,15 @@ enum a6xx_render_mode {
RM6_BYPASS = 1,
RM6_BINNING = 2,
RM6_GMEM = 4,
- RM6_BLIT2D = 5,
+ RM6_ENDVIS = 5,
RM6_RESOLVE = 6,
+ RM6_YIELD = 7,
+ RM6_COMPUTE = 8,
RM6_BLIT2DSCALE = 12,
+ RM6_IB1LIST_START = 13,
+ RM6_IB1LIST_END = 14,
+ RM6_IFPC_ENABLE = 256,
+ RM6_IFPC_DISABLE = 257,
};
enum pseudo_reg {
@@ -374,6 +449,25 @@ enum pseudo_reg {
COUNTER = 4,
};
+enum compare_mode {
+ PRED_TEST = 1,
+ REG_COMPARE = 2,
+ RENDER_MODE = 3,
+};
+
+enum ctxswitch_ib {
+ RESTORE_IB = 0,
+ YIELD_RESTORE_IB = 1,
+ SAVE_IB = 2,
+ RB_SAVE_IB = 3,
+};
+
+enum reg_tracker {
+ TRACK_CNTL_REG = 1,
+ TRACK_RENDER_CNTL = 2,
+ UNK_EVENT_WRITE = 4,
+};
+
#define REG_CP_LOAD_STATE_0 0x00000000
#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
@@ -469,7 +563,7 @@ static inline uint32_t CP_LOAD_STATE6_0_DST_OFF(uint32_t val)
{
return ((val) << CP_LOAD_STATE6_0_DST_OFF__SHIFT) & CP_LOAD_STATE6_0_DST_OFF__MASK;
}
-#define CP_LOAD_STATE6_0_STATE_TYPE__MASK 0x00004000
+#define CP_LOAD_STATE6_0_STATE_TYPE__MASK 0x0000c000
#define CP_LOAD_STATE6_0_STATE_TYPE__SHIFT 14
static inline uint32_t CP_LOAD_STATE6_0_STATE_TYPE(enum a6xx_state_type val)
{
@@ -510,6 +604,8 @@ static inline uint32_t CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(uint32_t val)
return ((val) << CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK;
}
+#define REG_CP_LOAD_STATE6_EXT_SRC_ADDR 0x00000001
+
#define REG_CP_DRAW_INDX_0 0x00000000
#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff
#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0
@@ -653,12 +749,14 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val
{
return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK;
}
-#define CP_DRAW_INDX_OFFSET_0_TESS_MODE__MASK 0x01f00000
-#define CP_DRAW_INDX_OFFSET_0_TESS_MODE__SHIFT 20
-static inline uint32_t CP_DRAW_INDX_OFFSET_0_TESS_MODE(uint32_t val)
+#define CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__MASK 0x00003000
+#define CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__SHIFT 12
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(enum a6xx_patch_type val)
{
- return ((val) << CP_DRAW_INDX_OFFSET_0_TESS_MODE__SHIFT) & CP_DRAW_INDX_OFFSET_0_TESS_MODE__MASK;
+ return ((val) << CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__MASK;
}
+#define CP_DRAW_INDX_OFFSET_0_GS_ENABLE 0x00010000
+#define CP_DRAW_INDX_OFFSET_0_TESS_ENABLE 0x00020000
#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001
#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK 0xffffffff
@@ -677,6 +775,39 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val)
}
#define REG_CP_DRAW_INDX_OFFSET_3 0x00000003
+#define CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_3_FIRST_INDX(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT) & CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK;
+}
+
+
+#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_5 0x00000005
+#define CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_INDX_BASE 0x00000004
+
+#define REG_CP_DRAW_INDX_OFFSET_6 0x00000006
+#define CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_6_MAX_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK;
+}
#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004
#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK 0xffffffff
@@ -719,12 +850,15 @@ static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size v
{
return ((val) << A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK;
}
-#define A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__MASK 0x01f00000
-#define A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__SHIFT 20
-static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_TESS_MODE(uint32_t val)
+#define A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__MASK 0x00003000
+#define A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__SHIFT 12
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_type val)
{
- return ((val) << A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__MASK;
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__MASK;
}
+#define A4XX_CP_DRAW_INDIRECT_0_GS_ENABLE 0x00010000
+#define A4XX_CP_DRAW_INDIRECT_0_TESS_ENABLE 0x00020000
+
#define REG_A4XX_CP_DRAW_INDIRECT_1 0x00000001
#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK 0xffffffff
@@ -735,6 +869,14 @@ static inline uint32_t A4XX_CP_DRAW_INDIRECT_1_INDIRECT(uint32_t val)
}
+#define REG_A5XX_CP_DRAW_INDIRECT_1 0x00000001
+#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK;
+}
+
#define REG_A5XX_CP_DRAW_INDIRECT_2 0x00000002
#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK 0xffffffff
#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT 0
@@ -743,6 +885,8 @@ static inline uint32_t A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI(uint32_t val)
return ((val) << A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK;
}
+#define REG_A5XX_CP_DRAW_INDIRECT_INDIRECT 0x00000001
+
#define REG_A4XX_CP_DRAW_INDX_INDIRECT_0 0x00000000
#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f
#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT 0
@@ -768,12 +912,14 @@ static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE(enum a4xx_index_s
{
return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK;
}
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__MASK 0x01f00000
-#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__SHIFT 20
-static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE(uint32_t val)
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__MASK 0x00003000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__SHIFT 12
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_type val)
{
- return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__MASK;
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__MASK;
}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_GS_ENABLE 0x00010000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_ENABLE 0x00020000
#define REG_A4XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
@@ -817,6 +963,8 @@ static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI(uint32_t val)
return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK;
}
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_INDX_BASE 0x00000001
+
#define REG_A5XX_CP_DRAW_INDX_INDIRECT_3 0x00000003
#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK 0xffffffff
#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT 0
@@ -841,6 +989,84 @@ static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI(uint32_t val)
return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK;
}
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_INDIRECT 0x00000004
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_0 0x00000000
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__MASK 0x0000003f
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__MASK 0x000000c0
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__MASK 0x00000300
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__SHIFT 8
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__MASK 0x00000c00
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__MASK 0x00003000
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__SHIFT 12
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE(enum a6xx_patch_type val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_GS_ENABLE 0x00010000
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_TESS_ENABLE 0x00020000
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_1 0x00000001
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__MASK 0x0000000f
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__SHIFT 0
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(enum a6xx_draw_indirect_opcode val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK 0x003fff00
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT 8
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(uint32_t val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK;
+}
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_2 0x00000002
+#define A6XX_CP_DRAW_INDIRECT_MULTI_2_DRAW_COUNT__MASK 0xffffffff
+#define A6XX_CP_DRAW_INDIRECT_MULTI_2_DRAW_COUNT__SHIFT 0
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_2_DRAW_COUNT(uint32_t val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_2_DRAW_COUNT__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_2_DRAW_COUNT__MASK;
+}
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_ADDRESS_0 0x00000003
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_5 0x00000005
+#define A6XX_CP_DRAW_INDIRECT_MULTI_5_PARAM_0__MASK 0xffffffff
+#define A6XX_CP_DRAW_INDIRECT_MULTI_5_PARAM_0__SHIFT 0
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_5_PARAM_0(uint32_t val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_5_PARAM_0__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_5_PARAM_0__MASK;
+}
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000006
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_8 0x00000008
+#define A6XX_CP_DRAW_INDIRECT_MULTI_8_STRIDE__MASK 0xffffffff
+#define A6XX_CP_DRAW_INDIRECT_MULTI_8_STRIDE__SHIFT 0
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_8_STRIDE(uint32_t val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_8_STRIDE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_8_STRIDE__MASK;
+}
+
static inline uint32_t REG_CP_SET_DRAW_STATE_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
@@ -854,12 +1080,9 @@ static inline uint32_t CP_SET_DRAW_STATE__0_COUNT(uint32_t val)
#define CP_SET_DRAW_STATE__0_DISABLE 0x00020000
#define CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS 0x00040000
#define CP_SET_DRAW_STATE__0_LOAD_IMMED 0x00080000
-#define CP_SET_DRAW_STATE__0_ENABLE_MASK__MASK 0x00f00000
-#define CP_SET_DRAW_STATE__0_ENABLE_MASK__SHIFT 20
-static inline uint32_t CP_SET_DRAW_STATE__0_ENABLE_MASK(uint32_t val)
-{
- return ((val) << CP_SET_DRAW_STATE__0_ENABLE_MASK__SHIFT) & CP_SET_DRAW_STATE__0_ENABLE_MASK__MASK;
-}
+#define CP_SET_DRAW_STATE__0_BINNING 0x00100000
+#define CP_SET_DRAW_STATE__0_GMEM 0x00200000
+#define CP_SET_DRAW_STATE__0_SYSMEM 0x00400000
#define CP_SET_DRAW_STATE__0_GROUP_ID__MASK 0x1f000000
#define CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT 24
static inline uint32_t CP_SET_DRAW_STATE__0_GROUP_ID(uint32_t val)
@@ -976,30 +1199,101 @@ static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val)
}
#define REG_CP_SET_BIN_DATA5_5 0x00000005
-#define CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO(uint32_t val)
+#define CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO(uint32_t val)
{
- return ((val) << CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__SHIFT) & CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__MASK;
+ return ((val) << CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__SHIFT) & CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__MASK;
}
#define REG_CP_SET_BIN_DATA5_6 0x00000006
-#define CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO(uint32_t val)
+#define CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT) & CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_0 0x00000000
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK 0x003f0000
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT 16
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT) & CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK;
+}
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_N__MASK 0x07c00000
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_N__SHIFT 22
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_0_VSC_N(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_OFFSET_0_VSC_N__SHIFT) & CP_SET_BIN_DATA5_OFFSET_0_VSC_N__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_1 0x00000001
+#define CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_2 0x00000002
+#define CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_3 0x00000003
+#define CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__MASK;
+}
+
+#define REG_CP_REG_RMW_0 0x00000000
+#define CP_REG_RMW_0_DST_REG__MASK 0x0003ffff
+#define CP_REG_RMW_0_DST_REG__SHIFT 0
+static inline uint32_t CP_REG_RMW_0_DST_REG(uint32_t val)
+{
+ return ((val) << CP_REG_RMW_0_DST_REG__SHIFT) & CP_REG_RMW_0_DST_REG__MASK;
+}
+#define CP_REG_RMW_0_ROTATE__MASK 0x1f000000
+#define CP_REG_RMW_0_ROTATE__SHIFT 24
+static inline uint32_t CP_REG_RMW_0_ROTATE(uint32_t val)
+{
+ return ((val) << CP_REG_RMW_0_ROTATE__SHIFT) & CP_REG_RMW_0_ROTATE__MASK;
+}
+#define CP_REG_RMW_0_SRC1_ADD 0x20000000
+#define CP_REG_RMW_0_SRC1_IS_REG 0x40000000
+#define CP_REG_RMW_0_SRC0_IS_REG 0x80000000
+
+#define REG_CP_REG_RMW_1 0x00000001
+#define CP_REG_RMW_1_SRC0__MASK 0xffffffff
+#define CP_REG_RMW_1_SRC0__SHIFT 0
+static inline uint32_t CP_REG_RMW_1_SRC0(uint32_t val)
+{
+ return ((val) << CP_REG_RMW_1_SRC0__SHIFT) & CP_REG_RMW_1_SRC0__MASK;
+}
+
+#define REG_CP_REG_RMW_2 0x00000002
+#define CP_REG_RMW_2_SRC1__MASK 0xffffffff
+#define CP_REG_RMW_2_SRC1__SHIFT 0
+static inline uint32_t CP_REG_RMW_2_SRC1(uint32_t val)
{
- return ((val) << CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__SHIFT) & CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__MASK;
+ return ((val) << CP_REG_RMW_2_SRC1__SHIFT) & CP_REG_RMW_2_SRC1__MASK;
}
#define REG_CP_REG_TO_MEM_0 0x00000000
-#define CP_REG_TO_MEM_0_REG__MASK 0x0000ffff
+#define CP_REG_TO_MEM_0_REG__MASK 0x0003ffff
#define CP_REG_TO_MEM_0_REG__SHIFT 0
static inline uint32_t CP_REG_TO_MEM_0_REG(uint32_t val)
{
return ((val) << CP_REG_TO_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_0_REG__MASK;
}
-#define CP_REG_TO_MEM_0_CNT__MASK 0x3ff80000
-#define CP_REG_TO_MEM_0_CNT__SHIFT 19
+#define CP_REG_TO_MEM_0_CNT__MASK 0x3ffc0000
+#define CP_REG_TO_MEM_0_CNT__SHIFT 18
static inline uint32_t CP_REG_TO_MEM_0_CNT(uint32_t val)
{
return ((val) << CP_REG_TO_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_0_CNT__MASK;
@@ -1023,8 +1317,97 @@ static inline uint32_t CP_REG_TO_MEM_2_DEST_HI(uint32_t val)
return ((val) << CP_REG_TO_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_2_DEST_HI__MASK;
}
+#define REG_CP_REG_TO_MEM_OFFSET_REG_0 0x00000000
+#define CP_REG_TO_MEM_OFFSET_REG_0_REG__MASK 0x0003ffff
+#define CP_REG_TO_MEM_OFFSET_REG_0_REG__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_0_REG(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_REG_0_REG__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_REG_0_CNT__MASK 0x3ffc0000
+#define CP_REG_TO_MEM_OFFSET_REG_0_CNT__SHIFT 18
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_0_CNT(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_REG_0_CNT__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_REG_0_64B 0x40000000
+#define CP_REG_TO_MEM_OFFSET_REG_0_ACCUMULATE 0x80000000
+
+#define REG_CP_REG_TO_MEM_OFFSET_REG_1 0x00000001
+#define CP_REG_TO_MEM_OFFSET_REG_1_DEST__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_REG_1_DEST__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_1_DEST(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_REG_1_DEST__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_1_DEST__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_REG_2 0x00000002
+#define CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_REG_3 0x00000003
+#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__MASK 0x0003ffff
+#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0_SCRATCH 0x00080000
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_0 0x00000000
+#define CP_REG_TO_MEM_OFFSET_MEM_0_REG__MASK 0x0003ffff
+#define CP_REG_TO_MEM_OFFSET_MEM_0_REG__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_0_REG(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_MEM_0_CNT__MASK 0x3ffc0000
+#define CP_REG_TO_MEM_OFFSET_MEM_0_CNT__SHIFT 18
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_0_CNT(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_MEM_0_64B 0x40000000
+#define CP_REG_TO_MEM_OFFSET_MEM_0_ACCUMULATE 0x80000000
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_1 0x00000001
+#define CP_REG_TO_MEM_OFFSET_MEM_1_DEST__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_1_DEST__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_1_DEST(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_1_DEST__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_2 0x00000002
+#define CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_3 0x00000003
+#define CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_4 0x00000004
+#define CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__MASK;
+}
+
#define REG_CP_MEM_TO_REG_0 0x00000000
-#define CP_MEM_TO_REG_0_REG__MASK 0x0000ffff
+#define CP_MEM_TO_REG_0_REG__MASK 0x0003ffff
#define CP_MEM_TO_REG_0_REG__SHIFT 0
static inline uint32_t CP_MEM_TO_REG_0_REG(uint32_t val)
{
@@ -1036,8 +1419,8 @@ static inline uint32_t CP_MEM_TO_REG_0_CNT(uint32_t val)
{
return ((val) << CP_MEM_TO_REG_0_CNT__SHIFT) & CP_MEM_TO_REG_0_CNT__MASK;
}
-#define CP_MEM_TO_REG_0_64B 0x40000000
-#define CP_MEM_TO_REG_0_ACCUMULATE 0x80000000
+#define CP_MEM_TO_REG_0_SHIFT_BY_2 0x40000000
+#define CP_MEM_TO_REG_0_UNK31 0x80000000
#define REG_CP_MEM_TO_REG_1 0x00000001
#define CP_MEM_TO_REG_1_SRC__MASK 0xffffffff
@@ -1060,6 +1443,113 @@ static inline uint32_t CP_MEM_TO_REG_2_SRC_HI(uint32_t val)
#define CP_MEM_TO_MEM_0_NEG_B 0x00000002
#define CP_MEM_TO_MEM_0_NEG_C 0x00000004
#define CP_MEM_TO_MEM_0_DOUBLE 0x20000000
+#define CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES 0x40000000
+#define CP_MEM_TO_MEM_0_UNK31 0x80000000
+
+#define REG_CP_MEMCPY_0 0x00000000
+#define CP_MEMCPY_0_DWORDS__MASK 0xffffffff
+#define CP_MEMCPY_0_DWORDS__SHIFT 0
+static inline uint32_t CP_MEMCPY_0_DWORDS(uint32_t val)
+{
+ return ((val) << CP_MEMCPY_0_DWORDS__SHIFT) & CP_MEMCPY_0_DWORDS__MASK;
+}
+
+#define REG_CP_MEMCPY_1 0x00000001
+#define CP_MEMCPY_1_SRC_LO__MASK 0xffffffff
+#define CP_MEMCPY_1_SRC_LO__SHIFT 0
+static inline uint32_t CP_MEMCPY_1_SRC_LO(uint32_t val)
+{
+ return ((val) << CP_MEMCPY_1_SRC_LO__SHIFT) & CP_MEMCPY_1_SRC_LO__MASK;
+}
+
+#define REG_CP_MEMCPY_2 0x00000002
+#define CP_MEMCPY_2_SRC_HI__MASK 0xffffffff
+#define CP_MEMCPY_2_SRC_HI__SHIFT 0
+static inline uint32_t CP_MEMCPY_2_SRC_HI(uint32_t val)
+{
+ return ((val) << CP_MEMCPY_2_SRC_HI__SHIFT) & CP_MEMCPY_2_SRC_HI__MASK;
+}
+
+#define REG_CP_MEMCPY_3 0x00000003
+#define CP_MEMCPY_3_DST_LO__MASK 0xffffffff
+#define CP_MEMCPY_3_DST_LO__SHIFT 0
+static inline uint32_t CP_MEMCPY_3_DST_LO(uint32_t val)
+{
+ return ((val) << CP_MEMCPY_3_DST_LO__SHIFT) & CP_MEMCPY_3_DST_LO__MASK;
+}
+
+#define REG_CP_MEMCPY_4 0x00000004
+#define CP_MEMCPY_4_DST_HI__MASK 0xffffffff
+#define CP_MEMCPY_4_DST_HI__SHIFT 0
+static inline uint32_t CP_MEMCPY_4_DST_HI(uint32_t val)
+{
+ return ((val) << CP_MEMCPY_4_DST_HI__SHIFT) & CP_MEMCPY_4_DST_HI__MASK;
+}
+
+#define REG_CP_REG_TO_SCRATCH_0 0x00000000
+#define CP_REG_TO_SCRATCH_0_REG__MASK 0x0003ffff
+#define CP_REG_TO_SCRATCH_0_REG__SHIFT 0
+static inline uint32_t CP_REG_TO_SCRATCH_0_REG(uint32_t val)
+{
+ return ((val) << CP_REG_TO_SCRATCH_0_REG__SHIFT) & CP_REG_TO_SCRATCH_0_REG__MASK;
+}
+#define CP_REG_TO_SCRATCH_0_SCRATCH__MASK 0x00700000
+#define CP_REG_TO_SCRATCH_0_SCRATCH__SHIFT 20
+static inline uint32_t CP_REG_TO_SCRATCH_0_SCRATCH(uint32_t val)
+{
+ return ((val) << CP_REG_TO_SCRATCH_0_SCRATCH__SHIFT) & CP_REG_TO_SCRATCH_0_SCRATCH__MASK;
+}
+#define CP_REG_TO_SCRATCH_0_CNT__MASK 0x07000000
+#define CP_REG_TO_SCRATCH_0_CNT__SHIFT 24
+static inline uint32_t CP_REG_TO_SCRATCH_0_CNT(uint32_t val)
+{
+ return ((val) << CP_REG_TO_SCRATCH_0_CNT__SHIFT) & CP_REG_TO_SCRATCH_0_CNT__MASK;
+}
+
+#define REG_CP_SCRATCH_TO_REG_0 0x00000000
+#define CP_SCRATCH_TO_REG_0_REG__MASK 0x0003ffff
+#define CP_SCRATCH_TO_REG_0_REG__SHIFT 0
+static inline uint32_t CP_SCRATCH_TO_REG_0_REG(uint32_t val)
+{
+ return ((val) << CP_SCRATCH_TO_REG_0_REG__SHIFT) & CP_SCRATCH_TO_REG_0_REG__MASK;
+}
+#define CP_SCRATCH_TO_REG_0_UNK18 0x00040000
+#define CP_SCRATCH_TO_REG_0_SCRATCH__MASK 0x00700000
+#define CP_SCRATCH_TO_REG_0_SCRATCH__SHIFT 20
+static inline uint32_t CP_SCRATCH_TO_REG_0_SCRATCH(uint32_t val)
+{
+ return ((val) << CP_SCRATCH_TO_REG_0_SCRATCH__SHIFT) & CP_SCRATCH_TO_REG_0_SCRATCH__MASK;
+}
+#define CP_SCRATCH_TO_REG_0_CNT__MASK 0x07000000
+#define CP_SCRATCH_TO_REG_0_CNT__SHIFT 24
+static inline uint32_t CP_SCRATCH_TO_REG_0_CNT(uint32_t val)
+{
+ return ((val) << CP_SCRATCH_TO_REG_0_CNT__SHIFT) & CP_SCRATCH_TO_REG_0_CNT__MASK;
+}
+
+#define REG_CP_SCRATCH_WRITE_0 0x00000000
+#define CP_SCRATCH_WRITE_0_SCRATCH__MASK 0x00700000
+#define CP_SCRATCH_WRITE_0_SCRATCH__SHIFT 20
+static inline uint32_t CP_SCRATCH_WRITE_0_SCRATCH(uint32_t val)
+{
+ return ((val) << CP_SCRATCH_WRITE_0_SCRATCH__SHIFT) & CP_SCRATCH_WRITE_0_SCRATCH__MASK;
+}
+
+#define REG_CP_MEM_WRITE_0 0x00000000
+#define CP_MEM_WRITE_0_ADDR_LO__MASK 0xffffffff
+#define CP_MEM_WRITE_0_ADDR_LO__SHIFT 0
+static inline uint32_t CP_MEM_WRITE_0_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_MEM_WRITE_0_ADDR_LO__SHIFT) & CP_MEM_WRITE_0_ADDR_LO__MASK;
+}
+
+#define REG_CP_MEM_WRITE_1 0x00000001
+#define CP_MEM_WRITE_1_ADDR_HI__MASK 0xffffffff
+#define CP_MEM_WRITE_1_ADDR_HI__SHIFT 0
+static inline uint32_t CP_MEM_WRITE_1_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_MEM_WRITE_1_ADDR_HI__SHIFT) & CP_MEM_WRITE_1_ADDR_HI__MASK;
+}
#define REG_CP_COND_WRITE_0 0x00000000
#define CP_COND_WRITE_0_FUNCTION__MASK 0x00000007
@@ -1118,7 +1608,9 @@ static inline uint32_t CP_COND_WRITE5_0_FUNCTION(enum cp_cond_function val)
{
return ((val) << CP_COND_WRITE5_0_FUNCTION__SHIFT) & CP_COND_WRITE5_0_FUNCTION__MASK;
}
+#define CP_COND_WRITE5_0_SIGNED_COMPARE 0x00000008
#define CP_COND_WRITE5_0_POLL_MEMORY 0x00000010
+#define CP_COND_WRITE5_0_POLL_SCRATCH 0x00000020
#define CP_COND_WRITE5_0_WRITE_MEMORY 0x00000100
#define REG_CP_COND_WRITE5_1 0x00000001
@@ -1177,6 +1669,114 @@ static inline uint32_t CP_COND_WRITE5_7_WRITE_DATA(uint32_t val)
return ((val) << CP_COND_WRITE5_7_WRITE_DATA__SHIFT) & CP_COND_WRITE5_7_WRITE_DATA__MASK;
}
+#define REG_CP_WAIT_MEM_GTE_0 0x00000000
+#define CP_WAIT_MEM_GTE_0_RESERVED__MASK 0xffffffff
+#define CP_WAIT_MEM_GTE_0_RESERVED__SHIFT 0
+static inline uint32_t CP_WAIT_MEM_GTE_0_RESERVED(uint32_t val)
+{
+ return ((val) << CP_WAIT_MEM_GTE_0_RESERVED__SHIFT) & CP_WAIT_MEM_GTE_0_RESERVED__MASK;
+}
+
+#define REG_CP_WAIT_MEM_GTE_1 0x00000001
+#define CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__MASK 0xffffffff
+#define CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__SHIFT 0
+static inline uint32_t CP_WAIT_MEM_GTE_1_POLL_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__SHIFT) & CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__MASK;
+}
+
+#define REG_CP_WAIT_MEM_GTE_2 0x00000002
+#define CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__MASK 0xffffffff
+#define CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__SHIFT 0
+static inline uint32_t CP_WAIT_MEM_GTE_2_POLL_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__SHIFT) & CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__MASK;
+}
+
+#define REG_CP_WAIT_MEM_GTE_3 0x00000003
+#define CP_WAIT_MEM_GTE_3_REF__MASK 0xffffffff
+#define CP_WAIT_MEM_GTE_3_REF__SHIFT 0
+static inline uint32_t CP_WAIT_MEM_GTE_3_REF(uint32_t val)
+{
+ return ((val) << CP_WAIT_MEM_GTE_3_REF__SHIFT) & CP_WAIT_MEM_GTE_3_REF__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_0 0x00000000
+#define CP_WAIT_REG_MEM_0_FUNCTION__MASK 0x00000007
+#define CP_WAIT_REG_MEM_0_FUNCTION__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_0_FUNCTION(enum cp_cond_function val)
+{
+ return ((val) << CP_WAIT_REG_MEM_0_FUNCTION__SHIFT) & CP_WAIT_REG_MEM_0_FUNCTION__MASK;
+}
+#define CP_WAIT_REG_MEM_0_SIGNED_COMPARE 0x00000008
+#define CP_WAIT_REG_MEM_0_POLL_MEMORY 0x00000010
+#define CP_WAIT_REG_MEM_0_POLL_SCRATCH 0x00000020
+#define CP_WAIT_REG_MEM_0_WRITE_MEMORY 0x00000100
+
+#define REG_CP_WAIT_REG_MEM_1 0x00000001
+#define CP_WAIT_REG_MEM_1_POLL_ADDR_LO__MASK 0xffffffff
+#define CP_WAIT_REG_MEM_1_POLL_ADDR_LO__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_1_POLL_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_WAIT_REG_MEM_1_POLL_ADDR_LO__SHIFT) & CP_WAIT_REG_MEM_1_POLL_ADDR_LO__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_2 0x00000002
+#define CP_WAIT_REG_MEM_2_POLL_ADDR_HI__MASK 0xffffffff
+#define CP_WAIT_REG_MEM_2_POLL_ADDR_HI__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_2_POLL_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_WAIT_REG_MEM_2_POLL_ADDR_HI__SHIFT) & CP_WAIT_REG_MEM_2_POLL_ADDR_HI__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_3 0x00000003
+#define CP_WAIT_REG_MEM_3_REF__MASK 0xffffffff
+#define CP_WAIT_REG_MEM_3_REF__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_3_REF(uint32_t val)
+{
+ return ((val) << CP_WAIT_REG_MEM_3_REF__SHIFT) & CP_WAIT_REG_MEM_3_REF__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_4 0x00000004
+#define CP_WAIT_REG_MEM_4_MASK__MASK 0xffffffff
+#define CP_WAIT_REG_MEM_4_MASK__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_4_MASK(uint32_t val)
+{
+ return ((val) << CP_WAIT_REG_MEM_4_MASK__SHIFT) & CP_WAIT_REG_MEM_4_MASK__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_5 0x00000005
+#define CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__MASK 0xffffffff
+#define CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(uint32_t val)
+{
+ return ((val) << CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__SHIFT) & CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__MASK;
+}
+
+#define REG_CP_WAIT_TWO_REGS_0 0x00000000
+#define CP_WAIT_TWO_REGS_0_REG0__MASK 0x0003ffff
+#define CP_WAIT_TWO_REGS_0_REG0__SHIFT 0
+static inline uint32_t CP_WAIT_TWO_REGS_0_REG0(uint32_t val)
+{
+ return ((val) << CP_WAIT_TWO_REGS_0_REG0__SHIFT) & CP_WAIT_TWO_REGS_0_REG0__MASK;
+}
+
+#define REG_CP_WAIT_TWO_REGS_1 0x00000001
+#define CP_WAIT_TWO_REGS_1_REG1__MASK 0x0003ffff
+#define CP_WAIT_TWO_REGS_1_REG1__SHIFT 0
+static inline uint32_t CP_WAIT_TWO_REGS_1_REG1(uint32_t val)
+{
+ return ((val) << CP_WAIT_TWO_REGS_1_REG1__SHIFT) & CP_WAIT_TWO_REGS_1_REG1__MASK;
+}
+
+#define REG_CP_WAIT_TWO_REGS_2 0x00000002
+#define CP_WAIT_TWO_REGS_2_REF__MASK 0xffffffff
+#define CP_WAIT_TWO_REGS_2_REF__SHIFT 0
+static inline uint32_t CP_WAIT_TWO_REGS_2_REF(uint32_t val)
+{
+ return ((val) << CP_WAIT_TWO_REGS_2_REF__SHIFT) & CP_WAIT_TWO_REGS_2_REF__MASK;
+}
+
#define REG_CP_DISPATCH_COMPUTE_0 0x00000000
#define REG_CP_DISPATCH_COMPUTE_1 0x00000001
@@ -1329,6 +1929,7 @@ static inline uint32_t CP_EVENT_WRITE_0_EVENT(enum vgt_event_type val)
return ((val) << CP_EVENT_WRITE_0_EVENT__SHIFT) & CP_EVENT_WRITE_0_EVENT__MASK;
}
#define CP_EVENT_WRITE_0_TIMESTAMP 0x40000000
+#define CP_EVENT_WRITE_0_IRQ 0x80000000
#define REG_CP_EVENT_WRITE_1 0x00000001
#define CP_EVENT_WRITE_1_ADDR_0_LO__MASK 0xffffffff
@@ -1506,61 +2107,209 @@ static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(uint32_t val)
return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK;
}
-#define REG_A2XX_CP_SET_MARKER_0 0x00000000
-#define A2XX_CP_SET_MARKER_0_MARKER__MASK 0x0000000f
-#define A2XX_CP_SET_MARKER_0_MARKER__SHIFT 0
-static inline uint32_t A2XX_CP_SET_MARKER_0_MARKER(uint32_t val)
+#define REG_A6XX_CP_SET_MARKER_0 0x00000000
+#define A6XX_CP_SET_MARKER_0_MODE__MASK 0x000001ff
+#define A6XX_CP_SET_MARKER_0_MODE__SHIFT 0
+static inline uint32_t A6XX_CP_SET_MARKER_0_MODE(enum a6xx_render_mode val)
+{
+ return ((val) << A6XX_CP_SET_MARKER_0_MODE__SHIFT) & A6XX_CP_SET_MARKER_0_MODE__MASK;
+}
+#define A6XX_CP_SET_MARKER_0_MARKER__MASK 0x0000000f
+#define A6XX_CP_SET_MARKER_0_MARKER__SHIFT 0
+static inline uint32_t A6XX_CP_SET_MARKER_0_MARKER(enum a6xx_render_mode val)
+{
+ return ((val) << A6XX_CP_SET_MARKER_0_MARKER__SHIFT) & A6XX_CP_SET_MARKER_0_MARKER__MASK;
+}
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK 0x00000007
+#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT 0
+static inline uint32_t A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG(enum pseudo_reg val)
+{
+ return ((val) << A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT) & A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK;
+}
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
+#define A6XX_CP_SET_PSEUDO_REG__1_LO__MASK 0xffffffff
+#define A6XX_CP_SET_PSEUDO_REG__1_LO__SHIFT 0
+static inline uint32_t A6XX_CP_SET_PSEUDO_REG__1_LO(uint32_t val)
+{
+ return ((val) << A6XX_CP_SET_PSEUDO_REG__1_LO__SHIFT) & A6XX_CP_SET_PSEUDO_REG__1_LO__MASK;
+}
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
+#define A6XX_CP_SET_PSEUDO_REG__2_HI__MASK 0xffffffff
+#define A6XX_CP_SET_PSEUDO_REG__2_HI__SHIFT 0
+static inline uint32_t A6XX_CP_SET_PSEUDO_REG__2_HI(uint32_t val)
+{
+ return ((val) << A6XX_CP_SET_PSEUDO_REG__2_HI__SHIFT) & A6XX_CP_SET_PSEUDO_REG__2_HI__MASK;
+}
+
+#define REG_A6XX_CP_REG_TEST_0 0x00000000
+#define A6XX_CP_REG_TEST_0_REG__MASK 0x0003ffff
+#define A6XX_CP_REG_TEST_0_REG__SHIFT 0
+static inline uint32_t A6XX_CP_REG_TEST_0_REG(uint32_t val)
+{
+ return ((val) << A6XX_CP_REG_TEST_0_REG__SHIFT) & A6XX_CP_REG_TEST_0_REG__MASK;
+}
+#define A6XX_CP_REG_TEST_0_BIT__MASK 0x01f00000
+#define A6XX_CP_REG_TEST_0_BIT__SHIFT 20
+static inline uint32_t A6XX_CP_REG_TEST_0_BIT(uint32_t val)
+{
+ return ((val) << A6XX_CP_REG_TEST_0_BIT__SHIFT) & A6XX_CP_REG_TEST_0_BIT__MASK;
+}
+#define A6XX_CP_REG_TEST_0_WAIT_FOR_ME 0x02000000
+
+#define REG_CP_COND_REG_EXEC_0 0x00000000
+#define CP_COND_REG_EXEC_0_REG0__MASK 0x0003ffff
+#define CP_COND_REG_EXEC_0_REG0__SHIFT 0
+static inline uint32_t CP_COND_REG_EXEC_0_REG0(uint32_t val)
+{
+ return ((val) << CP_COND_REG_EXEC_0_REG0__SHIFT) & CP_COND_REG_EXEC_0_REG0__MASK;
+}
+#define CP_COND_REG_EXEC_0_BINNING 0x02000000
+#define CP_COND_REG_EXEC_0_GMEM 0x04000000
+#define CP_COND_REG_EXEC_0_SYSMEM 0x08000000
+#define CP_COND_REG_EXEC_0_MODE__MASK 0xf0000000
+#define CP_COND_REG_EXEC_0_MODE__SHIFT 28
+static inline uint32_t CP_COND_REG_EXEC_0_MODE(enum compare_mode val)
+{
+ return ((val) << CP_COND_REG_EXEC_0_MODE__SHIFT) & CP_COND_REG_EXEC_0_MODE__MASK;
+}
+
+#define REG_CP_COND_REG_EXEC_1 0x00000001
+#define CP_COND_REG_EXEC_1_DWORDS__MASK 0xffffffff
+#define CP_COND_REG_EXEC_1_DWORDS__SHIFT 0
+static inline uint32_t CP_COND_REG_EXEC_1_DWORDS(uint32_t val)
+{
+ return ((val) << CP_COND_REG_EXEC_1_DWORDS__SHIFT) & CP_COND_REG_EXEC_1_DWORDS__MASK;
+}
+
+#define REG_CP_COND_EXEC_0 0x00000000
+#define CP_COND_EXEC_0_ADDR0_LO__MASK 0xffffffff
+#define CP_COND_EXEC_0_ADDR0_LO__SHIFT 0
+static inline uint32_t CP_COND_EXEC_0_ADDR0_LO(uint32_t val)
+{
+ return ((val) << CP_COND_EXEC_0_ADDR0_LO__SHIFT) & CP_COND_EXEC_0_ADDR0_LO__MASK;
+}
+
+#define REG_CP_COND_EXEC_1 0x00000001
+#define CP_COND_EXEC_1_ADDR0_HI__MASK 0xffffffff
+#define CP_COND_EXEC_1_ADDR0_HI__SHIFT 0
+static inline uint32_t CP_COND_EXEC_1_ADDR0_HI(uint32_t val)
{
- return ((val) << A2XX_CP_SET_MARKER_0_MARKER__SHIFT) & A2XX_CP_SET_MARKER_0_MARKER__MASK;
+ return ((val) << CP_COND_EXEC_1_ADDR0_HI__SHIFT) & CP_COND_EXEC_1_ADDR0_HI__MASK;
}
-#define A2XX_CP_SET_MARKER_0_MODE__MASK 0x0000000f
-#define A2XX_CP_SET_MARKER_0_MODE__SHIFT 0
-static inline uint32_t A2XX_CP_SET_MARKER_0_MODE(enum a6xx_render_mode val)
+
+#define REG_CP_COND_EXEC_2 0x00000002
+#define CP_COND_EXEC_2_ADDR1_LO__MASK 0xffffffff
+#define CP_COND_EXEC_2_ADDR1_LO__SHIFT 0
+static inline uint32_t CP_COND_EXEC_2_ADDR1_LO(uint32_t val)
+{
+ return ((val) << CP_COND_EXEC_2_ADDR1_LO__SHIFT) & CP_COND_EXEC_2_ADDR1_LO__MASK;
+}
+
+#define REG_CP_COND_EXEC_3 0x00000003
+#define CP_COND_EXEC_3_ADDR1_HI__MASK 0xffffffff
+#define CP_COND_EXEC_3_ADDR1_HI__SHIFT 0
+static inline uint32_t CP_COND_EXEC_3_ADDR1_HI(uint32_t val)
{
- return ((val) << A2XX_CP_SET_MARKER_0_MODE__SHIFT) & A2XX_CP_SET_MARKER_0_MODE__MASK;
+ return ((val) << CP_COND_EXEC_3_ADDR1_HI__SHIFT) & CP_COND_EXEC_3_ADDR1_HI__MASK;
}
-#define A2XX_CP_SET_MARKER_0_IFPC 0x00000100
-static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define REG_CP_COND_EXEC_4 0x00000004
+#define CP_COND_EXEC_4_REF__MASK 0xffffffff
+#define CP_COND_EXEC_4_REF__SHIFT 0
+static inline uint32_t CP_COND_EXEC_4_REF(uint32_t val)
+{
+ return ((val) << CP_COND_EXEC_4_REF__SHIFT) & CP_COND_EXEC_4_REF__MASK;
+}
-static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
-#define A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK 0x00000007
-#define A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT 0
-static inline uint32_t A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG(enum pseudo_reg val)
+#define REG_CP_COND_EXEC_5 0x00000005
+#define CP_COND_EXEC_5_DWORDS__MASK 0xffffffff
+#define CP_COND_EXEC_5_DWORDS__SHIFT 0
+static inline uint32_t CP_COND_EXEC_5_DWORDS(uint32_t val)
{
- return ((val) << A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT) & A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK;
+ return ((val) << CP_COND_EXEC_5_DWORDS__SHIFT) & CP_COND_EXEC_5_DWORDS__MASK;
}
-static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
-#define A2XX_CP_SET_PSEUDO_REG__1_LO__MASK 0xffffffff
-#define A2XX_CP_SET_PSEUDO_REG__1_LO__SHIFT 0
-static inline uint32_t A2XX_CP_SET_PSEUDO_REG__1_LO(uint32_t val)
+#define REG_CP_SET_CTXSWITCH_IB_0 0x00000000
+#define CP_SET_CTXSWITCH_IB_0_ADDR_LO__MASK 0xffffffff
+#define CP_SET_CTXSWITCH_IB_0_ADDR_LO__SHIFT 0
+static inline uint32_t CP_SET_CTXSWITCH_IB_0_ADDR_LO(uint32_t val)
{
- return ((val) << A2XX_CP_SET_PSEUDO_REG__1_LO__SHIFT) & A2XX_CP_SET_PSEUDO_REG__1_LO__MASK;
+ return ((val) << CP_SET_CTXSWITCH_IB_0_ADDR_LO__SHIFT) & CP_SET_CTXSWITCH_IB_0_ADDR_LO__MASK;
}
-static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
-#define A2XX_CP_SET_PSEUDO_REG__2_HI__MASK 0xffffffff
-#define A2XX_CP_SET_PSEUDO_REG__2_HI__SHIFT 0
-static inline uint32_t A2XX_CP_SET_PSEUDO_REG__2_HI(uint32_t val)
+#define REG_CP_SET_CTXSWITCH_IB_1 0x00000001
+#define CP_SET_CTXSWITCH_IB_1_ADDR_HI__MASK 0xffffffff
+#define CP_SET_CTXSWITCH_IB_1_ADDR_HI__SHIFT 0
+static inline uint32_t CP_SET_CTXSWITCH_IB_1_ADDR_HI(uint32_t val)
{
- return ((val) << A2XX_CP_SET_PSEUDO_REG__2_HI__SHIFT) & A2XX_CP_SET_PSEUDO_REG__2_HI__MASK;
+ return ((val) << CP_SET_CTXSWITCH_IB_1_ADDR_HI__SHIFT) & CP_SET_CTXSWITCH_IB_1_ADDR_HI__MASK;
}
-#define REG_A2XX_CP_REG_TEST_0 0x00000000
-#define A2XX_CP_REG_TEST_0_REG__MASK 0x00000fff
-#define A2XX_CP_REG_TEST_0_REG__SHIFT 0
-static inline uint32_t A2XX_CP_REG_TEST_0_REG(uint32_t val)
+#define REG_CP_SET_CTXSWITCH_IB_2 0x00000002
+#define CP_SET_CTXSWITCH_IB_2_DWORDS__MASK 0x000fffff
+#define CP_SET_CTXSWITCH_IB_2_DWORDS__SHIFT 0
+static inline uint32_t CP_SET_CTXSWITCH_IB_2_DWORDS(uint32_t val)
+{
+ return ((val) << CP_SET_CTXSWITCH_IB_2_DWORDS__SHIFT) & CP_SET_CTXSWITCH_IB_2_DWORDS__MASK;
+}
+#define CP_SET_CTXSWITCH_IB_2_TYPE__MASK 0x00300000
+#define CP_SET_CTXSWITCH_IB_2_TYPE__SHIFT 20
+static inline uint32_t CP_SET_CTXSWITCH_IB_2_TYPE(enum ctxswitch_ib val)
{
- return ((val) << A2XX_CP_REG_TEST_0_REG__SHIFT) & A2XX_CP_REG_TEST_0_REG__MASK;
+ return ((val) << CP_SET_CTXSWITCH_IB_2_TYPE__SHIFT) & CP_SET_CTXSWITCH_IB_2_TYPE__MASK;
}
-#define A2XX_CP_REG_TEST_0_BIT__MASK 0x01f00000
-#define A2XX_CP_REG_TEST_0_BIT__SHIFT 20
-static inline uint32_t A2XX_CP_REG_TEST_0_BIT(uint32_t val)
+
+#define REG_CP_REG_WRITE_0 0x00000000
+#define CP_REG_WRITE_0_TRACKER__MASK 0x00000007
+#define CP_REG_WRITE_0_TRACKER__SHIFT 0
+static inline uint32_t CP_REG_WRITE_0_TRACKER(enum reg_tracker val)
+{
+ return ((val) << CP_REG_WRITE_0_TRACKER__SHIFT) & CP_REG_WRITE_0_TRACKER__MASK;
+}
+
+#define REG_CP_SMMU_TABLE_UPDATE_0 0x00000000
+#define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK 0xffffffff
+#define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT 0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(uint32_t val)
+{
+ return ((val) << CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT) & CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK;
+}
+
+#define REG_CP_SMMU_TABLE_UPDATE_1 0x00000001
+#define CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__MASK 0x0000ffff
+#define CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__SHIFT 0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(uint32_t val)
+{
+ return ((val) << CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__SHIFT) & CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__MASK;
+}
+#define CP_SMMU_TABLE_UPDATE_1_ASID__MASK 0xffff0000
+#define CP_SMMU_TABLE_UPDATE_1_ASID__SHIFT 16
+static inline uint32_t CP_SMMU_TABLE_UPDATE_1_ASID(uint32_t val)
+{
+ return ((val) << CP_SMMU_TABLE_UPDATE_1_ASID__SHIFT) & CP_SMMU_TABLE_UPDATE_1_ASID__MASK;
+}
+
+#define REG_CP_SMMU_TABLE_UPDATE_2 0x00000002
+#define CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__MASK 0xffffffff
+#define CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__SHIFT 0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(uint32_t val)
+{
+ return ((val) << CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__SHIFT) & CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__MASK;
+}
+
+#define REG_CP_SMMU_TABLE_UPDATE_3 0x00000003
+#define CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__MASK 0xffffffff
+#define CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__SHIFT 0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(uint32_t val)
{
- return ((val) << A2XX_CP_REG_TEST_0_BIT__SHIFT) & A2XX_CP_REG_TEST_0_BIT__MASK;
+ return ((val) << CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__SHIFT) & CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__MASK;
}
-#define A2XX_CP_REG_TEST_0_UNK25 0x02000000
#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 7c230f719ad3..b36919d95362 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -7,6 +7,7 @@
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/mutex.h>
+#include <linux/pm_opp.h>
#include <linux/sort.h>
#include <linux/clk.h>
#include <linux/bitmap.h>
@@ -218,7 +219,7 @@ static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
rate = core_clk->max_rate;
core_clk->rate = rate;
- return msm_dss_clk_set_rate(core_clk, 1);
+ return dev_pm_opp_set_rate(&kms->pdev->dev, core_clk->rate);
}
static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index e15b42a780e0..f272a8d0f95b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -389,14 +389,14 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
if (!fevent) {
- DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
+ DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
return;
}
fevent->event = event;
fevent->crtc = crtc;
fevent->ts = ktime_get();
- kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
+ kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}
void dpu_crtc_complete_commit(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 797e8fd4c16f..a97f6d2e5a08 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -208,6 +208,36 @@ struct dpu_encoder_virt {
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
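+/*
+ * What appears to be the standard 4x4 ordered-dither (Bayer) matrix; it is
+ * copied verbatim into the dither config handed to the pingpong block.
+ */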
+static u32 dither_matrix[DITHER_MATRIX_SZ] = {
+ 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
+};
+
+static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
+{
+ struct dpu_hw_dither_cfg dither_cfg = { 0 };
+
+ if (!hw_pp->ops.setup_dither)
+ return;
+
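+ /*
+ * Only 6 bpc panels get an explicit dither configuration here; any other
+ * depth falls through to the default case, which disables the block.
+ */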
+ switch (bpc) {
+ case 6:
+ dither_cfg.c0_bitdepth = 6;
+ dither_cfg.c1_bitdepth = 6;
+ dither_cfg.c2_bitdepth = 6;
+ dither_cfg.c3_bitdepth = 6;
+ dither_cfg.temporal_en = 0;
+ break;
+ default:
+ hw_pp->ops.setup_dither(hw_pp, NULL);
+ return;
+ }
+
+ memcpy(&dither_cfg.matrix, dither_matrix,
+ sizeof(u32) * DITHER_MATRIX_SZ);
+
+ hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
+}
+
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
{
@@ -504,7 +534,7 @@ static struct msm_display_topology dpu_encoder_get_topology(
struct dpu_kms *dpu_kms,
struct drm_display_mode *mode)
{
- struct msm_display_topology topology;
+ struct msm_display_topology topology = {0};
int i, intf_count = 0;
for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
@@ -520,7 +550,8 @@ static struct msm_display_topology dpu_encoder_get_topology(
* 1 LM, 1 INTF
* 2 LM, 1 INTF (stream merge to support high resolution interfaces)
*
- * Adding color blocks only to primary interface
+ * Add color blocks only to the primary interface, and only when enough
+ * of them are available
*/
if (intf_count == 2)
topology.num_lm = 2;
@@ -529,8 +560,11 @@ static struct msm_display_topology dpu_encoder_get_topology(
else
topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
- if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI)
- topology.num_dspp = topology.num_lm;
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
+ if (dpu_kms->catalog->dspp &&
+ (dpu_kms->catalog->dspp_count >= topology.num_lm))
+ topology.num_dspp = topology.num_lm;
+ }
topology.num_enc = 0;
topology.num_intf = intf_count;
@@ -1054,7 +1088,7 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
+ int i;
if (!drm_enc || !drm_enc->dev) {
DPU_ERROR("invalid parameters\n");
@@ -1062,7 +1096,6 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
}
priv = drm_enc->dev->dev_private;
- dpu_kms = to_dpu_kms(priv->kms);
dpu_enc = to_dpu_encoder_virt(drm_enc);
if (!dpu_enc || !dpu_enc->cur_master) {
@@ -1070,13 +1103,17 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
return;
}
- if (dpu_enc->cur_master->hw_mdptop &&
- dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
- dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
- dpu_enc->cur_master->hw_mdptop,
- dpu_kms->catalog);
-
_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
+ !WARN_ON(dpu_enc->num_phys_encs == 0)) {
+ unsigned bpc = dpu_enc->phys_encs[0]->connector->display_info.bpc;
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ if (!dpu_enc->hw_pp[i])
+ continue;
+ _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
+ }
+ }
}
void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
@@ -2109,7 +2146,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
dpu_enc = to_dpu_encoder_virt(enc);
- mutex_init(&dpu_enc->enc_lock);
ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
if (ret)
goto fail;
@@ -2124,7 +2160,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
0);
- mutex_init(&dpu_enc->rc_lock);
INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
dpu_enc->idle_timeout = IDLE_TIMEOUT;
@@ -2156,7 +2191,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
if (!dpu_enc)
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
drm_enc_mode, NULL);
@@ -2169,6 +2204,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
spin_lock_init(&dpu_enc->enc_spinlock);
dpu_enc->enabled = false;
+ mutex_init(&dpu_enc->enc_lock);
+ mutex_init(&dpu_enc->rc_lock);
return &dpu_enc->base;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 29d4fde3172b..97d122eee96d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -43,6 +43,10 @@
#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
+#define INTF_SDM845_MASK (0)
+
+#define INTF_SC7180_MASK (BIT(DPU_INTF_INPUT_CTRL) | BIT(DPU_INTF_TE))
+
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
#define DEFAULT_DPU_LINE_WIDTH 2048
#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
@@ -70,6 +74,10 @@ static const struct dpu_caps sdm845_dpu_caps = {
.has_dim_layer = true,
.has_idle_pc = true,
.has_3d_merge = true,
+ .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
};
static const struct dpu_caps sc7180_dpu_caps = {
@@ -80,6 +88,39 @@ static const struct dpu_caps sc7180_dpu_caps = {
.ubwc_version = DPU_HW_UBWC_VER_20,
.has_dim_layer = true,
.has_idle_pc = true,
+ .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_caps sm8150_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
+ .ubwc_version = DPU_HW_UBWC_VER_30,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 4096,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_caps sm8250_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3, /* TODO: qseed3 lite */
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
+ .ubwc_version = DPU_HW_UBWC_VER_40,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 4096,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
};
static const struct dpu_mdp_cfg sdm845_mdp[] = {
@@ -117,10 +158,37 @@ static const struct dpu_mdp_cfg sc7180_mdp[] = {
.reg_off = 0x2AC, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_DMA0] = {
.reg_off = 0x2AC, .bit_off = 8},
- .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
.reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x2C4, .bit_off = 8},
+ },
+};
+
+static const struct dpu_mdp_cfg sm8250_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x45C,
+ .features = 0,
+ .highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+ .reg_off = 0x2B4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+ .reg_off = 0x2BC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+ .reg_off = 0x2C4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x2B4, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
- .reg_off = 0x2BC, .bit_off = 8},
+ .reg_off = 0x2BC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x2C4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
+ .reg_off = 0x2BC, .bit_off = 20},
},
};
@@ -173,21 +241,47 @@ static const struct dpu_ctl_cfg sc7180_ctl[] = {
},
};
+static const struct dpu_ctl_cfg sm8150_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY)
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY)
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG)
+ },
+ {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG)
+ },
+ {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG)
+ },
+ {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a00, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG)
+ },
+};
+
/*************************************************************
* SSPP sub blocks config
*************************************************************/
/* SSPP common configuration */
-static const struct dpu_sspp_blks_common sdm845_sspp_common = {
- .maxlinewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
- .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
- .maxhdeciexp = MAX_HORZ_DECIMATION,
- .maxvdeciexp = MAX_VERT_DECIMATION,
-};
#define _VIG_SBLK(num, sdma_pri, qseed_ver) \
{ \
- .common = &sdm845_sspp_common, \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
.maxupscale = MAX_UPSCALE_RATIO, \
.smart_dma_priority = sdma_pri, \
@@ -207,7 +301,6 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
#define _DMA_SBLK(num, sdma_pri) \
{ \
- .common = &sdm845_sspp_common, \
.maxdwnscale = SSPP_UNITY_SCALE, \
.maxupscale = SSPP_UNITY_SCALE, \
.smart_dma_priority = sdma_pri, \
@@ -272,10 +365,10 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
- SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
- sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
};
/*************************************************************
@@ -336,6 +429,23 @@ static const struct dpu_lm_cfg sc7180_lm[] = {
&sc7180_lm_sblk, PINGPONG_1, LM_0, 0),
};
+/* SM8150 */
+
+static const struct dpu_lm_cfg sm8150_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_0, LM_1, 0),
+ LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_1, LM_0, 0),
+ LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_2, LM_3, 0),
+ LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
+ LM_BLK("lm_4", LM_4, 0x48000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_4, LM_5, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_5, LM_4, 0),
+};
+
/*************************************************************
* DSPP sub blocks config
*************************************************************/
@@ -355,6 +465,7 @@ static const struct dpu_dspp_sub_blks sc7180_dspp_sblk = {
static const struct dpu_dspp_cfg sc7180_dspp[] = {
DSPP_BLK("dspp_0", DSPP_0, 0x54000),
};
+
/*************************************************************
* PINGPONG sub blocks config
*************************************************************/
@@ -397,29 +508,45 @@ static struct dpu_pingpong_cfg sc7180_pp[] = {
PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
};
+static const struct dpu_pingpong_cfg sm8150_pp[] = {
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
+ PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
+ PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
+ PP_BLK("pingpong_4", PINGPONG_4, 0x72000),
+ PP_BLK("pingpong_5", PINGPONG_5, 0x72800),
+};
+
/*************************************************************
* INTF sub blocks config
*************************************************************/
-#define INTF_BLK(_name, _id, _base, _type, _ctrl_id) \
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _features) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x280, \
- .features = BIT(DPU_CTL_ACTIVE_CFG), \
+ .features = _features, \
.type = _type, \
.controller_id = _ctrl_id, \
.prog_fetch_lines_worst_case = 24 \
}
static const struct dpu_intf_cfg sdm845_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
- INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
- INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
- INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SDM845_MASK),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SDM845_MASK),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SDM845_MASK),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SDM845_MASK),
};
static const struct dpu_intf_cfg sc7180_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
- INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK),
+};
+
+static const struct dpu_intf_cfg sm8150_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SC7180_MASK),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SC7180_MASK),
};
/*************************************************************
@@ -452,6 +579,18 @@ static const struct dpu_reg_dma_cfg sdm845_regdma = {
.base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
};
+static const struct dpu_reg_dma_cfg sm8150_regdma = {
+ .base = 0x0, .version = 0x00010001, .trigger_sel_off = 0x119c
+};
+
+static const struct dpu_reg_dma_cfg sm8250_regdma = {
+ .base = 0x0,
+ .version = 0x00010002,
+ .trigger_sel_off = 0x119c,
+ .xin_id = 7,
+ .clk_ctrl = DPU_CLK_CTRL_REG_DMA,
+};
+
/*************************************************************
* PERF data config
*************************************************************/
@@ -476,6 +615,10 @@ static const struct dpu_qos_lut_entry sc7180_qos_linear[] = {
{.fl = 0, .lut = 0x0011222222335777},
};
+static const struct dpu_qos_lut_entry sm8150_qos_linear[] = {
+ {.fl = 0, .lut = 0x0011222222223357 },
+};
+
static const struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
{.fl = 10, .lut = 0x344556677},
{.fl = 11, .lut = 0x3344556677},
@@ -560,6 +703,56 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
},
};
+static const struct dpu_perf_cfg sm8150_perf_data = {
+ .max_bw_low = 12800000,
+ .max_bw_high = 12800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sm8150_qos_linear),
+ .entries = sm8150_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+};
+
+static const struct dpu_perf_cfg sm8250_perf_data = {
+ .max_bw_low = 13700000,
+ .max_bw_high = 16600000,
+ .min_core_ib = 4800000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+};
+
/*************************************************************
* Hardware catalog init
*************************************************************/
@@ -624,9 +817,71 @@ static void sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
};
}
+/*
+ * sm8150_cfg_init(): populate sm8150 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sm8150_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+ *dpu_cfg = (struct dpu_mdss_cfg){
+ .caps = &sm8150_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sdm845_mdp),
+ .mdp = sdm845_mdp,
+ .ctl_count = ARRAY_SIZE(sm8150_ctl),
+ .ctl = sm8150_ctl,
+ .sspp_count = ARRAY_SIZE(sdm845_sspp),
+ .sspp = sdm845_sspp,
+ .mixer_count = ARRAY_SIZE(sm8150_lm),
+ .mixer = sm8150_lm,
+ .pingpong_count = ARRAY_SIZE(sm8150_pp),
+ .pingpong = sm8150_pp,
+ .intf_count = ARRAY_SIZE(sm8150_intf),
+ .intf = sm8150_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = sm8150_regdma,
+ .perf = sm8150_perf_data,
+ .mdss_irqs = 0x3ff,
+ };
+}
+
+/*
+ * sm8250_cfg_init(): populate sm8250 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+ *dpu_cfg = (struct dpu_mdss_cfg){
+ .caps = &sm8250_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sm8250_mdp),
+ .mdp = sm8250_mdp,
+ .ctl_count = ARRAY_SIZE(sm8150_ctl),
+ .ctl = sm8150_ctl,
+ /* TODO: sspp qseed version differs from 845 */
+ .sspp_count = ARRAY_SIZE(sdm845_sspp),
+ .sspp = sdm845_sspp,
+ .mixer_count = ARRAY_SIZE(sm8150_lm),
+ .mixer = sm8150_lm,
+ .pingpong_count = ARRAY_SIZE(sm8150_pp),
+ .pingpong = sm8150_pp,
+ .intf_count = ARRAY_SIZE(sm8150_intf),
+ .intf = sm8150_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = sm8250_regdma,
+ .perf = sm8250_perf_data,
+ .mdss_irqs = 0xff,
+ };
+}
+
static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
{ .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
{ .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
+ { .hw_rev = DPU_HW_VER_500, .cfg_init = sm8150_cfg_init},
+ { .hw_rev = DPU_HW_VER_501, .cfg_init = sm8150_cfg_init},
+ { .hw_rev = DPU_HW_VER_600, .cfg_init = sm8250_cfg_init},
{ .hw_rev = DPU_HW_VER_620, .cfg_init = sc7180_cfg_init},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index f7de43838c69..1b7a9213a756 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -37,7 +37,9 @@
#define DPU_HW_VER_400 DPU_HW_VER(4, 0, 0) /* sdm845 v1.0 */
#define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
#define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
-#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
+#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sm8150 v1.0 */
+#define DPU_HW_VER_501 DPU_HW_VER(5, 0, 1) /* sm8150 v2.0 */
+#define DPU_HW_VER_600 DPU_HW_VER(6, 0, 0) /* sm8250 */
#define DPU_HW_VER_620 DPU_HW_VER(6, 2, 0) /* sc7180 v1.0 */
@@ -65,10 +67,9 @@ enum {
DPU_HW_UBWC_VER_10 = 0x100,
DPU_HW_UBWC_VER_20 = 0x200,
DPU_HW_UBWC_VER_30 = 0x300,
+ DPU_HW_UBWC_VER_40 = 0x400,
};
-#define IS_UBWC_20_SUPPORTED(rev) ((rev) >= DPU_HW_UBWC_VER_20)
-
/**
* MDP TOP BLOCK features
* @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
@@ -186,6 +187,19 @@ enum {
};
/**
+ * INTF sub-blocks
+ * @DPU_INTF_INPUT_CTRL Supports selecting the pingpong block from which
+ * pixel data arrives at this INTF
+ * @DPU_INTF_TE INTF block has TE configuration support
+ * @DPU_INTF_MAX
+ */
+enum {
+ DPU_INTF_INPUT_CTRL = 0x1,
+ DPU_INTF_TE,
+ DPU_INTF_MAX
+};
+
+/**
* VBIF sub-blocks and features
* @DPU_VBIF_QOS_OTLIM VBIF supports OT Limit
* @DPU_VBIF_QOS_REMAP VBIF supports QoS priority remap
@@ -300,6 +314,10 @@ struct dpu_qos_lut_tbl {
* @has_dim_layer dim layer feature status
* @has_idle_pc indicate if idle power collapse feature is supported
* @has_3d_merge indicate if 3D merge is supported
+ * @max_linewidth max linewidth for sspp
+ * @pixel_ram_size size of latency hiding and de-tiling buffer in bytes
+ * @max_hdeci_exp max horizontal decimation supported (max is 2^value)
+ * @max_vdeci_exp max vertical decimation supported (max is 2^value)
*/
struct dpu_caps {
u32 max_mixer_width;
@@ -311,22 +329,11 @@ struct dpu_caps {
bool has_dim_layer;
bool has_idle_pc;
bool has_3d_merge;
-};
-
-/**
- * struct dpu_sspp_blks_common : SSPP sub-blocks common configuration
- * @maxwidth: max pixelwidth supported by this pipe
- * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
- * @maxhdeciexp: max horizontal decimation supported by this pipe
- * (max is 2^value)
- * @maxvdeciexp: max vertical decimation supported by this pipe
- * (max is 2^value)
- */
-struct dpu_sspp_blks_common {
- u32 maxlinewidth;
+ /* SSPP limits */
+ u32 max_linewidth;
u32 pixel_ram_size;
- u32 maxhdeciexp;
- u32 maxvdeciexp;
+ u32 max_hdeci_exp;
+ u32 max_vdeci_exp;
};
/**
@@ -352,7 +359,6 @@ struct dpu_sspp_blks_common {
* @virt_num_formats: Number of supported formats for virtual planes
*/
struct dpu_sspp_sub_blks {
- const struct dpu_sspp_blks_common *common;
u32 creq_vblank;
u32 danger_vblank;
u32 maxdwnscale;
@@ -423,6 +429,7 @@ enum dpu_clk_ctrl_type {
DPU_CLK_CTRL_CURSOR0,
DPU_CLK_CTRL_CURSOR1,
DPU_CLK_CTRL_INLINE_ROT0_SSPP,
+ DPU_CLK_CTRL_REG_DMA,
DPU_CLK_CTRL_MAX,
};
@@ -447,7 +454,6 @@ struct dpu_clk_ctrl_reg {
struct dpu_mdp_cfg {
DPU_HW_BLK_INFO;
u32 highest_bank_bit;
- u32 ubwc_static;
u32 ubwc_swizzle;
struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
};
@@ -607,6 +613,8 @@ struct dpu_reg_dma_cfg {
DPU_HW_BLK_INFO;
u32 version;
u32 trigger_sel_off;
+ u32 xin_id;
+ enum dpu_clk_ctrl_type clk_ctrl;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 613ae8f0cfcd..758c355b4fd8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -245,30 +245,14 @@ static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
static int dpu_hw_ctl_get_bitmask_intf_v1(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_intf intf)
{
- switch (intf) {
- case INTF_0:
- case INTF_1:
- *flushbits |= BIT(31);
- break;
- default:
- return 0;
- }
+ *flushbits |= BIT(31);
return 0;
}
static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_intf intf)
{
- switch (intf) {
- case INTF_0:
- *flushbits |= BIT(0);
- break;
- case INTF_1:
- *flushbits |= BIT(1);
- break;
- default:
- return 0;
- }
+ *flushbits |= BIT(intf - INTF_0);
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index efe9a5719c6b..6f0f54588124 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -107,11 +107,6 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
p->hsync_skew - 1;
- if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
- display_v_start += p->hsync_pulse_width + p->h_back_porch;
- display_v_end -= p->h_front_porch;
- }
-
hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
hsync_end_x = hsync_period - p->h_front_porch - 1;
@@ -144,10 +139,25 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
display_hctl = (hsync_end_x << 16) | hsync_start_x;
+ if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
+ active_h_start = hsync_start_x;
+ active_h_end = active_h_start + p->xres - 1;
+ active_v_start = display_v_start;
+ active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+
+ display_v_start += p->hsync_pulse_width + p->h_back_porch;
+
+ active_hctl = (active_h_end << 16) | active_h_start;
+ display_hctl = active_hctl;
+ }
+
den_polarity = 0;
if (ctx->cap->type == INTF_HDMI) {
hsync_polarity = p->yres >= 720 ? 0 : 1;
vsync_polarity = p->yres >= 720 ? 0 : 1;
+ } else if (ctx->cap->type == INTF_DP) {
+ hsync_polarity = p->hsync_polarity;
+ vsync_polarity = p->vsync_polarity;
} else {
hsync_polarity = 0;
vsync_polarity = 0;
@@ -225,14 +235,9 @@ static void dpu_hw_intf_bind_pingpong_blk(
bool enable,
const enum dpu_pingpong pp)
{
- struct dpu_hw_blk_reg_map *c;
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
u32 mux_cfg;
- if (!intf)
- return;
-
- c = &intf->hw;
-
mux_cfg = DPU_REG_READ(c, INTF_MUX);
mux_cfg &= ~0xf;
@@ -280,7 +285,7 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
ops->get_status = dpu_hw_intf_get_status;
ops->enable_timing = dpu_hw_intf_enable_timing_engine;
ops->get_line_count = dpu_hw_intf_get_line_count;
- if (cap & BIT(DPU_CTL_ACTIVE_CFG))
+ if (cap & BIT(DPU_INTF_INPUT_CTRL))
ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 37becd43bd54..4b8baf71423f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -152,14 +152,13 @@ static void _setup_mixer_ops(const struct dpu_mdss_cfg *m,
unsigned long features)
{
ops->setup_mixer_out = dpu_hw_lm_setup_out;
- if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion)
- || IS_SC7180_TARGET(m->hwversion))
+ if (m->hwversion >= DPU_HW_VER_400)
ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
else
ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
ops->setup_alpha_out = dpu_hw_lm_setup_color3;
ops->setup_border_color = dpu_hw_lm_setup_border_color;
-};
+}
static struct dpu_hw_blk_ops dpu_hw_ops;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 402dc5832361..979fd2c60aa0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -171,6 +171,7 @@ enum dpu_ctl {
CTL_2,
CTL_3,
CTL_4,
+ CTL_5,
CTL_MAX
};
@@ -180,6 +181,7 @@ enum dpu_pingpong {
PINGPONG_2,
PINGPONG_3,
PINGPONG_4,
+ PINGPONG_5,
PINGPONG_S0,
PINGPONG_MAX
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index d110a40f0e73..bea4ab5c58c5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -28,6 +28,16 @@
#define PP_FBC_BUDGET_CTL 0x038
#define PP_FBC_LOSSY_MODE 0x03C
+#define PP_DITHER_EN 0x000
+#define PP_DITHER_BITDEPTH 0x004
+#define PP_DITHER_MATRIX 0x008
+
+#define DITHER_DEPTH_MAP_INDEX 9
+
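+/*
+ * Indexed by component bit depth: the value is the 2-bit code written into
+ * PP_DITHER_BITDEPTH (0 for 6 bpc and below, 1 for 7 bpc, 2 for 8 bpc).
+ */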
+static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = {
+ 0, 0, 0, 0, 0, 0, 0, 1, 2
+};
+
static const struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
@@ -49,6 +59,37 @@ static const struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
return ERR_PTR(-EINVAL);
}
+static void dpu_hw_pp_setup_dither(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_dither_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 i, base, data = 0;
+
+ c = &pp->hw;
+ base = pp->caps->sblk->dither.base;
+ if (!cfg) {
+ DPU_REG_WRITE(c, base + PP_DITHER_EN, 0);
+ return;
+ }
+
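+ /*
+ * Pack the four per-component 2-bit depth codes into bits [7:0] and the
+ * temporal-dither enable into bit 8.
+ */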
+ data = dither_depth_map[cfg->c0_bitdepth] & REG_MASK(2);
+ data |= (dither_depth_map[cfg->c1_bitdepth] & REG_MASK(2)) << 2;
+ data |= (dither_depth_map[cfg->c2_bitdepth] & REG_MASK(2)) << 4;
+ data |= (dither_depth_map[cfg->c3_bitdepth] & REG_MASK(2)) << 6;
+ data |= (cfg->temporal_en) ? (1 << 8) : 0;
+
+ DPU_REG_WRITE(c, base + PP_DITHER_BITDEPTH, data);
+
+ for (i = 0; i < DITHER_MATRIX_SZ - 3; i += 4) {
+ data = (cfg->matrix[i] & REG_MASK(4)) |
+ ((cfg->matrix[i + 1] & REG_MASK(4)) << 4) |
+ ((cfg->matrix[i + 2] & REG_MASK(4)) << 8) |
+ ((cfg->matrix[i + 3] & REG_MASK(4)) << 12);
+ DPU_REG_WRITE(c, base + PP_DITHER_MATRIX + i, data);
+ }
+ DPU_REG_WRITE(c, base + PP_DITHER_EN, 1);
+}
+
static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
struct dpu_hw_tear_check *te)
{
@@ -180,15 +221,18 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
return line;
}
-static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
- const struct dpu_pingpong_cfg *hw_cap)
+static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
+ unsigned long features)
{
- ops->setup_tearcheck = dpu_hw_pp_setup_te_config;
- ops->enable_tearcheck = dpu_hw_pp_enable_te;
- ops->connect_external_te = dpu_hw_pp_connect_external_te;
- ops->get_vsync_info = dpu_hw_pp_get_vsync_info;
- ops->poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
- ops->get_line_count = dpu_hw_pp_get_line_count;
+ c->ops.setup_tearcheck = dpu_hw_pp_setup_te_config;
+ c->ops.enable_tearcheck = dpu_hw_pp_enable_te;
+ c->ops.connect_external_te = dpu_hw_pp_connect_external_te;
+ c->ops.get_vsync_info = dpu_hw_pp_get_vsync_info;
+ c->ops.poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
+ c->ops.get_line_count = dpu_hw_pp_get_line_count;
+
+ if (test_bit(DPU_PINGPONG_DITHER, &features))
+ c->ops.setup_dither = dpu_hw_pp_setup_dither;
};
static struct dpu_hw_blk_ops dpu_hw_ops;
@@ -212,7 +256,7 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
c->idx = idx;
c->caps = cfg;
- _setup_pingpong_ops(&c->ops, c->caps);
+ _setup_pingpong_ops(c, c->caps->features);
dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index d73cb73e938b..065996b3ece9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -10,6 +10,8 @@
#include "dpu_hw_util.h"
#include "dpu_hw_blk.h"
+#define DITHER_MATRIX_SZ 16
+
struct dpu_hw_pingpong;
struct dpu_hw_tear_check {
@@ -35,6 +37,26 @@ struct dpu_hw_pp_vsync_info {
};
/**
+ * struct dpu_hw_dither_cfg - dither feature structure
+ * @flags: for customizing operations
+ * @temporal_en: temporal dither enable
+ * @c0_bitdepth: c0 component bit depth
+ * @c1_bitdepth: c1 component bit depth
+ * @c2_bitdepth: c2 component bit depth
+ * @c3_bitdepth: c3 component bit depth
+ * @matrix: dither strength matrix
+ */
+struct dpu_hw_dither_cfg {
+ u64 flags;
+ u32 temporal_en;
+ u32 c0_bitdepth;
+ u32 c1_bitdepth;
+ u32 c2_bitdepth;
+ u32 c3_bitdepth;
+ u32 matrix[DITHER_MATRIX_SZ];
+};
+
+/**
*
* struct dpu_hw_pingpong_ops : Interface to the pingpong Hw driver functions
* Assumption is these functions will be called after clocks are enabled
@@ -82,6 +104,12 @@ struct dpu_hw_pingpong_ops {
* Obtain current vertical line counter
*/
u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
+
+ /**
+ * Set up the dither matrix for the pingpong block
+ */
+ void (*setup_dither)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_dither_cfg *cfg);
};
struct dpu_hw_pingpong {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 82c5dbfdabc7..c940b69435e1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -303,11 +303,25 @@ static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx,
DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
DPU_FETCH_CONFIG_RESET_VALUE |
ctx->mdp->highest_bank_bit << 18);
- if (IS_UBWC_20_SUPPORTED(ctx->catalog->caps->ubwc_version)) {
+ switch (ctx->catalog->caps->ubwc_version) {
+ case DPU_HW_UBWC_VER_10:
+ /* TODO: UBWC v1 case */
+ break;
+ case DPU_HW_UBWC_VER_20:
fast_clear = fmt->alpha_enable ? BIT(31) : 0;
DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
fast_clear | (ctx->mdp->ubwc_swizzle) |
(ctx->mdp->highest_bank_bit << 4));
+ break;
+ case DPU_HW_UBWC_VER_30:
+ DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+ BIT(30) | (ctx->mdp->ubwc_swizzle) |
+ (ctx->mdp->highest_bank_bit << 4));
+ break;
+ case DPU_HW_UBWC_VER_40:
+ DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+ DPU_FORMAT_IS_YUV(fmt) ? 0 : BIT(30));
+ break;
}
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index f9af52ae9f3e..01b76766a9a8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -8,7 +8,6 @@
#include "dpu_kms.h"
#define SSPP_SPARE 0x28
-#define UBWC_STATIC 0x144
#define FLD_SPLIT_DISPLAY_CMD BIT(1)
#define FLD_SMART_PANEL_FREE_RUN BIT(2)
@@ -249,22 +248,6 @@ static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
}
-static void dpu_hw_reset_ubwc(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m)
-{
- struct dpu_hw_blk_reg_map c;
-
- if (!mdp || !m)
- return;
-
- if (!IS_UBWC_20_SUPPORTED(m->caps->ubwc_version))
- return;
-
- /* force blk offset to zero to access beginning of register region */
- c = mdp->hw;
- c.blk_off = 0x0;
- DPU_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
-}
-
static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
{
struct dpu_hw_blk_reg_map *c;
@@ -285,7 +268,6 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
ops->get_danger_status = dpu_hw_get_danger_status;
ops->setup_vsync_source = dpu_hw_setup_vsync_source;
ops->get_safe_status = dpu_hw_get_safe_status;
- ops->reset_ubwc = dpu_hw_reset_ubwc;
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 1d9d32edf619..8018fff5667a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -127,13 +127,6 @@ struct dpu_hw_mdp_ops {
struct dpu_danger_safe_status *status);
/**
- * reset_ubwc - reset top level UBWC configuration
- * @mdp: mdp top context driver
- * @m: pointer to mdss catalog data
- */
- void (*reset_ubwc)(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m);
-
- /**
* intf_audio_select - select the external interface for audio
* @mdp: mdp top context driver
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index b8615d4fe8a3..c0a4d4e16d82 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -10,6 +10,7 @@
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/of_irq.h>
+#include <linux/pm_opp.h>
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
@@ -45,20 +46,6 @@
static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
-static unsigned long dpu_iomap_size(struct platform_device *pdev,
- const char *name)
-{
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
- if (!res) {
- DRM_ERROR("failed to get memory resource: %s\n", name);
- return 0;
- }
-
- return resource_size(res);
-}
-
#ifdef CONFIG_DEBUG_FS
static int _dpu_danger_signal_status(struct seq_file *s,
bool danger_status)
@@ -780,7 +767,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
aspace = msm_gem_address_space_create(mmu, "dpu1",
- 0x1000, 0xfffffff);
+ 0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
mmu->funcs->destroy(mmu);
@@ -844,7 +831,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto error;
}
DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
- dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
@@ -853,22 +839,16 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms->vbif[VBIF_RT] = NULL;
goto error;
}
- dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
- dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
+ dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
dpu_kms->vbif[VBIF_NRT] = NULL;
DPU_DEBUG("VBIF NRT is not defined");
- } else {
- dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
- "vbif_nrt");
}
- dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
+ dpu_kms->reg_dma = msm_ioremap_quiet(dpu_kms->pdev, "regdma", "regdma");
if (IS_ERR(dpu_kms->reg_dma)) {
dpu_kms->reg_dma = NULL;
DPU_DEBUG("REG_DMA is not defined");
- } else {
- dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
}
pm_runtime_get_sync(&dpu_kms->pdev->dev);
@@ -1025,11 +1005,24 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
if (!dpu_kms)
return -ENOMEM;
+ dpu_kms->opp_table = dev_pm_opp_set_clkname(dev, "core");
+ if (IS_ERR(dpu_kms->opp_table))
+ return PTR_ERR(dpu_kms->opp_table);
+ /* OPP table is optional */
+ ret = dev_pm_opp_of_add_table(dev);
+ if (!ret) {
+ dpu_kms->has_opp_table = true;
+ } else if (ret != -ENODEV) {
+ dev_err(dev, "invalid OPP table in device tree\n");
+ dev_pm_opp_put_clkname(dpu_kms->opp_table);
+ return ret;
+ }
+
mp = &dpu_kms->mp;
ret = msm_dss_parse_clock(pdev, mp);
if (ret) {
DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
- return ret;
+ goto err;
}
platform_set_drvdata(pdev, dpu_kms);
@@ -1043,6 +1036,11 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
priv->kms = &dpu_kms->base;
return ret;
+err:
+ if (dpu_kms->has_opp_table)
+ dev_pm_opp_of_remove_table(dev);
+ dev_pm_opp_put_clkname(dpu_kms->opp_table);
+ return ret;
}
static void dpu_unbind(struct device *dev, struct device *master, void *data)
@@ -1057,6 +1055,10 @@ static void dpu_unbind(struct device *dev, struct device *master, void *data)
if (dpu_kms->rpm_enabled)
pm_runtime_disable(&pdev->dev);
+
+ if (dpu_kms->has_opp_table)
+ dev_pm_opp_of_remove_table(dev);
+ dev_pm_opp_put_clkname(dpu_kms->opp_table);
}
static const struct component_ops dpu_ops = {
@@ -1082,6 +1084,8 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
struct dss_module_power *mp = &dpu_kms->mp;
+ /* Drop the performance state vote */
+ dev_pm_opp_set_rate(dev, 0);
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
@@ -1115,6 +1119,8 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
static const struct dev_pm_ops dpu_pm_ops = {
SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id dpu_dt_match[] = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index a3b122bfb676..e140cd633071 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -100,7 +100,6 @@ struct dpu_kms {
/* io/register spaces: */
void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
- unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
struct regulator *vdd;
struct regulator *mmagic;
@@ -128,6 +127,10 @@ struct dpu_kms {
struct platform_device *pdev;
bool rpm_enabled;
+
+ struct opp_table *opp_table;
+ bool has_opp_table;
+
struct dss_module_power mp;
/* reference count bandwidth requests, so we know when we can
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 80d3cfc14007..7d3fdbb00e7e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -15,6 +15,10 @@
#define HW_REV 0x0
#define HW_INTR_STATUS 0x0010
+#define UBWC_STATIC 0x144
+#define UBWC_CTRL_2 0x150
+#define UBWC_PREDICTION_MODE 0x154
+
/* Max BW defined in KBps */
#define MAX_BW 6800000
@@ -23,65 +27,15 @@ struct dpu_irq_controller {
struct irq_domain *domain;
};
-struct dpu_hw_cfg {
- u32 val;
- u32 offset;
-};
-
-struct dpu_mdss_hw_init_handler {
- u32 hw_rev;
- u32 hw_reg_count;
- struct dpu_hw_cfg* hw_cfg;
-};
-
struct dpu_mdss {
struct msm_mdss base;
void __iomem *mmio;
- unsigned long mmio_len;
struct dss_module_power mp;
struct dpu_irq_controller irq_controller;
struct icc_path *path[2];
u32 num_paths;
};
-static struct dpu_hw_cfg hw_cfg[] = {
- {
- /* UBWC global settings */
- .val = 0x1E,
- .offset = 0x144,
- }
-};
-
-static struct dpu_mdss_hw_init_handler cfg_handler[] = {
- { .hw_rev = DPU_HW_VER_620,
- .hw_reg_count = ARRAY_SIZE(hw_cfg),
- .hw_cfg = hw_cfg
- },
-};
-
-static void dpu_mdss_hw_init(struct dpu_mdss *dpu_mdss, u32 hw_rev)
-{
- int i;
- u32 count = 0;
- struct dpu_hw_cfg *hw_cfg = NULL;
-
- for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
- if (cfg_handler[i].hw_rev == hw_rev) {
- hw_cfg = cfg_handler[i].hw_cfg;
- count = cfg_handler[i].hw_reg_count;
- break;
- }
- }
-
- for (i = 0; i < count; i++ ) {
- writel_relaxed(hw_cfg->val,
- dpu_mdss->mmio + hw_cfg->offset);
- hw_cfg++;
- }
-
- return;
-}
-
static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
struct dpu_mdss *dpu_mdss)
{
@@ -224,7 +178,6 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
int ret;
- u32 mdss_rev;
dpu_mdss_icc_request_bw(mdss);
@@ -234,8 +187,25 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
return ret;
}
- mdss_rev = readl_relaxed(dpu_mdss->mmio + HW_REV);
- dpu_mdss_hw_init(dpu_mdss, mdss_rev);
+ /*
+ * ubwc config is part of the "mdss" region which is not accessible
+ * from the rest of the driver. hardcode known configurations here
+ */
+ switch (readl_relaxed(dpu_mdss->mmio + HW_REV)) {
+ case DPU_HW_VER_500:
+ case DPU_HW_VER_501:
+ writel_relaxed(0x420, dpu_mdss->mmio + UBWC_STATIC);
+ break;
+ case DPU_HW_VER_600:
+ /* TODO: 0x102e for LP_DDR4 */
+ writel_relaxed(0x103e, dpu_mdss->mmio + UBWC_STATIC);
+ writel_relaxed(2, dpu_mdss->mmio + UBWC_CTRL_2);
+ writel_relaxed(1, dpu_mdss->mmio + UBWC_PREDICTION_MODE);
+ break;
+ case DPU_HW_VER_620:
+ writel_relaxed(0x1e, dpu_mdss->mmio + UBWC_STATIC);
+ break;
+ }
return ret;
}
@@ -292,7 +262,6 @@ int dpu_mdss_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct msm_drm_private *priv = dev->dev_private;
- struct resource *res;
struct dpu_mdss *dpu_mdss;
struct dss_module_power *mp;
int ret = 0;
@@ -308,13 +277,6 @@ int dpu_mdss_init(struct drm_device *dev)
DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdss");
- if (!res) {
- DRM_ERROR("failed to get memory resource for mdss\n");
- return -ENOMEM;
- }
- dpu_mdss->mmio_len = resource_size(res);
-
ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 3b9c33e694bf..33f6c56f01ed 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -153,7 +153,7 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
pdpu = to_dpu_plane(plane);
pstate = to_dpu_plane_state(plane->state);
- fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size;
+ fixed_buff_size = pdpu->catalog->caps->pixel_ram_size;
list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
if (!tmp->base.state->visible)
@@ -709,7 +709,7 @@ int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
* So we cannot support more than half of the supported SSPP
* width for tiled formats.
*/
- width_threshold = dpu_plane[i]->pipe_sblk->common->maxlinewidth;
+ width_threshold = dpu_plane[i]->catalog->caps->max_linewidth;
if (has_tiled_rect)
width_threshold /= 2;
@@ -887,7 +887,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
fb_rect.x2 = state->fb->width;
fb_rect.y2 = state->fb->height;
- max_linewidth = pdpu->pipe_sblk->common->maxlinewidth;
+ max_linewidth = pdpu->catalog->caps->max_linewidth;
fmt = to_dpu_format(msm_framebuffer_format(state->fb));
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
index 4b36b8954bae..f1d1de578e4c 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 19291d77df40..dbf8d429223e 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -514,7 +514,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
config->iommu);
aspace = msm_gem_address_space_create(mmu,
- "mdp4", 0x1000, 0xffffffff);
+ "mdp4", 0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
if (!IS_ERR(mmu))
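For reference, the new size argument is plain arithmetic rather than a magic number: 0x100000000 - 0x1000 = 0xFFFFF000, so with a start of 0x1000 the address space covers [0x1000, 0xFFFFFFFF], i.e. the full 32-bit IOVA range minus the first page. The old literal 0xffffffff, being a size rather than an end address, pushed start + size past the 32-bit boundary by 0xFFF bytes. The mdp5 path below receives the same fix.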
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
index 784d98989e3a..4cf0953009f7 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 25a13a2a57a9..df10c1ac7591 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -910,6 +910,202 @@ static const struct mdp5_cfg_hw msm8998_config = {
.max_clk = 412500000,
};
+static const struct mdp5_cfg_hw sdm630_config = {
+ .name = "sdm630",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0xf4ffffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2, /* driver supports max of 2 currently */
+ .base = { 0x24000, 0x26000, 0x28000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x46000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ },
+ .nb_stages = 8,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .ad = {
+ .count = 2,
+ .base = { 0x78000, 0x78800 },
+ },
+ .pp = {
+ .count = 3,
+ .base = { 0x70000, 0x71000, 0x72000 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ },
+ },
+ .max_clk = 412500000,
+};
+
+static const struct mdp5_cfg_hw sdm660_config = {
+ .name = "sdm660",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_DSC |
+ MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0xf4ffffff,
+ },
+ .pipe_vig = {
+ .count = 2,
+ .base = { 0x04000, 0x6000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2, /* driver supports max of 2 currently */
+ .base = { 0x24000, 0x26000, 0x28000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 4,
+ .base = { 0x44000, 0x45000, 0x46000, 0x49000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = 3, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 2,
+ .base = { 0x54000, 0x56000 },
+ },
+ .ad = {
+ .count = 2,
+ .base = { 0x78000, 0x78800 },
+ },
+ .pp = {
+ .count = 5,
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800, 0x72000 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .dsc = {
+ .count = 2,
+ .base = { 0x80000, 0x80400 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .max_clk = 412500000,
+};
+
static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
{ .revision = 0, .config = { .hw = &msm8x74v1_config } },
{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
@@ -924,6 +1120,8 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
static const struct mdp5_cfg_handler cfg_handlers_v3[] = {
{ .revision = 0, .config = { .hw = &msm8998_config } },
+ { .revision = 2, .config = { .hw = &sdm660_config } },
+ { .revision = 3, .config = { .hw = &sdm630_config } },
};
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 19ec48695ffb..e193865ce9a2 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -633,7 +633,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mmu = msm_iommu_new(iommu_dev, config->platform.iommu);
aspace = msm_gem_address_space_create(mmu, "mdp5",
- 0x1000, 0xffffffff);
+ 0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
if (!IS_ERR(mmu))
diff --git a/drivers/gpu/drm/msm/disp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
index d420c8044e23..4f51bea8fd3f 100644
--- a/drivers/gpu/drm/msm/disp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 55ea4bc2ee9c..627048851d99 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -161,6 +161,8 @@ static const struct of_device_id dt_match[] = {
static const struct dev_pm_ops dsi_pm_ops = {
SET_RUNTIME_PM_OPS(msm_dsi_runtime_suspend, msm_dsi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver dsi_driver = {
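The sleep ops added above let system suspend/resume reuse the driver's runtime-PM callbacks instead of duplicating them. A minimal sketch of the pattern, with hypothetical callback names standing in for the msm ones:

    static const struct dev_pm_ops example_pm_ops = {
            SET_RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
            /* pm_runtime_force_suspend()/resume() invoke the runtime-PM
             * callbacks on system suspend/resume, skipping devices that
             * are already runtime-suspended. */
            SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                    pm_runtime_force_resume)
    };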
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 21f489a737d7..8e536e060070 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -148,7 +148,31 @@ static inline uint32_t DSI_6G_HW_VERSION_STEP(uint32_t val)
#define DSI_STATUS0_INTERLEAVE_OP_CONTENTION 0x80000000
#define REG_DSI_FIFO_STATUS 0x00000008
+#define DSI_FIFO_STATUS_VIDEO_MDP_FIFO_OVERFLOW 0x00000001
+#define DSI_FIFO_STATUS_VIDEO_MDP_FIFO_UNDERFLOW 0x00000008
#define DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW 0x00000080
+#define DSI_FIFO_STATUS_CMD_DMA_FIFO_RD_WATERMARK_REACH 0x00000100
+#define DSI_FIFO_STATUS_CMD_DMA_FIFO_WR_WATERMARK_REACH 0x00000200
+#define DSI_FIFO_STATUS_CMD_DMA_FIFO_UNDERFLOW 0x00000400
+#define DSI_FIFO_STATUS_DLN0_LP_FIFO_EMPTY 0x00001000
+#define DSI_FIFO_STATUS_DLN0_LP_FIFO_FULL 0x00002000
+#define DSI_FIFO_STATUS_DLN0_LP_FIFO_OVERFLOW 0x00004000
+#define DSI_FIFO_STATUS_DLN0_HS_FIFO_EMPTY 0x00010000
+#define DSI_FIFO_STATUS_DLN0_HS_FIFO_FULL 0x00020000
+#define DSI_FIFO_STATUS_DLN0_HS_FIFO_OVERFLOW 0x00040000
+#define DSI_FIFO_STATUS_DLN0_HS_FIFO_UNDERFLOW 0x00080000
+#define DSI_FIFO_STATUS_DLN1_HS_FIFO_EMPTY 0x00100000
+#define DSI_FIFO_STATUS_DLN1_HS_FIFO_FULL 0x00200000
+#define DSI_FIFO_STATUS_DLN1_HS_FIFO_OVERFLOW 0x00400000
+#define DSI_FIFO_STATUS_DLN1_HS_FIFO_UNDERFLOW 0x00800000
+#define DSI_FIFO_STATUS_DLN2_HS_FIFO_EMPTY 0x01000000
+#define DSI_FIFO_STATUS_DLN2_HS_FIFO_FULL 0x02000000
+#define DSI_FIFO_STATUS_DLN2_HS_FIFO_OVERFLOW 0x04000000
+#define DSI_FIFO_STATUS_DLN2_HS_FIFO_UNDERFLOW 0x08000000
+#define DSI_FIFO_STATUS_DLN3_HS_FIFO_EMPTY 0x10000000
+#define DSI_FIFO_STATUS_DLN3_HS_FIFO_FULL 0x20000000
+#define DSI_FIFO_STATUS_DLN3_HS_FIFO_OVERFLOW 0x40000000
+#define DSI_FIFO_STATUS_DLN3_HS_FIFO_UNDERFLOW 0x80000000
#define REG_DSI_VID_CFG0 0x0000000c
#define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003
@@ -318,38 +342,72 @@ static inline uint32_t DSI_CMD_CFG1_WR_MEM_CONTINUE(uint32_t val)
#define REG_DSI_DMA_LEN 0x00000048
-#define REG_DSI_CMD_MDP_STREAM_CTRL 0x00000054
-#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK 0x0000003f
-#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT 0
-static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(uint32_t val)
+#define REG_DSI_CMD_MDP_STREAM0_CTRL 0x00000054
+#define DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__MASK 0x0000003f
+#define DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__SHIFT 0
+static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE(uint32_t val)
{
- return ((val) << DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK;
+ return ((val) << DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__MASK;
}
-#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300
-#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT 8
-static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(uint32_t val)
+#define DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300
+#define DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__SHIFT 8
+static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL(uint32_t val)
{
- return ((val) << DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK;
+ return ((val) << DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__MASK;
}
-#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK 0xffff0000
-#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT 16
-static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(uint32_t val)
+#define DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__MASK 0xffff0000
+#define DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__SHIFT 16
+static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(uint32_t val)
{
- return ((val) << DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK;
+ return ((val) << DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__MASK;
}
-#define REG_DSI_CMD_MDP_STREAM_TOTAL 0x00000058
-#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK 0x00000fff
-#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT 0
-static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(uint32_t val)
+#define REG_DSI_CMD_MDP_STREAM0_TOTAL 0x00000058
+#define DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__MASK 0x00000fff
+#define DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__SHIFT 0
+static inline uint32_t DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL(uint32_t val)
{
- return ((val) << DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK;
+ return ((val) << DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__MASK;
}
-#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK 0x0fff0000
-#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT 16
-static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(uint32_t val)
+#define DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__MASK 0x0fff0000
+#define DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__SHIFT 16
+static inline uint32_t DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL(uint32_t val)
{
- return ((val) << DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK;
+ return ((val) << DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__MASK;
+}
+
+#define REG_DSI_CMD_MDP_STREAM1_CTRL 0x0000005c
+#define DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__MASK 0x0000003f
+#define DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__SHIFT 0
+static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__MASK;
+}
+#define DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300
+#define DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__SHIFT 8
+static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__MASK;
+}
+#define DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__MASK 0xffff0000
+#define DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__SHIFT 16
+static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__MASK;
+}
+
+#define REG_DSI_CMD_MDP_STREAM1_TOTAL 0x00000060
+#define DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__MASK 0x0000ffff
+#define DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__SHIFT 0
+static inline uint32_t DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__MASK;
+}
+#define DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__MASK 0xffff0000
+#define DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__SHIFT 16
+static inline uint32_t DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__MASK;
}
#define REG_DSI_ACK_ERR_STATUS 0x00000064
@@ -389,6 +447,35 @@ static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val)
#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 0x00001000
#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1 0x00010000
+#define REG_DSI_LP_TIMER_CTRL 0x000000b4
+#define DSI_LP_TIMER_CTRL_LP_RX_TO__MASK 0x0000ffff
+#define DSI_LP_TIMER_CTRL_LP_RX_TO__SHIFT 0
+static inline uint32_t DSI_LP_TIMER_CTRL_LP_RX_TO(uint32_t val)
+{
+ return ((val) << DSI_LP_TIMER_CTRL_LP_RX_TO__SHIFT) & DSI_LP_TIMER_CTRL_LP_RX_TO__MASK;
+}
+#define DSI_LP_TIMER_CTRL_BTA_TO__MASK 0xffff0000
+#define DSI_LP_TIMER_CTRL_BTA_TO__SHIFT 16
+static inline uint32_t DSI_LP_TIMER_CTRL_BTA_TO(uint32_t val)
+{
+ return ((val) << DSI_LP_TIMER_CTRL_BTA_TO__SHIFT) & DSI_LP_TIMER_CTRL_BTA_TO__MASK;
+}
+
+#define REG_DSI_HS_TIMER_CTRL 0x000000b8
+#define DSI_HS_TIMER_CTRL_HS_TX_TO__MASK 0x0000ffff
+#define DSI_HS_TIMER_CTRL_HS_TX_TO__SHIFT 0
+static inline uint32_t DSI_HS_TIMER_CTRL_HS_TX_TO(uint32_t val)
+{
+ return ((val) << DSI_HS_TIMER_CTRL_HS_TX_TO__SHIFT) & DSI_HS_TIMER_CTRL_HS_TX_TO__MASK;
+}
+#define DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__MASK 0x000f0000
+#define DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__SHIFT 16
+static inline uint32_t DSI_HS_TIMER_CTRL_TIMER_RESOLUTION(uint32_t val)
+{
+ return ((val) << DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__SHIFT) & DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__MASK;
+}
+#define DSI_HS_TIMER_CTRL_HS_TX_TO_STOP_EN 0x10000000
+
#define REG_DSI_TIMEOUT_STATUS 0x000000bc
#define REG_DSI_CLKOUT_TIMING_CTRL 0x000000c0
@@ -409,6 +496,19 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
#define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND 0x00000001
#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010
+#define REG_DSI_LANE_STATUS 0x000000a4
+#define DSI_LANE_STATUS_DLN0_STOPSTATE 0x00000001
+#define DSI_LANE_STATUS_DLN1_STOPSTATE 0x00000002
+#define DSI_LANE_STATUS_DLN2_STOPSTATE 0x00000004
+#define DSI_LANE_STATUS_DLN3_STOPSTATE 0x00000008
+#define DSI_LANE_STATUS_CLKLN_STOPSTATE 0x00000010
+#define DSI_LANE_STATUS_DLN0_ULPS_ACTIVE_NOT 0x00000100
+#define DSI_LANE_STATUS_DLN1_ULPS_ACTIVE_NOT 0x00000200
+#define DSI_LANE_STATUS_DLN2_ULPS_ACTIVE_NOT 0x00000400
+#define DSI_LANE_STATUS_DLN3_ULPS_ACTIVE_NOT 0x00000800
+#define DSI_LANE_STATUS_CLKLN_ULPS_ACTIVE_NOT 0x00001000
+#define DSI_LANE_STATUS_DLN0_DIRECTION 0x00010000
+
#define REG_DSI_LANE_CTRL 0x000000a8
#define DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST 0x10000000
@@ -436,6 +536,21 @@ static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
#define DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK 0x00000200
#define REG_DSI_CLK_STATUS 0x0000011c
+#define DSI_CLK_STATUS_DSI_AON_AHBM_HCLK_ACTIVE 0x00000001
+#define DSI_CLK_STATUS_DSI_DYN_AHBM_HCLK_ACTIVE 0x00000002
+#define DSI_CLK_STATUS_DSI_AON_AHBS_HCLK_ACTIVE 0x00000004
+#define DSI_CLK_STATUS_DSI_DYN_AHBS_HCLK_ACTIVE 0x00000008
+#define DSI_CLK_STATUS_DSI_AON_DSICLK_ACTIVE 0x00000010
+#define DSI_CLK_STATUS_DSI_DYN_DSICLK_ACTIVE 0x00000020
+#define DSI_CLK_STATUS_DSI_AON_BYTECLK_ACTIVE 0x00000040
+#define DSI_CLK_STATUS_DSI_DYN_BYTECLK_ACTIVE 0x00000080
+#define DSI_CLK_STATUS_DSI_AON_ESCCLK_ACTIVE 0x00000100
+#define DSI_CLK_STATUS_DSI_AON_PCLK_ACTIVE 0x00000200
+#define DSI_CLK_STATUS_DSI_DYN_PCLK_ACTIVE 0x00000400
+#define DSI_CLK_STATUS_DSI_DYN_CMD_PCLK_ACTIVE 0x00001000
+#define DSI_CLK_STATUS_DSI_CMD_PCLK_ACTIVE 0x00002000
+#define DSI_CLK_STATUS_DSI_VID_PCLK_ACTIVE 0x00004000
+#define DSI_CLK_STATUS_DSI_CAM_BIST_PCLK_ACT 0x00008000
#define DSI_CLK_STATUS_PLL_UNLOCKED 0x00010000
#define REG_DSI_PHY_RESET 0x00000128
@@ -444,6 +559,51 @@ static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
#define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c
#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001
+#define REG_DSI_CMD_MODE_MDP_CTRL2 0x000001b4
+#define DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__MASK 0x0000000f
+#define DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__SHIFT 0
+static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2(enum dsi_cmd_dst_format val)
+{
+ return ((val) << DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__MASK;
+}
+#define DSI_CMD_MODE_MDP_CTRL2_R_SEL 0x00000010
+#define DSI_CMD_MODE_MDP_CTRL2_G_SEL 0x00000020
+#define DSI_CMD_MODE_MDP_CTRL2_B_SEL 0x00000040
+#define DSI_CMD_MODE_MDP_CTRL2_BYTE_MSB_LSB_FLIP 0x00000080
+#define DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__MASK 0x00000700
+#define DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__SHIFT 8
+static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP(enum dsi_rgb_swap val)
+{
+ return ((val) << DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__MASK;
+}
+#define DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__MASK 0x00007000
+#define DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__SHIFT 12
+static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP(enum dsi_rgb_swap val)
+{
+ return ((val) << DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__MASK;
+}
+#define DSI_CMD_MODE_MDP_CTRL2_BURST_MODE 0x00010000
+
+#define REG_DSI_CMD_MODE_MDP_STREAM2_CTRL 0x000001b8
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__MASK 0x0000003f
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__SHIFT 0
+static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE(uint32_t val)
+{
+ return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__MASK;
+}
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__SHIFT 8
+static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__MASK;
+}
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__MASK 0xffff0000
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__SHIFT 16
+static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT(uint32_t val)
+{
+ return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__MASK;
+}
+
#define REG_DSI_RDBK_DATA_CTRL 0x000001d0
#define DSI_RDBK_DATA_CTRL_COUNT__MASK 0x00ff0000
#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT 16
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 813d69deb5e8..f892f2cbe8bb 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -149,6 +149,25 @@ static const struct msm_dsi_config msm8998_dsi_cfg = {
.num_dsi = 2,
};
+static const char * const dsi_sdm660_bus_clk_names[] = {
+ "iface", "bus", "core", "core_mmss",
+};
+
+static const struct msm_dsi_config sdm660_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 2,
+ .regs = {
+ {"vdd", 73400, 32 }, /* 0.9 V */
+ {"vdda", 12560, 4 }, /* 1.2 V */
+ },
+ },
+ .bus_clk_names = dsi_sdm660_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_sdm660_bus_clk_names),
+ .io_start = { 0xc994000, 0xc996000 },
+ .num_dsi = 2,
+};
+
static const char * const dsi_sdm845_bus_clk_names[] = {
"iface", "bus",
};
@@ -240,6 +259,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_2,
&msm8976_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_1_0,
+ &sdm660_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0,
&msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 217e24a65178..efd469d1db45 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -18,6 +18,7 @@
#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001
#define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002
+#define MSM_DSI_6G_VER_MINOR_V2_1_0 0x20010000
#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 66ca0c009cfa..b17ac6c27554 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -14,6 +14,7 @@
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_opp.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
@@ -111,6 +112,9 @@ struct msm_dsi_host {
struct clk *pixel_clk_src;
struct clk *byte_intf_clk;
+ struct opp_table *opp_table;
+ bool has_opp_table;
+
u32 byte_clk_rate;
u32 pixel_clk_rate;
u32 esc_clk_rate;
@@ -512,9 +516,10 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
DBG("Set clk rates: pclk=%d, byteclk=%d",
msm_host->mode->clock, msm_host->byte_clk_rate);
- ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
+ ret = dev_pm_opp_set_rate(&msm_host->pdev->dev,
+ msm_host->byte_clk_rate);
if (ret) {
- pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
+ pr_err("%s: dev_pm_opp_set_rate failed %d\n", __func__, ret);
return ret;
}
@@ -658,6 +663,8 @@ error:
void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
{
+ /* Drop the performance state vote */
+ dev_pm_opp_set_rate(&msm_host->pdev->dev, 0);
clk_disable_unprepare(msm_host->esc_clk);
clk_disable_unprepare(msm_host->pixel_clk);
if (msm_host->byte_intf_clk)
@@ -986,16 +993,16 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
/* image data and 1 byte write_memory_start cmd */
wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
- dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
- DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
- DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
+ dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_CTRL,
+ DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(wc) |
+ DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL(
msm_host->channel) |
- DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
+ DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE(
MIPI_DSI_DCS_LONG_WRITE));
- dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
- DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(hdisplay) |
- DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
+ dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_TOTAL,
+ DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL(hdisplay) |
+ DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL(mode->vdisplay));
}
}
@@ -1879,6 +1886,19 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
+ msm_host->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "byte");
+ if (IS_ERR(msm_host->opp_table))
+ return PTR_ERR(msm_host->opp_table);
+ /* OPP table is optional */
+ ret = dev_pm_opp_of_add_table(&pdev->dev);
+ if (!ret) {
+ msm_host->has_opp_table = true;
+ } else if (ret != -ENODEV) {
+ dev_err(&pdev->dev, "invalid OPP table in device tree\n");
+ dev_pm_opp_put_clkname(msm_host->opp_table);
+ return ret;
+ }
+
init_completion(&msm_host->dma_comp);
init_completion(&msm_host->video_comp);
mutex_init(&msm_host->dev_mutex);
@@ -1914,6 +1934,9 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
mutex_destroy(&msm_host->cmd_mutex);
mutex_destroy(&msm_host->dev_mutex);
+ if (msm_host->has_opp_table)
+ dev_pm_opp_of_remove_table(&msm_host->pdev->dev);
+ dev_pm_opp_put_clkname(msm_host->opp_table);
pm_runtime_disable(&msm_host->pdev->dev);
}
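Condensed, the OPP wiring added to dsi_host.c follows the usual kernel sequence; a sketch of the flow with names shortened and error handling trimmed:

    /* init: bind the OPP table to the "byte" clock; the table itself is optional */
    opp_table = dev_pm_opp_set_clkname(dev, "byte");
    ret = dev_pm_opp_of_add_table(dev);     /* -ENODEV just means no table in DT */

    /* clock enable path: one call sets the byte clock rate and the
     * performance-state vote that goes with it */
    dev_pm_opp_set_rate(dev, byte_clk_rate);

    /* clock disable path: rate 0 drops the performance-state vote */
    dev_pm_opp_set_rate(dev, 0);

    /* teardown, mirroring init */
    if (has_opp_table)
            dev_pm_opp_of_remove_table(dev);
    dev_pm_opp_put_clkname(opp_table);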
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 874265314413..4e8660c3eb08 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index f509ebd77500..009f5b843dd1 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -499,6 +499,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
{ .compatible = "qcom,dsi-phy-14nm",
.data = &dsi_phy_14nm_cfgs },
+ { .compatible = "qcom,dsi-phy-14nm-660",
+ .data = &dsi_phy_14nm_660_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
{ .compatible = "qcom,dsi-phy-10nm",
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 24b294ed3059..ef8672d7b123 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -45,6 +45,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 1594f1422372..519400501bcd 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -161,3 +161,21 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
.io_start = { 0x994400, 0x996400 },
.num_dsi_phy = 2,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
+ .type = MSM_DSI_PHY_14NM,
+ .src_pll_truthtable = { {false, false}, {true, false} },
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vcca", 17000, 32},
+ },
+ },
+ .ops = {
+ .enable = dsi_14nm_phy_enable,
+ .disable = dsi_14nm_phy_disable,
+ .init = dsi_14nm_phy_init,
+ },
+ .io_start = { 0xc994400, 0xc996000 },
+ .num_dsi_phy = 2,
+};
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 07c48ddb5301..a3849220fc6a 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index 9cb6e6fe9810..7aed6cf27790 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 3eff3ea3b271..72c95b60b829 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index 7717d4269662..85be1b1f19a4 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
-
-Copyright (C) 2013-2018 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 42301 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41874 bytes, from 2020-07-23 21:58:14)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2020-07-23 21:58:14)
+
+Copyright (C) 2013-2020 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c981cc10aebf..36d98d4116ca 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -120,8 +120,8 @@ struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
return clk;
}
-void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
- const char *dbgname)
+void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name,
+ const char *dbgname, bool quiet)
{
struct resource *res;
unsigned long size;
@@ -133,7 +133,8 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
+ if (!quiet)
+ DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
return ERR_PTR(-EINVAL);
}
@@ -141,7 +142,8 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
ptr = devm_ioremap(&pdev->dev, res->start, size);
if (!ptr) {
- DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
+ if (!quiet)
+ DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
return ERR_PTR(-ENOMEM);
}
@@ -151,6 +153,18 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
return ptr;
}
+void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
+ const char *dbgname)
+{
+ return _msm_ioremap(pdev, name, dbgname, false);
+}
+
+void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name,
+ const char *dbgname)
+{
+ return _msm_ioremap(pdev, name, dbgname, true);
+}
+
void msm_writel(u32 data, void __iomem *addr)
{
if (reglog)
@@ -238,10 +252,8 @@ static int msm_drm_uninit(struct device *dev)
/* clean up event worker threads */
for (i = 0; i < priv->num_crtcs; i++) {
- if (priv->event_thread[i].thread) {
- kthread_destroy_worker(&priv->event_thread[i].worker);
- priv->event_thread[i].thread = NULL;
- }
+ if (priv->event_thread[i].worker)
+ kthread_destroy_worker(priv->event_thread[i].worker);
}
msm_gem_shrinker_cleanup(ddev);
@@ -504,19 +516,15 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
for (i = 0; i < priv->num_crtcs; i++) {
/* initialize event thread */
priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
- kthread_init_worker(&priv->event_thread[i].worker);
priv->event_thread[i].dev = ddev;
- priv->event_thread[i].thread =
- kthread_run(kthread_worker_fn,
- &priv->event_thread[i].worker,
- "crtc_event:%d", priv->event_thread[i].crtc_id);
- if (IS_ERR(priv->event_thread[i].thread)) {
+ priv->event_thread[i].worker = kthread_create_worker(0,
+ "crtc_event:%d", priv->event_thread[i].crtc_id);
+ if (IS_ERR(priv->event_thread[i].worker)) {
DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
- priv->event_thread[i].thread = NULL;
goto err_msm_uninit;
}
- ret = sched_setscheduler(priv->event_thread[i].thread,
+ ret = sched_setscheduler(priv->event_thread[i].worker->task,
SCHED_FIFO, &param);
if (ret)
dev_warn(dev, "event_thread set priority failed:%d\n",
@@ -1039,75 +1047,70 @@ static struct drm_driver msm_driver = {
.patchlevel = MSM_VERSION_PATCHLEVEL,
};
-#ifdef CONFIG_PM_SLEEP
-static int msm_pm_suspend(struct device *dev)
+static int __maybe_unused msm_runtime_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_mdss *mdss = priv->mdss;
- if (WARN_ON(priv->pm_state))
- drm_atomic_state_put(priv->pm_state);
+ DBG("");
- priv->pm_state = drm_atomic_helper_suspend(ddev);
- if (IS_ERR(priv->pm_state)) {
- int ret = PTR_ERR(priv->pm_state);
- DRM_ERROR("Failed to suspend dpu, %d\n", ret);
- return ret;
- }
+ if (mdss && mdss->funcs)
+ return mdss->funcs->disable(mdss);
return 0;
}
-static int msm_pm_resume(struct device *dev)
+static int __maybe_unused msm_runtime_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
- int ret;
+ struct msm_mdss *mdss = priv->mdss;
- if (WARN_ON(!priv->pm_state))
- return -ENOENT;
+ DBG("");
- ret = drm_atomic_helper_resume(ddev, priv->pm_state);
- if (!ret)
- priv->pm_state = NULL;
+ if (mdss && mdss->funcs)
+ return mdss->funcs->enable(mdss);
- return ret;
+ return 0;
}
-#endif
-#ifdef CONFIG_PM
-static int msm_runtime_suspend(struct device *dev)
+static int __maybe_unused msm_pm_suspend(struct device *dev)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct msm_drm_private *priv = ddev->dev_private;
- struct msm_mdss *mdss = priv->mdss;
- DBG("");
+ if (pm_runtime_suspended(dev))
+ return 0;
- if (mdss && mdss->funcs)
- return mdss->funcs->disable(mdss);
+ return msm_runtime_suspend(dev);
+}
- return 0;
+static int __maybe_unused msm_pm_resume(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return msm_runtime_resume(dev);
}
-static int msm_runtime_resume(struct device *dev)
+static int __maybe_unused msm_pm_prepare(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct msm_drm_private *priv = ddev->dev_private;
- struct msm_mdss *mdss = priv->mdss;
- DBG("");
+ return drm_mode_config_helper_suspend(ddev);
+}
- if (mdss && mdss->funcs)
- return mdss->funcs->enable(mdss);
+static void __maybe_unused msm_pm_complete(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
- return 0;
+ drm_mode_config_helper_resume(ddev);
}
-#endif
static const struct dev_pm_ops msm_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
+ .prepare = msm_pm_prepare,
+ .complete = msm_pm_complete,
};
/*
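For context on the event-thread changes above: the open-coded kthread_init_worker() + kthread_run() pair is replaced by the managed worker API, which creates the worker and its task in one step and tears both down in one call. A minimal sketch of the new lifecycle:

    worker = kthread_create_worker(0, "crtc_event:%d", crtc_id);
    if (IS_ERR(worker))
            return PTR_ERR(worker);

    sched_setscheduler(worker->task, SCHED_FIFO, &param);  /* same priority setup */

    /* on teardown: flushes queued work and stops the task */
    kthread_destroy_worker(worker);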
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e2d6a6056418..af259b0573ea 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -129,9 +129,8 @@ struct msm_display_info {
/* Commit/Event thread specific structure */
struct msm_drm_thread {
struct drm_device *dev;
- struct task_struct *thread;
unsigned int crtc_id;
- struct kthread_worker worker;
+ struct kthread_worker *worker;
};
struct msm_drm_private {
@@ -411,6 +410,8 @@ struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
const char *name);
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname);
+void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name,
+ const char *dbgname);
void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 38b0c0e1f83e..b2f49152b4d4 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -996,10 +996,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
- struct drm_gem_object **obj,
- bool struct_mutex_locked)
+ struct drm_gem_object **obj)
{
- struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
switch (flags & MSM_BO_CACHE_MASK) {
@@ -1025,15 +1023,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
INIT_LIST_HEAD(&msm_obj->submit_entry);
INIT_LIST_HEAD(&msm_obj->vmas);
- if (struct_mutex_locked) {
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
- } else {
- mutex_lock(&dev->struct_mutex);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
- mutex_unlock(&dev->struct_mutex);
- }
-
*obj = &msm_obj->base;
return 0;
@@ -1043,6 +1032,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj;
struct drm_gem_object *obj = NULL;
bool use_vram = false;
int ret;
@@ -1063,14 +1053,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
if (size == 0)
return ERR_PTR(-EINVAL);
- ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
+ ret = msm_gem_new_impl(dev, size, flags, &obj);
if (ret)
goto fail;
+ msm_obj = to_msm_bo(obj);
+
if (use_vram) {
struct msm_gem_vma *vma;
struct page **pages;
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
mutex_lock(&msm_obj->lock);
@@ -1105,6 +1096,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
}
+ if (struct_mutex_locked) {
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+ } else {
+ mutex_lock(&dev->struct_mutex);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
return obj;
fail:
@@ -1127,6 +1127,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt)
{
+ struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj;
uint32_t size;
@@ -1140,7 +1141,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
size = PAGE_ALIGN(dmabuf->size);
- ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
+ ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
if (ret)
goto fail;
@@ -1165,6 +1166,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
}
mutex_unlock(&msm_obj->lock);
+
+ mutex_lock(&dev->struct_mutex);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+ mutex_unlock(&dev->struct_mutex);
+
return obj;
fail:
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 86a138641477..d5645472b25d 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -13,7 +13,6 @@
#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
-#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>
@@ -34,7 +33,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
return PTR_ERR(opp);
if (gpu->funcs->gpu_set_freq)
- gpu->funcs->gpu_set_freq(gpu, (u64)*freq);
+ gpu->funcs->gpu_set_freq(gpu, opp);
else
clk_set_rate(gpu->core_clk, *freq);
@@ -93,7 +92,11 @@ static void msm_devfreq_init(struct msm_gpu *gpu)
/*
* Don't set the freq_table or max_state and let devfreq build the table
* from OPP
+ * After a deferred probe, these may have been left at non-zero values,
+ * so set them back to zero before creating the devfreq device
*/
+ msm_devfreq_profile.freq_table = NULL;
+ msm_devfreq_profile.max_state = 0;
gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
&msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
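
The devfreq hunk above clears msm_devfreq_profile.freq_table and .max_state before devm_devfreq_add_device() because the profile is a static object that an earlier, deferred probe attempt may already have populated. A rough standalone sketch of the same pitfall, assuming a made-up profile struct and register function (this is not the devfreq API):

/* Sketch of why static registration state must be reset before a retry.
 * "profile" and register_profile() are stand-ins, not the devfreq API.
 */
#include <stddef.h>
#include <stdio.h>

struct profile {
	const unsigned long *freq_table;	/* filled in by the framework */
	unsigned int max_state;
};

static struct profile profile;			/* static: survives a failed probe */

static int register_profile(struct profile *p, int simulate_defer)
{
	static const unsigned long freqs[] = { 200000000, 400000000 };

	p->freq_table = freqs;
	p->max_state = 2;

	return simulate_defer ? -11 : 0;	/* -11 models a deferred probe */
}

int main(void)
{
	int ret = register_profile(&profile, 1);	/* first attempt defers */

	if (ret) {
		/* Retry path: clear leftovers so the framework rebuilds them. */
		profile.freq_table = NULL;
		profile.max_state = 0;
		ret = register_profile(&profile, 0);
	}
	printf("ret=%d max_state=%u\n", ret, profile.max_state);
	return 0;
}
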
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 429cb40f7931..0db117a7339b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/interconnect.h>
+#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include "msm_drv.h"
@@ -61,7 +62,7 @@ struct msm_gpu_funcs {
struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
int (*gpu_state_put)(struct msm_gpu_state *state);
unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
- void (*gpu_set_freq)(struct msm_gpu *gpu, unsigned long freq);
+ void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
struct msm_gem_address_space *(*create_address_space)
(struct msm_gpu *gpu, struct platform_device *pdev);
};
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 001fbf537440..a1d94be7883a 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -71,8 +71,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
queue->flags = flags;
if (priv->gpu) {
- if (prio >= priv->gpu->nr_rings)
+ if (prio >= priv->gpu->nr_rings) {
+ kfree(queue);
return -EINVAL;
+ }
queue->prio = prio;
}
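
The msm_submitqueue.c hunk plugs a small leak: when the requested ring priority is out of range, the freshly allocated queue must be freed before returning -EINVAL. A minimal standalone illustration of that allocate/validate/free-on-error shape (MAX_PRIO and create_queue() are invented names):

/* Minimal illustration of freeing the allocation on the error path.
 * create_queue() and MAX_PRIO are illustrative, not the msm ioctl code.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_PRIO 3

struct queue { unsigned int prio; };

static int create_queue(unsigned int prio, struct queue **out)
{
	struct queue *q = calloc(1, sizeof(*q));

	if (!q)
		return -ENOMEM;

	if (prio >= MAX_PRIO) {
		free(q);		/* don't leak on invalid input */
		return -EINVAL;
	}

	q->prio = prio;
	*out = q;
	return 0;
}

int main(void)
{
	struct queue *q = NULL;

	printf("bad prio  -> %d\n", create_queue(99, &q));
	printf("good prio -> %d\n", create_queue(1, &q));
	free(q);
	return 0;
}
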
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 49e57fba4925..60586fb8275e 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -32,6 +32,13 @@ nouveau-y += nouveau_vga.o
# DRM - memory management
nouveau-y += nouveau_bo.o
+nouveau-y += nouveau_bo0039.o
+nouveau-y += nouveau_bo5039.o
+nouveau-y += nouveau_bo74c1.o
+nouveau-y += nouveau_bo85b5.o
+nouveau-y += nouveau_bo9039.o
+nouveau-y += nouveau_bo90b5.o
+nouveau-y += nouveau_boa0b5.o
nouveau-y += nouveau_gem.o
nouveau-$(CONFIG_DRM_NOUVEAU_SVM) += nouveau_svm.o
nouveau-$(CONFIG_DRM_NOUVEAU_SVM) += nouveau_dmem.o
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index d6e4ae1ef705..5dec1e5694b7 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -76,6 +76,14 @@ config NOUVEAU_DEBUG_MMU
help
Say Y here if you want to enable verbose MMU debug output.
+config NOUVEAU_DEBUG_PUSH
+ bool "Enable additional push buffer debugging"
+ depends on DRM_NOUVEAU
+ default n
+ help
+ Say Y here if you want to enable verbose push buffer debug output
+ and sanity checks.
+
config DRM_NOUVEAU_BACKLIGHT
bool "Support for backlight control"
depends on DRM_NOUVEAU
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 4989627b7802..6416b6907aeb 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -44,6 +44,8 @@
#include <subdev/bios/pll.h>
#include <subdev/clk.h>
+#include <nvif/push006c.h>
+
#include <nvif/event.h>
#include <nvif/cl0046.h>
@@ -759,7 +761,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- nvif_notify_fini(&nv_crtc->vblank);
+ nvif_notify_dtor(&nv_crtc->vblank);
kfree(nv_crtc);
}
@@ -1105,6 +1107,7 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_drm *drm = chan->drm;
struct drm_device *dev = drm->dev;
+ struct nvif_push *push = chan->chan.push;
unsigned long flags;
int ret;
@@ -1119,13 +1122,12 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
goto fail;
/* Emit the pageflip */
- ret = RING_SPACE(chan, 2);
+ ret = PUSH_WAIT(push, 2);
if (ret)
goto fail;
- BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
- OUT_RING (chan, 0x00000000);
- FIRE_RING (chan);
+ PUSH_NVSQ(push, NV_SW, NV_SW_PAGE_FLIP, 0x00000000);
+ PUSH_KICK(push);
ret = nouveau_fence_new(chan, false, pfence);
if (ret)
@@ -1155,6 +1157,7 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_cli *cli;
struct nouveau_fence *fence;
struct nv04_display *dispnv04 = nv04_display(dev);
+ struct nvif_push *push;
int head = nouveau_crtc(crtc)->index;
int ret;
@@ -1162,6 +1165,7 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (!chan)
return -ENODEV;
cli = (void *)chan->user.client;
+ push = chan->chan.push;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
@@ -1203,18 +1207,14 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Emit a page flip */
if (swap_interval) {
- ret = RING_SPACE(chan, 8);
+ ret = PUSH_WAIT(push, 8);
if (ret)
goto fail_unreserve;
- BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
- OUT_RING (chan, 0);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
- OUT_RING (chan, head);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
- OUT_RING (chan, 0);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
- OUT_RING (chan, 0);
+ PUSH_NVSQ(push, NV05F, 0x012c, 0);
+ PUSH_NVSQ(push, NV05F, 0x0134, head);
+ PUSH_NVSQ(push, NV05F, 0x0100, 0);
+ PUSH_NVSQ(push, NV05F, 0x0130, 0);
}
nouveau_bo_ref(new_bo, &dispnv04->image[head]);
@@ -1351,7 +1351,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
nv04_cursor_init(nv_crtc);
- ret = nvif_notify_init(&disp->disp.object, nv04_crtc_vblank_handler,
+ ret = nvif_notify_ctor(&disp->disp.object, "kmsVbl", nv04_crtc_vblank_handler,
false, NV04_DISP_NTFY_VBLANK,
&(struct nvif_notify_head_req_v0) {
.head = nv_crtc->index,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 76be805fc488..900ab69df7e8 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -31,6 +31,7 @@
#include "nouveau_connector.h"
#include "nouveau_bo.h"
#include "nouveau_gem.h"
+#include "nouveau_chan.h"
#include <nvif/if0004.h>
@@ -178,7 +179,7 @@ nv04_display_destroy(struct drm_device *dev)
nouveau_hw_save_vga_fonts(dev, 0);
- nvif_notify_fini(&disp->flip);
+ nvif_notify_dtor(&disp->flip);
nouveau_display(dev)->priv = NULL;
kfree(disp);
@@ -214,8 +215,8 @@ nv04_display_create(struct drm_device *dev)
dev->driver_features &= ~DRIVER_ATOMIC;
/* Request page flip completion event. */
- if (drm->nvsw.client) {
- nvif_notify_init(&drm->nvsw, nv04_flip_complete,
+ if (drm->channel) {
+ nvif_notify_ctor(&drm->channel->nvsw, "kmsFlip", nv04_flip_complete,
false, NV04_NVSW_NTFY_UEVENT,
NULL, 0, 0, &disp->flip);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.h b/drivers/gpu/drm/nouveau/dispnv50/base.h
index e7f14f230f35..085bd3aeb40b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/base.h
@@ -11,14 +11,10 @@ int base507c_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
struct nv50_head_atom *);
void base507c_release(struct nv50_wndw *, struct nv50_wndw_atom *,
struct nv50_head_atom *);
-void base507c_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void base507c_sema_clr(struct nv50_wndw *);
-void base507c_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void base507c_ntfy_clr(struct nv50_wndw *);
-void base507c_xlut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void base507c_xlut_clr(struct nv50_wndw *);
-void base507c_image_clr(struct nv50_wndw *);
-void base507c_update(struct nv50_wndw *, u32 *);
+int base507c_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int base507c_sema_clr(struct nv50_wndw *);
+int base507c_xlut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int base507c_xlut_clr(struct nv50_wndw *);
int base827c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index ba20a7722900..302d4e6fc52f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -23,91 +23,122 @@
#include <nvif/cl507c.h>
#include <nvif/event.h>
+#include <nvif/push507c.h>
#include <nvif/timer.h>
+#include <nvhw/class/cl507c.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include "nouveau_bo.h"
-void
+int
base507c_update(struct nv50_wndw *wndw, u32 *interlock)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 2))) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, interlock[NV50_DISP_INTERLOCK_CORE]);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, UPDATE, interlock[NV50_DISP_INTERLOCK_CORE]);
+ return PUSH_KICK(push);
}
-void
+int
base507c_image_clr(struct nv50_wndw *wndw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 4))) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_PRESENT_CONTROL,
+ NVDEF(NV507C, SET_PRESENT_CONTROL, BEGIN_MODE, NON_TEARING) |
+ NVVAL(NV507C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, 0));
+
+ PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_ISO, 0x00000000);
+ return 0;
}
-static void
+static int
base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 13))) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, asyw->image.mode << 8 |
- asyw->image.interval << 4);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, asyw->image.handle[0]);
- if (asyw->image.format == 0xca) {
- evo_mthd(push, 0x0110, 2);
- evo_data(push, 1);
- evo_data(push, 0x6400);
- } else {
- evo_mthd(push, 0x0110, 2);
- evo_data(push, 0);
- evo_data(push, 0);
- }
- evo_mthd(push, 0x0800, 5);
- evo_data(push, asyw->image.offset[0] >> 8);
- evo_data(push, 0x00000000);
- evo_data(push, asyw->image.h << 16 | asyw->image.w);
- evo_data(push, asyw->image.layout << 20 |
- (asyw->image.pitch[0] >> 8) << 8 |
- asyw->image.blocks[0] << 8 |
- asyw->image.blockh);
- evo_data(push, asyw->image.kind << 16 |
- asyw->image.format << 8);
- evo_kick(push, &wndw->wndw);
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 13)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_PRESENT_CONTROL,
+ NVVAL(NV507C, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVVAL(NV507C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+ PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
+
+ if (asyw->image.format == NV507C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16) {
+ PUSH_MTHD(push, NV507C, SET_PROCESSING,
+ NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, ENABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV507C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV507C, SET_CONVERSION, OFS, 0x64));
+ } else {
+ PUSH_MTHD(push, NV507C, SET_PROCESSING,
+ NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
}
+
+ PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NV507C, SURFACE_SET_SIZE(0),
+ NVVAL(NV507C, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NV507C, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+ SURFACE_SET_STORAGE(0),
+ NVVAL(NV507C, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout) |
+ NVVAL(NV507C, SURFACE_SET_STORAGE, PITCH, asyw->image.pitch[0] >> 8) |
+ NVVAL(NV507C, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NV507C, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
+
+ SURFACE_SET_PARAMS(0),
+ NVVAL(NV507C, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NV507C, SURFACE_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
+ NVDEF(NV507C, SURFACE_SET_PARAMS, GAMMA, LINEAR) |
+ NVDEF(NV507C, SURFACE_SET_PARAMS, LAYOUT, FRM) |
+ NVVAL(NV507C, SURFACE_SET_PARAMS, KIND, asyw->image.kind) |
+ NVDEF(NV507C, SURFACE_SET_PARAMS, PART_STRIDE, PARTSTRIDE_256));
+ return 0;
}
-void
+int
base507c_xlut_clr(struct nv50_wndw *wndw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 2))) {
- evo_mthd(push, 0x00e0, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_BASE_LUT_LO,
+ NVDEF(NV507C, SET_BASE_LUT_LO, ENABLE, DISABLE));
+ return 0;
}
-void
+int
base507c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 2))) {
- evo_mthd(push, 0x00e0, 1);
- evo_data(push, 0x40000000);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_BASE_LUT_LO,
+ NVDEF(NV507C, SET_BASE_LUT_LO, ENABLE, USE_CORE_LUT));
+ return 0;
}
int
@@ -115,66 +146,77 @@ base507c_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
struct nvif_device *device)
{
s64 time = nvif_msec(device, 2000ULL,
- u32 data = nouveau_bo_rd32(bo, offset / 4);
- if ((data & 0xc0000000) == 0x40000000)
+ if (NVBO_TD32(bo, offset, NV_DISP_BASE_NOTIFIER_1, _0, STATUS, ==, BEGUN))
break;
usleep_range(1, 2);
);
return time < 0 ? time : 0;
}
-void
+int
base507c_ntfy_clr(struct nv50_wndw *wndw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 2))) {
- evo_mthd(push, 0x00a4, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_NOTIFIER, 0x00000000);
+ return 0;
}
-void
+int
base507c_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 3))) {
- evo_mthd(push, 0x00a0, 2);
- evo_data(push, asyw->ntfy.awaken << 30 | asyw->ntfy.offset);
- evo_data(push, asyw->ntfy.handle);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 3)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_NOTIFIER_CONTROL,
+ NVVAL(NV507C, SET_NOTIFIER_CONTROL, MODE, asyw->ntfy.awaken) |
+ NVVAL(NV507C, SET_NOTIFIER_CONTROL, OFFSET, asyw->ntfy.offset >> 2),
+
+ SET_CONTEXT_DMA_NOTIFIER, asyw->ntfy.handle);
+ return 0;
}
void
base507c_ntfy_reset(struct nouveau_bo *bo, u32 offset)
{
- nouveau_bo_wr32(bo, offset / 4, 0x00000000);
+ NVBO_WR32(bo, offset, NV_DISP_BASE_NOTIFIER_1, _0,
+ NVDEF(NV_DISP_BASE_NOTIFIER_1, _0, STATUS, NOT_BEGUN));
}
-void
+int
base507c_sema_clr(struct nv50_wndw *wndw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 2))) {
- evo_mthd(push, 0x0094, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_SEMAPHORE, 0x00000000);
+ return 0;
}
-void
+int
base507c_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 5))) {
- evo_mthd(push, 0x0088, 4);
- evo_data(push, asyw->sema.offset);
- evo_data(push, asyw->sema.acquire);
- evo_data(push, asyw->sema.release);
- evo_data(push, asyw->sema.handle);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ PUSH_MTHD(push, NV507C, SET_SEMAPHORE_CONTROL, asyw->sema.offset,
+ SET_SEMAPHORE_ACQUIRE, asyw->sema.acquire,
+ SET_SEMAPHORE_RELEASE, asyw->sema.release,
+ SET_CONTEXT_DMA_SEMAPHORE, asyw->sema.handle);
+ return 0;
}
void
@@ -282,8 +324,9 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
return ret;
}
- ret = nvif_notify_init(&wndw->wndw.base.user, wndw->notify.func,
- false, NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
+ ret = nvif_notify_ctor(&wndw->wndw.base.user, "kmsBaseNtfy",
+ wndw->notify.func, false,
+ NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
&(struct nvif_notify_uevent_req) {},
sizeof(struct nvif_notify_uevent_req),
sizeof(struct nvif_notify_uevent_rep),
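
The base507c.c conversion above is representative of the rest of the nouveau display patches in this series: the evo_wait()/evo_mthd()/evo_data()/evo_kick() sequence becomes PUSH_WAIT()/PUSH_MTHD()/PUSH_KICK() against an nvif_push, and the hooks now return int so a full push buffer surfaces an error instead of the methods being silently dropped. The standalone sketch below models only that control-flow shape; the ring layout, method numbers and helper names are simplified stand-ins, not the real nvif interface.

/* Rough model of the "reserve space, emit methods, propagate errors"
 * shape of the new push-buffer interface. The ring, emit helpers and
 * kick() here are stand-ins and do not mirror the real nvif_push layout.
 */
#include <errno.h>
#include <stdio.h>

#define RING_WORDS 8

struct push {
	unsigned int cur;
	unsigned int data[RING_WORDS];
};

static int push_wait(struct push *p, unsigned int words)
{
	if (RING_WORDS - p->cur < words)
		return -ENOSPC;	/* caller sees the failure, no silent drop */
	return 0;
}

static void push_mthd(struct push *p, unsigned int mthd, unsigned int data)
{
	p->data[p->cur++] = mthd;
	p->data[p->cur++] = data;
}

static int push_kick(struct push *p)
{
	printf("kicked %u words\n", p->cur);
	p->cur = 0;
	return 0;
}

/* Mirrors the shape of base507c_update(): reserve, emit, kick. */
static int update(struct push *p, unsigned int interlock)
{
	int ret;

	if ((ret = push_wait(p, 2)))
		return ret;

	push_mthd(p, 0x0080 /* UPDATE */, interlock);
	return push_kick(p);
}

int main(void)
{
	struct push p = { 0 };

	return update(&p, 0) ? 1 : 0;
}
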
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
index f4c05949dd62..18d34096f125 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -21,36 +21,56 @@
*/
#include "base.h"
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl827c.h>
+
+static int
base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 13))) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, asyw->image.mode << 8 |
- asyw->image.interval << 4);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, asyw->image.handle[0]);
- if (asyw->image.format == 0xca) {
- evo_mthd(push, 0x0110, 2);
- evo_data(push, 1);
- evo_data(push, 0x6400);
- } else {
- evo_mthd(push, 0x0110, 2);
- evo_data(push, 0);
- evo_data(push, 0);
- }
- evo_mthd(push, 0x0800, 5);
- evo_data(push, asyw->image.offset[0] >> 8);
- evo_data(push, 0x00000000);
- evo_data(push, asyw->image.h << 16 | asyw->image.w);
- evo_data(push, asyw->image.layout << 20 |
- (asyw->image.pitch[0] >> 8) << 8 |
- asyw->image.blocks[0] << 8 |
- asyw->image.blockh);
- evo_data(push, asyw->image.format << 8);
- evo_kick(push, &wndw->wndw);
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 13)))
+ return ret;
+
+ PUSH_MTHD(push, NV827C, SET_PRESENT_CONTROL,
+ NVVAL(NV827C, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVVAL(NV827C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+ PUSH_MTHD(push, NV827C, SET_CONTEXT_DMAS_ISO(0), asyw->image.handle, 1);
+
+ if (asyw->image.format == NV827C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16) {
+ PUSH_MTHD(push, NV827C, SET_PROCESSING,
+ NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, ENABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV827C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV827C, SET_CONVERSION, OFS, 0x64));
+ } else {
+ PUSH_MTHD(push, NV827C, SET_PROCESSING,
+ NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
}
+
+ PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
+ SURFACE_SET_OFFSET(0, 1), 0x00000000,
+
+ SURFACE_SET_SIZE(0),
+ NVVAL(NV827C, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NV827C, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+ SURFACE_SET_STORAGE(0),
+ NVVAL(NV827C, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+ NVVAL(NV827C, SURFACE_SET_STORAGE, PITCH, asyw->image.pitch[0] >> 8) |
+ NVVAL(NV827C, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NV827C, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+ SURFACE_SET_PARAMS(0),
+ NVVAL(NV827C, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NV827C, SURFACE_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
+ NVDEF(NV827C, SURFACE_SET_PARAMS, GAMMA, LINEAR) |
+ NVDEF(NV827C, SURFACE_SET_PARAMS, LAYOUT, FRM));
+ return 0;
}
static const struct nv50_wndw_func
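
The NVVAL()/NVDEF() calls above replace open-coded shifts such as "asyw->image.mode << 8 | asyw->image.interval << 4" with field packing driven by the generated nvhw class headers, where each field is spelled as a high:low bit pair. The standalone sketch below shows one way such macros can work, using the ternary trick (0 ? hi : lo) to pick either end of the pair; the DEMO_* names and field positions are invented and need not match the real nvhw/drf.h definitions.

/* Sketch of header-driven bitfield packing in the NVVAL()/NVDEF() style.
 * Field names and bit positions are invented for illustration; the real
 * definitions come from the generated nvhw class headers.
 */
#include <stdio.h>

/* "Header" part: each field is written as high:low. */
#define DEMO_PRESENT_CONTROL_BEGIN_MODE            11:8
#define DEMO_PRESENT_CONTROL_MIN_PRESENT_INTERVAL   7:4

/* (0 ? 11:8) evaluates to 8, (1 ? 11:8) evaluates to 11. */
#define DEMO_LO(f)     (0 ? f)
#define DEMO_HI(f)     (1 ? f)
#define DEMO_MASK(f)   (((~0u) >> (31 - DEMO_HI(f))) & ((~0u) << DEMO_LO(f)))
#define DEMO_VAL(f, v) (((v) << DEMO_LO(f)) & DEMO_MASK(f))

int main(void)
{
	unsigned int ctrl =
		DEMO_VAL(DEMO_PRESENT_CONTROL_BEGIN_MODE, 1) |
		DEMO_VAL(DEMO_PRESENT_CONTROL_MIN_PRESENT_INTERVAL, 2);

	printf("ctrl = 0x%08x\n", ctrl);	/* prints 0x00000120 */
	return 0;
}
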
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index 224a34c340fe..5396e3707cc4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -21,58 +21,86 @@
*/
#include "base.h"
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907c.h>
+
+static int
base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 10))) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, asyw->image.mode << 8 |
- asyw->image.interval << 4);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, asyw->image.handle[0]);
- evo_mthd(push, 0x0400, 5);
- evo_data(push, asyw->image.offset[0] >> 8);
- evo_data(push, 0x00000000);
- evo_data(push, asyw->image.h << 16 | asyw->image.w);
- evo_data(push, asyw->image.layout << 24 |
- (asyw->image.pitch[0] >> 8) << 8 |
- asyw->image.blocks[0] << 8 |
- asyw->image.blockh);
- evo_data(push, asyw->image.format << 8);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 10)))
+ return ret;
+
+ PUSH_MTHD(push, NV907C, SET_PRESENT_CONTROL,
+ NVVAL(NV907C, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVDEF(NV907C, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE) |
+ NVVAL(NV907C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+ PUSH_MTHD(push, NV907C, SET_CONTEXT_DMAS_ISO(0), asyw->image.handle, 1);
+
+ PUSH_MTHD(push, NV907C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
+ SURFACE_SET_OFFSET(0, 1), 0x00000000,
+
+ SURFACE_SET_SIZE(0),
+ NVVAL(NV907C, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NV907C, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+ SURFACE_SET_STORAGE(0),
+ NVVAL(NV907C, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+ NVVAL(NV907C, SURFACE_SET_STORAGE, PITCH, asyw->image.pitch[0] >> 8) |
+ NVVAL(NV907C, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NV907C, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+ SURFACE_SET_PARAMS(0),
+ NVVAL(NV907C, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NV907C, SURFACE_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
+ NVDEF(NV907C, SURFACE_SET_PARAMS, GAMMA, LINEAR) |
+ NVDEF(NV907C, SURFACE_SET_PARAMS, LAYOUT, FRM));
+ return 0;
}
-static void
+static int
base907c_xlut_clr(struct nv50_wndw *wndw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 6))) {
- evo_mthd(push, 0x00e0, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00e8, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00fc, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 6)))
+ return ret;
+
+ PUSH_MTHD(push, NV907C, SET_BASE_LUT_LO,
+ NVDEF(NV907C, SET_BASE_LUT_LO, ENABLE, DISABLE));
+
+ PUSH_MTHD(push, NV907C, SET_OUTPUT_LUT_LO,
+ NVDEF(NV907C, SET_OUTPUT_LUT_LO, ENABLE, DISABLE));
+
+ PUSH_MTHD(push, NV907C, SET_CONTEXT_DMA_LUT, 0x00000000);
+ return 0;
}
-static void
+static int
base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 6))) {
- evo_mthd(push, 0x00e0, 3);
- evo_data(push, asyw->xlut.i.enable << 30 |
- asyw->xlut.i.mode << 24);
- evo_data(push, asyw->xlut.i.offset >> 8);
- evo_data(push, 0x40000000);
- evo_mthd(push, 0x00fc, 1);
- evo_data(push, asyw->xlut.handle);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 6)))
+ return ret;
+
+ PUSH_MTHD(push, NV907C, SET_BASE_LUT_LO,
+ NVVAL(NV907C, SET_BASE_LUT_LO, ENABLE, asyw->xlut.i.enable) |
+ NVVAL(NV907C, SET_BASE_LUT_LO, MODE, asyw->xlut.i.mode),
+
+ SET_BASE_LUT_HI, asyw->xlut.i.offset >> 8,
+
+ SET_OUTPUT_LUT_LO,
+ NVDEF(NV907C, SET_OUTPUT_LUT_LO, ENABLE, USE_CORE_LUT));
+
+ PUSH_MTHD(push, NV907C, SET_CONTEXT_DMA_LUT, asyw->xlut.handle);
+ return 0;
}
static bool
@@ -81,8 +109,12 @@ base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
if (size != 256 && size != 1024)
return false;
- asyw->xlut.i.mode = size == 1024 ? 4 : 7;
- asyw->xlut.i.enable = 2;
+ if (size == 1024)
+ asyw->xlut.i.mode = NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE;
+ else
+ asyw->xlut.i.mode = NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE;
+
+ asyw->xlut.i.enable = NV907C_SET_BASE_LUT_LO_ENABLE_ENABLE;
asyw->xlut.i.load = head907d_olut_load;
return true;
}
@@ -125,28 +157,35 @@ base907c_csc(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
}
}
-static void
+static int
base907c_csc_clr(struct nv50_wndw *wndw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 2))) {
- evo_mthd(push, 0x0140, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907C, SET_CSC_RED2RED,
+ NVDEF(NV907C, SET_CSC_RED2RED, OWNER, CORE));
+ return 0;
}
-static void
+static int
base907c_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push, i;
- if ((push = evo_wait(&wndw->wndw, 13))) {
- evo_mthd(push, 0x0140, 12);
- evo_data(push, asyw->csc.matrix[0] | 0x80000000);
- for (i = 1; i < 12; i++)
- evo_data(push, asyw->csc.matrix[i]);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 13)))
+ return ret;
+
+ PUSH_MTHD(push, NV907C, SET_CSC_RED2RED,
+ NVDEF(NV907C, SET_CSC_RED2RED, OWNER, BASE) |
+ NVVAL(NV907C, SET_CSC_RED2RED, COEFF, asyw->csc.matrix[0]),
+
+ SET_CSC_GRN2RED, &asyw->csc.matrix[1], 11);
+ return 0;
}
const struct nv50_wndw_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index e021cb340569..498622c0c670 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -15,15 +15,15 @@ int nv50_core_new(struct nouveau_drm *, struct nv50_core **);
void nv50_core_del(struct nv50_core **);
struct nv50_core_func {
- void (*init)(struct nv50_core *);
+ int (*init)(struct nv50_core *);
void (*ntfy_init)(struct nouveau_bo *, u32 offset);
int (*caps_init)(struct nouveau_drm *, struct nv50_disp *);
int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
- void (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
+ int (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
struct {
- void (*owner)(struct nv50_core *);
+ int (*owner)(struct nv50_core *);
} wndw;
const struct nv50_head_func *head;
@@ -31,7 +31,7 @@ struct nv50_core_func {
const struct nv50_crc_func *crc;
#endif
const struct nv50_outp_func {
- void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
+ int (*ctrl)(struct nv50_core *, int or, u32 ctrl,
struct nv50_head_atom *);
/* XXX: Only used by SORs and PIORs for now */
void (*get_caps)(struct nv50_disp *,
@@ -42,11 +42,11 @@ struct nv50_core_func {
int core507d_new(struct nouveau_drm *, s32, struct nv50_core **);
int core507d_new_(const struct nv50_core_func *, struct nouveau_drm *, s32,
struct nv50_core **);
-void core507d_init(struct nv50_core *);
+int core507d_init(struct nv50_core *);
void core507d_ntfy_init(struct nouveau_bo *, u32);
int core507d_caps_init(struct nouveau_drm *, struct nv50_disp *);
int core507d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
-void core507d_update(struct nv50_core *, u32 *, bool);
+int core507d_update(struct nv50_core *, u32 *, bool);
extern const struct nv50_outp_func dac507d;
extern const struct nv50_outp_func sor507d;
@@ -63,8 +63,8 @@ int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
int corec37d_caps_init(struct nouveau_drm *, struct nv50_disp *);
int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
-void corec37d_update(struct nv50_core *, u32 *, bool);
-void corec37d_wndw_owner(struct nv50_core *);
+int corec37d_update(struct nv50_core *, u32 *, bool);
+int corec37d_wndw_owner(struct nv50_core *);
extern const struct nv50_outp_func sorc37d;
int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index 1d66f694b945..ad1f09a143aa 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -23,25 +23,36 @@
#include "head.h"
#include <nvif/cl507d.h>
+#include <nvif/push507c.h>
#include <nvif/timer.h>
+#include <nvhw/class/cl507d.h>
+
#include "nouveau_bo.h"
-void
+int
core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
{
- u32 *push;
- if ((push = evo_wait(&core->chan, 5))) {
- if (ntfy) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x80000000 | NV50_DISP_CORE_NTFY);
- }
- evo_mthd(push, 0x0080, 2);
- evo_data(push, interlock[NV50_DISP_INTERLOCK_BASE] |
- interlock[NV50_DISP_INTERLOCK_OVLY]);
- evo_data(push, 0x00000000);
- evo_kick(push, &core->chan);
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ if (ntfy) {
+ PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL,
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
+ NVVAL(NV507D, SET_NOTIFIER_CONTROL, OFFSET, NV50_DISP_CORE_NTFY >> 2) |
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
}
+
+ PUSH_MTHD(push, NV507D, UPDATE, interlock[NV50_DISP_INTERLOCK_BASE] |
+ interlock[NV50_DISP_INTERLOCK_OVLY] |
+ NVDEF(NV507D, UPDATE, NOT_DRIVER_FRIENDLY, FALSE) |
+ NVDEF(NV507D, UPDATE, NOT_DRIVER_UNFRIENDLY, FALSE) |
+ NVDEF(NV507D, UPDATE, INHIBIT_INTERRUPTS, FALSE));
+
+ return PUSH_KICK(push);
}
int
@@ -49,7 +60,7 @@ core507d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
struct nvif_device *device)
{
s64 time = nvif_msec(device, 2000ULL,
- if (nouveau_bo_rd32(bo, offset / 4))
+ if (NVBO_TD32(bo, offset, NV_DISP_CORE_NOTIFIER_1, COMPLETION_0, DONE, ==, TRUE))
break;
usleep_range(1, 2);
);
@@ -59,32 +70,34 @@ core507d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
void
core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
{
- nouveau_bo_wr32(bo, offset / 4, 0x00000000);
+ NVBO_WR32(bo, offset, NV_DISP_CORE_NOTIFIER_1, COMPLETION_0,
+ NVDEF(NV_DISP_CORE_NOTIFIER_1, COMPLETION_0, DONE, FALSE));
}
int
core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
{
- u32 *push = evo_wait(&disp->core->chan, 2);
+ struct nvif_push *push = disp->core->chan.push;
+ int ret;
- if (push) {
- evo_mthd(push, 0x008c, 1);
- evo_data(push, 0x0);
- evo_kick(push, &disp->core->chan);
- }
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
- return 0;
+ PUSH_MTHD(push, NV507D, GET_CAPABILITIES, 0x00000000);
+ return PUSH_KICK(push);
}
-void
+int
core507d_init(struct nv50_core *core)
{
- u32 *push;
- if ((push = evo_wait(&core->chan, 2))) {
- evo_mthd(push, 0x0088, 1);
- evo_data(push, core->chan.sync.handle);
- evo_kick(push, &core->chan);
- }
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, SET_CONTEXT_DMA_NOTIFIER, core->chan.sync.handle);
+ return PUSH_KICK(push);
}
static const struct nv50_core_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index ec83189a1d48..9035d3ab062c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -23,56 +23,67 @@
#include "head.h"
#include <nvif/class.h>
-#include <nouveau_bo.h>
-
+#include <nvif/pushc37b.h>
#include <nvif/timer.h>
-void
+#include <nvhw/class/clc37d.h>
+
+#include <nouveau_bo.h>
+
+int
corec37d_wndw_owner(struct nv50_core *core)
{
+ struct nvif_push *push = core->chan.push;
const u32 windows = 8; /*XXX*/
- u32 *push, i;
- if ((push = evo_wait(&core->chan, 2 * windows))) {
- for (i = 0; i < windows; i++) {
- evo_mthd(push, 0x1000 + (i * 0x080), 1);
- evo_data(push, i >> 1);
- }
- evo_kick(push, &core->chan);
+ int ret, i;
+
+ if ((ret = PUSH_WAIT(push, windows * 2)))
+ return ret;
+
+ for (i = 0; i < windows; i++) {
+ PUSH_MTHD(push, NVC37D, WINDOW_SET_CONTROL(i),
+ NVDEF(NVC37D, WINDOW_SET_CONTROL, OWNER, HEAD(i >> 1)));
}
+
+ return 0;
}
-void
+int
corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
{
- u32 *push;
- if ((push = evo_wait(&core->chan, 9))) {
- if (ntfy) {
- evo_mthd(push, 0x020c, 1);
- evo_data(push, 0x00001000 | NV50_DISP_CORE_NTFY);
- }
-
- evo_mthd(push, 0x0218, 2);
- evo_data(push, interlock[NV50_DISP_INTERLOCK_CURS]);
- evo_data(push, interlock[NV50_DISP_INTERLOCK_WNDW]);
- evo_mthd(push, 0x0200, 1);
- evo_data(push, 0x00000001);
-
- if (ntfy) {
- evo_mthd(push, 0x020c, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, &core->chan);
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 9)))
+ return ret;
+
+ if (ntfy) {
+ PUSH_MTHD(push, NVC37D, SET_NOTIFIER_CONTROL,
+ NVDEF(NVC37D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
+ NVVAL(NVC37D, SET_NOTIFIER_CONTROL, OFFSET, NV50_DISP_CORE_NTFY >> 4) |
+ NVDEF(NVC37D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
}
+
+ PUSH_MTHD(push, NVC37D, SET_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_CURS],
+ SET_WINDOW_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_WNDW]);
+ PUSH_MTHD(push, NVC37D, UPDATE, 0x00000001 |
+ NVDEF(NVC37D, UPDATE, SPECIAL_HANDLING, NONE) |
+ NVDEF(NVC37D, UPDATE, INHIBIT_INTERRUPTS, FALSE));
+
+ if (ntfy) {
+ PUSH_MTHD(push, NVC37D, SET_NOTIFIER_CONTROL,
+ NVDEF(NVC37D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
+ }
+
+ return PUSH_KICK(push);
}
int
corec37d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
struct nvif_device *device)
{
- u32 data;
s64 time = nvif_msec(device, 2000ULL,
- data = nouveau_bo_rd32(bo, offset / 4 + 0);
- if ((data & 0xc0000000) == 0x80000000)
+ if (NVBO_TD32(bo, offset, NV_DISP_NOTIFIER, _0, STATUS, ==, FINISHED))
break;
usleep_range(1, 2);
);
@@ -82,18 +93,19 @@ corec37d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
void
corec37d_ntfy_init(struct nouveau_bo *bo, u32 offset)
{
- nouveau_bo_wr32(bo, offset / 4 + 0, 0x00000000);
- nouveau_bo_wr32(bo, offset / 4 + 1, 0x00000000);
- nouveau_bo_wr32(bo, offset / 4 + 2, 0x00000000);
- nouveau_bo_wr32(bo, offset / 4 + 3, 0x00000000);
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _0,
+ NVDEF(NV_DISP_NOTIFIER, _0, STATUS, NOT_BEGUN));
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _1, 0);
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _2, 0);
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _3, 0);
}
int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
{
int ret;
- ret = nvif_object_init(&disp->disp->object, 0, GV100_DISP_CAPS,
- NULL, 0, &disp->caps);
+ ret = nvif_object_ctor(&disp->disp->object, "dispCaps", 0,
+ GV100_DISP_CAPS, NULL, 0, &disp->caps);
if (ret) {
NV_ERROR(drm,
"Failed to init notifier caps region: %d\n",
@@ -112,24 +124,37 @@ int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
return 0;
}
-static void
+static int
corec37d_init(struct nv50_core *core)
{
+ struct nvif_push *push = core->chan.push;
const u32 windows = 8; /*XXX*/
- u32 *push, i;
- if ((push = evo_wait(&core->chan, 2 + 5 * windows))) {
- evo_mthd(push, 0x0208, 1);
- evo_data(push, core->chan.sync.handle);
- for (i = 0; i < windows; i++) {
- evo_mthd(push, 0x1004 + (i * 0x080), 2);
- evo_data(push, 0x0000001f);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x1010 + (i * 0x080), 1);
- evo_data(push, 0x00127fff);
- }
- evo_kick(push, &core->chan);
- core->assign_windows = true;
+ int ret, i;
+
+ if ((ret = PUSH_WAIT(push, 2 + windows * 5)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, SET_CONTEXT_DMA_NOTIFIER, core->chan.sync.handle);
+
+ for (i = 0; i < windows; i++) {
+ PUSH_MTHD(push, NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(i),
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED1BPP, TRUE) |
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED2BPP, TRUE) |
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED4BPP, TRUE) |
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED8BPP, TRUE) |
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, YUV_PACKED422, TRUE),
+
+ WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(i), 0x00000000);
+
+ PUSH_MTHD(push, NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS(i),
+ NVVAL(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, MAX_PIXELS_FETCHED_PER_LINE, 0x7fff) |
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_LUT, USAGE_1025) |
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_SCALER_TAPS, TAPS_2) |
+ NVDEF(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, UPSCALING_ALLOWED, FALSE));
}
+
+ core->assign_windows = true;
+ return PUSH_KICK(push);
}
static const struct nv50_core_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index e1c11eba0ce1..75876546eac1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -22,24 +22,40 @@
#include "core.h"
#include "head.h"
-static void
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57d.h>
+
+static int
corec57d_init(struct nv50_core *core)
{
+ struct nvif_push *push = core->chan.push;
const u32 windows = 8; /*XXX*/
- u32 *push, i;
- if ((push = evo_wait(&core->chan, 2 + 5 * windows))) {
- evo_mthd(push, 0x0208, 1);
- evo_data(push, core->chan.sync.handle);
- for (i = 0; i < windows; i++) {
- evo_mthd(push, 0x1004 + (i * 0x080), 2);
- evo_data(push, 0x0000000f);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x1010 + (i * 0x080), 1);
- evo_data(push, 0x00117fff);
- }
- evo_kick(push, &core->chan);
- core->assign_windows = true;
+ int ret, i;
+
+ if ((ret = PUSH_WAIT(push, 2 + windows * 5)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57D, SET_CONTEXT_DMA_NOTIFIER, core->chan.sync.handle);
+
+ for (i = 0; i < windows; i++) {
+ PUSH_MTHD(push, NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(i),
+ NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED1BPP, TRUE) |
+ NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED2BPP, TRUE) |
+ NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED4BPP, TRUE) |
+ NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED8BPP, TRUE),
+
+ WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(i), 0x00000000);
+
+ PUSH_MTHD(push, NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS(i),
+ NVVAL(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, MAX_PIXELS_FETCHED_PER_LINE, 0x7fff) |
+ NVDEF(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, ILUT_ALLOWED, TRUE) |
+ NVDEF(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_SCALER_TAPS, TAPS_2) |
+ NVDEF(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, UPSCALING_ALLOWED, FALSE));
}
+
+ core->assign_windows = true;
+ return PUSH_KICK(push);
}
static const struct nv50_core_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c
index f17fb6d56757..b8c31b697797 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c
@@ -9,6 +9,8 @@
#include <nvif/cl0002.h>
#include <nvif/timer.h>
+#include <nvhw/class/cl907d.h>
+
#include "nouveau_drv.h"
#include "core.h"
#include "head.h"
@@ -478,10 +480,6 @@ void nv50_crc_atomic_clr(struct nv50_head *head)
func->set_src(head, 0, NV50_CRC_SOURCE_TYPE_NONE, NULL, 0);
}
-#define NV50_CRC_RASTER_ACTIVE 0
-#define NV50_CRC_RASTER_COMPLETE 1
-#define NV50_CRC_RASTER_INACTIVE 2
-
static inline int
nv50_crc_raster_type(enum nv50_crc_source source)
{
@@ -490,11 +488,11 @@ nv50_crc_raster_type(enum nv50_crc_source source)
case NV50_CRC_SOURCE_AUTO:
case NV50_CRC_SOURCE_RG:
case NV50_CRC_SOURCE_OUTP_ACTIVE:
- return NV50_CRC_RASTER_ACTIVE;
+ return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER;
case NV50_CRC_SOURCE_OUTP_COMPLETE:
- return NV50_CRC_RASTER_COMPLETE;
+ return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER;
case NV50_CRC_SOURCE_OUTP_INACTIVE:
- return NV50_CRC_RASTER_INACTIVE;
+ return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER;
}
return 0;
@@ -510,11 +508,11 @@ nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
struct nv50_core *core = nv50_disp(head->base.base.dev)->core;
int ret;
- ret = nvif_mem_init_map(mmu, NVIF_MEM_VRAM, len, &ctx->mem);
+ ret = nvif_mem_ctor_map(mmu, "kmsCrcNtfy", NVIF_MEM_VRAM, len, &ctx->mem);
if (ret)
return ret;
- ret = nvif_object_init(&core->chan.base.user,
+ ret = nvif_object_ctor(&core->chan.base.user, "kmsCrcNtfyCtxDma",
NV50_DISP_HANDLE_CRC_CTX(head, idx),
NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
@@ -531,15 +529,15 @@ nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
return 0;
fail_fini:
- nvif_mem_fini(&ctx->mem);
+ nvif_mem_dtor(&ctx->mem);
return ret;
}
static inline void
nv50_crc_ctx_fini(struct nv50_crc_notifier_ctx *ctx)
{
- nvif_object_fini(&ctx->ntfy);
- nvif_mem_fini(&ctx->mem);
+ nvif_object_dtor(&ctx->ntfy);
+ nvif_mem_dtor(&ctx->mem);
}
int nv50_crc_set_source(struct drm_crtc *crtc, const char *source_str)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.h b/drivers/gpu/drm/nouveau/dispnv50/crc.h
index 4bc59e779315..4fce871b04c8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.h
@@ -50,9 +50,9 @@ struct nv50_crc_atom {
};
struct nv50_crc_func {
- void (*set_src)(struct nv50_head *, int or, enum nv50_crc_source_type,
- struct nv50_crc_notifier_ctx *, u32 wndw);
- void (*set_ctx)(struct nv50_head *, struct nv50_crc_notifier_ctx *);
+ int (*set_src)(struct nv50_head *, int or, enum nv50_crc_source_type,
+ struct nv50_crc_notifier_ctx *, u32 wndw);
+ int (*set_ctx)(struct nv50_head *, struct nv50_crc_notifier_ctx *);
u32 (*get_entry)(struct nv50_head *, struct nv50_crc_notifier_ctx *,
enum nv50_crc_source, int idx);
bool (*ctx_finished)(struct nv50_head *,
@@ -106,26 +106,27 @@ struct nv50_crc_atom {};
#define nv50_crc_set_source NULL
static inline void nv50_crc_init(struct drm_device *dev) {}
-static inline int nv50_head_crc_late_register(struct nv50_head *) {}
-static inline void
-nv50_crc_handle_vblank(struct nv50_head *head) { return 0; }
+static inline int
+nv50_head_crc_late_register(struct nv50_head *head) { return 0; }
+static inline void nv50_crc_handle_vblank(struct nv50_head *head) {}
static inline int
-nv50_crc_atomic_check_head(struct nv50_head *, struct nv50_head_atom *,
- struct nv50_head_atom *) {}
+nv50_crc_atomic_check_head(struct nv50_head *head,
+ struct nv50_head_atom *asyh,
+ struct nv50_head_atom *armh) { return 0; }
static inline void nv50_crc_atomic_check_outp(struct nv50_atom *atom) {}
static inline void
-nv50_crc_atomic_stop_reporting(struct drm_atomic_state *) {}
+nv50_crc_atomic_stop_reporting(struct drm_atomic_state *state) {}
static inline void
-nv50_crc_atomic_init_notifier_contexts(struct drm_atomic_state *) {}
+nv50_crc_atomic_init_notifier_contexts(struct drm_atomic_state *state) {}
static inline void
-nv50_crc_atomic_release_notifier_contexts(struct drm_atomic_state *) {}
+nv50_crc_atomic_release_notifier_contexts(struct drm_atomic_state *state) {}
static inline void
-nv50_crc_atomic_start_reporting(struct drm_atomic_state *) {}
+nv50_crc_atomic_start_reporting(struct drm_atomic_state *state) {}
static inline void
-nv50_crc_atomic_set(struct nv50_head *, struct nv50_head_atom *) {}
+nv50_crc_atomic_set(struct nv50_head *head, struct nv50_head_atom *state) {}
static inline void
-nv50_crc_atomic_clr(struct nv50_head *) {}
+nv50_crc_atomic_clr(struct nv50_head *head) {}
#endif /* IS_ENABLED(CONFIG_DEBUG_FS) */
#endif /* !__NV50_CRC_H__ */
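
The crc.h hunk repairs the !CONFIG_DEBUG_FS stubs, which previously had unnamed parameters and mismatched return statements (a void stub returning 0, an int stub returning nothing). The usual pattern is sketched below in standalone form: real declarations behind the config option, static inline no-ops with matching signatures otherwise, so callers need no #ifdefs (CONFIG_DEMO_STATS and the demo_stats_* names are invented).

/* Standalone illustration of config-gated stubs: when the feature is
 * compiled out, static inline no-ops with matching signatures keep the
 * callers free of #ifdefs. CONFIG_DEMO_STATS and these names are invented.
 */
#include <stdio.h>

struct head { int index; };

#ifdef CONFIG_DEMO_STATS
int  demo_stats_register(struct head *head);	/* real implementations */
void demo_stats_handle_vblank(struct head *head);
#else
static inline int demo_stats_register(struct head *head)
{
	return 0;	/* the int stub returns a value ... */
}

static inline void demo_stats_handle_vblank(struct head *head)
{
	/* ... and the void stub returns nothing. */
}
#endif

int main(void)
{
	struct head h = { 0 };

	demo_stats_handle_vblank(&h);
	return demo_stats_register(&h);
}
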
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc907d.c b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
index 92e907de7645..0fb0fdb9f119 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
@@ -6,6 +6,10 @@
#include "disp.h"
#include "head.h"
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907d.h>
+
#define CRC907D_MAX_ENTRIES 255
struct crc907d_notifier {
@@ -18,68 +22,67 @@ struct crc907d_notifier {
} entries[CRC907D_MAX_ENTRIES];
} __packed;
-static void
+static int
crc907d_set_src(struct nv50_head *head, int or,
enum nv50_crc_source_type source,
struct nv50_crc_notifier_ctx *ctx, u32 wndw)
{
- struct drm_crtc *crtc = &head->base.base;
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- const u32 hoff = head->base.index * 0x300;
- u32 *push;
- u32 crc_args = 0xfff00000;
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ u32 crc_args = NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, TIMESTAMP_MODE, FALSE) |
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, SECONDARY_OUTPUT, NONE) |
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE);
+ int ret;
switch (source) {
case NV50_CRC_SOURCE_TYPE_SOR:
- crc_args |= (0x00000f0f + or * 16) << 8;
+ crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, SOR(or));
break;
case NV50_CRC_SOURCE_TYPE_PIOR:
- crc_args |= (0x000000ff + or * 256) << 8;
+ crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, PIOR(or));
break;
case NV50_CRC_SOURCE_TYPE_DAC:
- crc_args |= (0x00000ff0 + or) << 8;
+ crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, DAC(or));
break;
case NV50_CRC_SOURCE_TYPE_RG:
- crc_args |= (0x00000ff8 + drm_crtc_index(crtc)) << 8;
+ crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, RG(i));
break;
case NV50_CRC_SOURCE_TYPE_SF:
- crc_args |= (0x00000f8f + drm_crtc_index(crtc) * 16) << 8;
+ crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, SF(i));
break;
case NV50_CRC_SOURCE_NONE:
- crc_args |= 0x000fff00;
+ crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, NONE);
break;
}
- push = evo_wait(core, 4);
- if (!push)
- return;
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
if (source) {
- evo_mthd(push, 0x0438 + hoff, 1);
- evo_data(push, ctx->ntfy.handle);
- evo_mthd(push, 0x0430 + hoff, 1);
- evo_data(push, crc_args);
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx->ntfy.handle);
+ PUSH_MTHD(push, NV907D, HEAD_SET_CRC_CONTROL(i), crc_args);
} else {
- evo_mthd(push, 0x0430 + hoff, 1);
- evo_data(push, crc_args);
- evo_mthd(push, 0x0438 + hoff, 1);
- evo_data(push, 0);
+ PUSH_MTHD(push, NV907D, HEAD_SET_CRC_CONTROL(i), crc_args);
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CRC(i), 0);
}
- evo_kick(push, core);
+
+ return 0;
}
-static void crc907d_set_ctx(struct nv50_head *head,
- struct nv50_crc_notifier_ctx *ctx)
+static int
+crc907d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push = evo_wait(core, 2);
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
- if (!push)
- return;
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
- evo_mthd(push, 0x0438 + (head->base.index * 0x300), 1);
- evo_data(push, ctx ? ctx->ntfy.handle : 0);
- evo_kick(push, core);
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx ? ctx->ntfy.handle : 0);
+ return 0;
}
static u32 crc907d_get_entry(struct nv50_head *head,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
index 940cefd5517d..9afe9a87bde0 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
@@ -6,6 +6,10 @@
#include "disp.h"
#include "head.h"
+#include <nvif/push507c.h>
+
+#include <nvhw/class/clc37d.h>
+
#define CRCC37D_MAX_ENTRIES 2047
struct crcc37d_notifier {
@@ -30,62 +34,59 @@ struct crcc37d_notifier {
} entries[CRCC37D_MAX_ENTRIES];
} __packed;
-static void
+static int
crcc37d_set_src(struct nv50_head *head, int or,
enum nv50_crc_source_type source,
struct nv50_crc_notifier_ctx *ctx, u32 wndw)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- const u32 hoff = head->base.index * 0x400;
- u32 *push;
- u32 crc_args;
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ u32 crc_args = NVVAL(NVC37D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, wndw) |
+ NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
+ NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) |
+ NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE);
+ int ret;
switch (source) {
case NV50_CRC_SOURCE_TYPE_SOR:
- crc_args = (0x00000050 + or) << 12;
+ crc_args |= NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SOR(or));
break;
case NV50_CRC_SOURCE_TYPE_PIOR:
- crc_args = (0x00000060 + or) << 12;
+ crc_args |= NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, PIOR(or));
break;
case NV50_CRC_SOURCE_TYPE_SF:
- crc_args = 0x00000030 << 12;
+ crc_args |= NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SF);
break;
default:
- crc_args = 0;
break;
}
- push = evo_wait(core, 4);
- if (!push)
- return;
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
if (source) {
- evo_mthd(push, 0x2180 + hoff, 1);
- evo_data(push, ctx->ntfy.handle);
- evo_mthd(push, 0x2184 + hoff, 1);
- evo_data(push, crc_args | wndw);
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx->ntfy.handle);
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CRC_CONTROL(i), crc_args);
} else {
- evo_mthd(push, 0x2184 + hoff, 1);
- evo_data(push, 0);
- evo_mthd(push, 0x2180 + hoff, 1);
- evo_data(push, 0);
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CRC_CONTROL(i), 0);
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CRC(i), 0);
}
- evo_kick(push, core);
+ return 0;
}
-static void crcc37d_set_ctx(struct nv50_head *head,
- struct nv50_crc_notifier_ctx *ctx)
+static int
+crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push = evo_wait(core, 2);
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
- if (!push)
- return;
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
- evo_mthd(push, 0x2180 + (head->base.index * 0x400), 1);
- evo_data(push, ctx ? ctx->ntfy.handle : 0);
- evo_kick(push, core);
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx ? ctx->ntfy.handle : 0);
+ return 0;
}
static u32 crcc37d_get_entry(struct nv50_head *head,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 658a200ab616..54fbd6fe751d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -26,6 +26,8 @@
#include <nvif/cl507a.h>
#include <nvif/timer.h>
+#include <nvhw/class/cl507a.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
@@ -33,27 +35,37 @@ bool
curs507a_space(struct nv50_wndw *wndw)
{
nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 100,
- if (nvif_rd32(&wndw->wimm.base.user, 0x0008) >= 4)
+ if (NVIF_TV32(&wndw->wimm.base.user, NV507A, FREE, COUNT, >=, 4))
return true;
);
+
WARN_ON(1);
return false;
}
-static void
+static int
curs507a_update(struct nv50_wndw *wndw, u32 *interlock)
{
- if (curs507a_space(wndw))
- nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000);
+ struct nvif_object *user = &wndw->wimm.base.user;
+ int ret = nvif_chan_wait(&wndw->wimm, 1);
+ if (ret == 0) {
+ NVIF_WR32(user, NV507A, UPDATE,
+ NVDEF(NV507A, UPDATE, INTERLOCK_WITH_CORE, DISABLE));
+ }
+ return ret;
}
-static void
+static int
curs507a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- if (curs507a_space(wndw)) {
- nvif_wr32(&wndw->wimm.base.user, 0x0084, asyw->point.y << 16 |
- asyw->point.x);
+ struct nvif_object *user = &wndw->wimm.base.user;
+ int ret = nvif_chan_wait(&wndw->wimm, 1);
+ if (ret == 0) {
+ NVIF_WR32(user, NV507A, SET_CURSOR_HOT_SPOT_POINT_OUT,
+ NVVAL(NV507A, SET_CURSOR_HOT_SPOT_POINT_OUT, X, asyw->point.x) |
+ NVVAL(NV507A, SET_CURSOR_HOT_SPOT_POINT_OUT, Y, asyw->point.y));
}
+ return ret;
}
const struct nv50_wimm_func
@@ -138,8 +150,8 @@ curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
if (*pwndw = wndw, ret)
return ret;
- ret = nvif_object_init(&disp->disp->object, 0, oclass, &args,
- sizeof(args), &wndw->wimm.base.user);
+ ret = nvif_object_ctor(&disp->disp->object, "kmsCurs", 0, oclass,
+ &args, sizeof(args), &wndw->wimm.base.user);
if (ret) {
NV_ERROR(drm, "curs%04x allocation failed: %d\n", oclass, ret);
return ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
index 96dff4f09f57..e39d08698c63 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
@@ -22,20 +22,29 @@
#include "curs.h"
#include "atom.h"
-static void
+#include <nvhw/class/clc37a.h>
+
+static int
cursc37a_update(struct nv50_wndw *wndw, u32 *interlock)
{
- if (curs507a_space(wndw))
- nvif_wr32(&wndw->wimm.base.user, 0x0200, 0x00000001);
+ struct nvif_object *user = &wndw->wimm.base.user;
+ int ret = nvif_chan_wait(&wndw->wimm, 1);
+ if (ret == 0)
+ NVIF_WR32(user, NVC37A, UPDATE, 0x00000001);
+ return ret;
}
-static void
+static int
cursc37a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- if (curs507a_space(wndw)) {
- nvif_wr32(&wndw->wimm.base.user, 0x0208, asyw->point.y << 16 |
- asyw->point.x);
+ struct nvif_object *user = &wndw->wimm.base.user;
+ int ret = nvif_chan_wait(&wndw->wimm, 1);
+ if (ret == 0) {
+ NVIF_WR32(user, NVC37A, SET_CURSOR_HOT_SPOT_POINT_OUT(0),
+ NVVAL(NVC37A, SET_CURSOR_HOT_SPOT_POINT_OUT, X, asyw->point.x) |
+ NVVAL(NVC37A, SET_CURSOR_HOT_SPOT_POINT_OUT, Y, asyw->point.y));
}
+ return ret;
}
static const struct nv50_wimm_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/dac507d.c b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
index 2a10ef7d30a8..09de78d96679 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
@@ -21,21 +21,29 @@
*/
#include "core.h"
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl507d.h>
+
+static int
dac507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- u32 *push, sync = 0;
- if ((push = evo_wait(&core->chan, 3))) {
- if (asyh) {
- sync |= asyh->or.nvsync << 1;
- sync |= asyh->or.nhsync;
- }
- evo_mthd(push, 0x0400 + (or * 0x080), 2);
- evo_data(push, ctrl);
- evo_data(push, sync);
- evo_kick(push, &core->chan);
+ struct nvif_push *push = core->chan.push;
+ u32 sync = 0;
+ int ret;
+
+ if (asyh) {
+ sync |= NVVAL(NV507D, DAC_SET_POLARITY, HSYNC, asyh->or.nhsync);
+ sync |= NVVAL(NV507D, DAC_SET_POLARITY, VSYNC, asyh->or.nvsync);
}
+
+ if ((ret = PUSH_WAIT(push, 3)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, DAC_SET_CONTROL(or), ctrl,
+ DAC_SET_POLARITY(or), sync);
+ return 0;
}
const struct nv50_outp_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/dac907d.c b/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
index 11e87fa53fac..95efa625b691 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
@@ -21,16 +21,22 @@
*/
#include "core.h"
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907d.h>
+
+static int
dac907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- u32 *push;
- if ((push = evo_wait(&core->chan, 2))) {
- evo_mthd(push, 0x0180 + (or * 0x020), 1);
- evo_data(push, ctrl);
- evo_kick(push, &core->chan);
- }
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, DAC_SET_CONTROL(or), ctrl);
+ return 0;
}
const struct nv50_outp_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index cd71b9876c8a..64bac6718457 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -41,6 +41,8 @@
#include <drm/drm_scdc_helper.h>
#include <drm/drm_vblank.h>
+#include <nvif/push507c.h>
+
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/cl5070.h>
@@ -48,6 +50,13 @@
#include <nvif/event.h>
#include <nvif/timer.h>
+#include <nvhw/class/cl507c.h>
+#include <nvhw/class/cl507d.h>
+#include <nvhw/class/cl837d.h>
+#include <nvhw/class/cl887d.h>
+#include <nvhw/class/cl907d.h>
+#include <nvhw/class/cl917d.h>
+
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
@@ -79,8 +88,9 @@ nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
while (oclass[0]) {
for (i = 0; i < n; i++) {
if (sclass[i].oclass == oclass[0]) {
- ret = nvif_object_init(disp, 0, oclass[0],
- data, size, &chan->user);
+ ret = nvif_object_ctor(disp, "kmsChan", 0,
+ oclass[0], data, size,
+ &chan->user);
if (ret == 0)
nvif_object_map(&chan->user, NULL, 0);
nvif_object_sclass_put(&sclass);
@@ -97,7 +107,7 @@ nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
static void
nv50_chan_destroy(struct nv50_chan *chan)
{
- nvif_object_fini(&chan->user);
+ nvif_object_dtor(&chan->user);
}
/******************************************************************************
@@ -107,12 +117,106 @@ nv50_chan_destroy(struct nv50_chan *chan)
void
nv50_dmac_destroy(struct nv50_dmac *dmac)
{
- nvif_object_fini(&dmac->vram);
- nvif_object_fini(&dmac->sync);
+ nvif_object_dtor(&dmac->vram);
+ nvif_object_dtor(&dmac->sync);
nv50_chan_destroy(&dmac->base);
- nvif_mem_fini(&dmac->push);
+ nvif_mem_dtor(&dmac->_push.mem);
+}
+
+static void
+nv50_dmac_kick(struct nvif_push *push)
+{
+ struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
+
+ dmac->cur = push->cur - (u32 *)dmac->_push.mem.object.map.ptr;
+ if (dmac->put != dmac->cur) {
+ /* Push buffer fetches are not coherent with BAR1, we need to ensure
+ * writes have been flushed right through to VRAM before writing PUT.
+ */
+ if (dmac->push->mem.type & NVIF_MEM_VRAM) {
+ struct nvif_device *device = dmac->base.device;
+ nvif_wr32(&device->object, 0x070000, 0x00000001);
+ nvif_msec(device, 2000,
+ if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+ break;
+ );
+ }
+
+ NVIF_WV32(&dmac->base.user, NV507C, PUT, PTR, dmac->cur);
+ dmac->put = dmac->cur;
+ }
+
+ push->bgn = push->cur;
+}
+
+static int
+nv50_dmac_free(struct nv50_dmac *dmac)
+{
+ u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
+ if (get > dmac->cur) /* NVIDIA stays 5 away from GET; do the same. */
+ return get - dmac->cur - 5;
+ return dmac->max - dmac->cur;
+}
+
+static int
+nv50_dmac_wind(struct nv50_dmac *dmac)
+{
+ /* Wait for GET to depart from the beginning of the push buffer to
+ * prevent writing PUT == GET, which would be ignored by HW.
+ */
+ u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
+ if (get == 0) {
+ /* Corner-case, HW idle, but non-committed work pending. */
+ if (dmac->put == 0)
+ nv50_dmac_kick(dmac->push);
+
+ if (nvif_msec(dmac->base.device, 2000,
+ if (NVIF_TV32(&dmac->base.user, NV507C, GET, PTR, >, 0))
+ break;
+ ) < 0)
+ return -ETIMEDOUT;
+ }
+
+ PUSH_RSVD(dmac->push, PUSH_JUMP(dmac->push, 0));
+ dmac->cur = 0;
+ return 0;
+}
+
+static int
+nv50_dmac_wait(struct nvif_push *push, u32 size)
+{
+ struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
+ int free;
+
+ if (WARN_ON(size > dmac->max))
+ return -EINVAL;
+
+ dmac->cur = push->cur - (u32 *)dmac->_push.mem.object.map.ptr;
+ if (dmac->cur + size >= dmac->max) {
+ int ret = nv50_dmac_wind(dmac);
+ if (ret)
+ return ret;
+
+ push->cur = dmac->_push.mem.object.map.ptr;
+ push->cur = push->cur + dmac->cur;
+ nv50_dmac_kick(push);
+ }
+
+ if (nvif_msec(dmac->base.device, 2000,
+ if ((free = nv50_dmac_free(dmac)) >= size)
+ break;
+ ) < 0) {
+ WARN_ON(1);
+ return -ETIMEDOUT;
+ }
+
+ push->bgn = dmac->_push.mem.object.map.ptr;
+ push->bgn = push->bgn + dmac->cur;
+ push->cur = push->bgn;
+ push->end = push->cur + free;
+ return 0;
}
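For reference (not part of the patch): the free-space rule implemented by nv50_dmac_free() above keeps five dwords of headroom behind the hardware GET pointer when GET is ahead of the CPU write cursor, and otherwise counts the space up to the end of the buffer, with nv50_dmac_wind() jumping back to offset 0 before the next reservation. A standalone sketch of just that calculation, using illustrative names, looks like this:
#include <linux/types.h>
/* Hypothetical helper mirroring nv50_dmac_free(); a result smaller than the
 * requested size simply means the caller keeps polling GET, which
 * nv50_dmac_wait() does for up to two seconds via nvif_msec().
 */
static inline int
example_push_free(u32 get, u32 cur, u32 max)
{
	if (get > cur)		/* GET is ahead of us: stay 5 dwords behind it */
		return get - cur - 5;
	return max - cur;	/* otherwise run to the end, then wind back to 0 */
}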
int
@@ -139,13 +243,21 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (device->info.family == NV_DEVICE_INFO_V0_PASCAL)
type |= NVIF_MEM_VRAM;
- ret = nvif_mem_init_map(&cli->mmu, type, 0x1000, &dmac->push);
+ ret = nvif_mem_ctor_map(&cli->mmu, "kmsChanPush", type, 0x1000,
+ &dmac->_push.mem);
if (ret)
return ret;
- dmac->ptr = dmac->push.object.map.ptr;
+ dmac->ptr = dmac->_push.mem.object.map.ptr;
+ dmac->_push.wait = nv50_dmac_wait;
+ dmac->_push.kick = nv50_dmac_kick;
+ dmac->push = &dmac->_push;
+ dmac->push->bgn = dmac->_push.mem.object.map.ptr;
+ dmac->push->cur = dmac->push->bgn;
+ dmac->push->end = dmac->push->bgn;
+ dmac->max = 0x1000/4 - 1;
- args->pushbuf = nvif_handle(&dmac->push.object);
+ args->pushbuf = nvif_handle(&dmac->_push.mem.object);
ret = nv50_chan_create(device, disp, oclass, head, data, size,
&dmac->base);
@@ -155,7 +267,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (!syncbuf)
return 0;
- ret = nvif_object_init(&dmac->base.user, NV50_DISP_HANDLE_SYNCBUF,
+ ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
@@ -167,7 +279,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (ret)
return ret;
- ret = nvif_object_init(&dmac->base.user, NV50_DISP_HANDLE_VRAM,
+ ret = nvif_object_ctor(&dmac->base.user, "kmsVramCtxDma", NV50_DISP_HANDLE_VRAM,
NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
@@ -183,64 +295,6 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
}
/******************************************************************************
- * EVO channel helpers
- *****************************************************************************/
-static void
-evo_flush(struct nv50_dmac *dmac)
-{
- /* Push buffer fetches are not coherent with BAR1, we need to ensure
- * writes have been flushed right through to VRAM before writing PUT.
- */
- if (dmac->push.type & NVIF_MEM_VRAM) {
- struct nvif_device *device = dmac->base.device;
- nvif_wr32(&device->object, 0x070000, 0x00000001);
- nvif_msec(device, 2000,
- if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
- break;
- );
- }
-}
-
-u32 *
-evo_wait(struct nv50_dmac *evoc, int nr)
-{
- struct nv50_dmac *dmac = evoc;
- struct nvif_device *device = dmac->base.device;
- u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
-
- mutex_lock(&dmac->lock);
- if (put + nr >= (PAGE_SIZE / 4) - 8) {
- dmac->ptr[put] = 0x20000000;
- evo_flush(dmac);
-
- nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
- if (nvif_msec(device, 2000,
- if (!nvif_rd32(&dmac->base.user, 0x0004))
- break;
- ) < 0) {
- mutex_unlock(&dmac->lock);
- pr_err("nouveau: evo channel stalled\n");
- return NULL;
- }
-
- put = 0;
- }
-
- return dmac->ptr + put;
-}
-
-void
-evo_kick(u32 *push, struct nv50_dmac *evoc)
-{
- struct nv50_dmac *dmac = evoc;
-
- evo_flush(dmac);
-
- nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
- mutex_unlock(&dmac->lock);
-}
-
-/******************************************************************************
* Output path helpers
*****************************************************************************/
static void
@@ -365,8 +419,9 @@ nv50_dac_disable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
+ const u32 ctrl = NVDEF(NV507D, DAC_SET_CONTROL, OWNER, NONE);
if (nv_encoder->crtc)
- core->func->dac->ctrl(core, nv_encoder->or, 0x00000000, NULL);
+ core->func->dac->ctrl(core, nv_encoder->or, ctrl, NULL);
nv_encoder->crtc = NULL;
nv50_outp_release(nv_encoder);
}
@@ -378,10 +433,23 @@ nv50_dac_enable(struct drm_encoder *encoder)
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
+ u32 ctrl = 0;
+
+ switch (nv_crtc->index) {
+ case 0: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD0); break;
+ case 1: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD1); break;
+ case 2: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD2); break;
+ case 3: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD3); break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, PROTOCOL, RGB_CRT);
nv50_outp_acquire(nv_encoder, false);
- core->func->dac->ctrl(core, nv_encoder->or, 1 << nv_crtc->index, asyh);
+ core->func->dac->ctrl(core, nv_encoder->or, ctrl, asyh);
asyh->or.depth = 0;
nv_encoder->crtc = encoder->crtc;
@@ -586,6 +654,9 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
(0x0100 << nv_crtc->index),
};
+ if (!nv_encoder->audio)
+ return;
+
nv_encoder->audio = false;
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
@@ -928,10 +999,10 @@ static u8
nv50_dp_bpc_to_depth(unsigned int bpc)
{
switch (bpc) {
- case 6: return 0x2;
- case 8: return 0x5;
- case 10: /* fall-through */
- default: return 0x6;
+ case 6: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444;
+ case 8: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444;
+ case 10:
+ default: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444;
}
}
@@ -970,9 +1041,9 @@ nv50_msto_enable(struct drm_encoder *encoder)
nv50_outp_acquire(mstm->outp, false /*XXX: MST audio.*/);
if (mstm->outp->link & 1)
- proto = 0x8;
+ proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A;
else
- proto = 0x9;
+ proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B;
mstm->outp->update(mstm->outp, head->base.index, armh, proto,
nv50_dp_bpc_to_depth(armh->or.bpc));
@@ -1501,10 +1572,10 @@ nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
if (!asyh) {
nv_encoder->ctrl &= ~BIT(head);
- if (!(nv_encoder->ctrl & 0x0000000f))
+ if (NVDEF_TEST(nv_encoder->ctrl, NV507D, SOR_SET_CONTROL, OWNER, ==, NONE))
nv_encoder->ctrl = 0;
} else {
- nv_encoder->ctrl |= proto << 8;
+ nv_encoder->ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, PROTOCOL, proto);
nv_encoder->ctrl |= BIT(head);
asyh->or.depth = depth;
}
@@ -1562,8 +1633,8 @@ nv50_sor_enable(struct drm_encoder *encoder)
struct nouveau_connector *nv_connector;
struct nvbios *bios = &drm->vbios;
bool hda = false;
- u8 proto = 0xf;
- u8 depth = 0x0;
+ u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM;
+ u8 depth = NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT;
nv_connector = nouveau_encoder_connector_get(nv_encoder);
nv_encoder->crtc = encoder->crtc;
@@ -1577,7 +1648,7 @@ nv50_sor_enable(struct drm_encoder *encoder)
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_TMDS:
if (nv_encoder->link & 1) {
- proto = 0x1;
+ proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A;
/* Only enable dual-link if:
* - Need to (i.e. rate > 165MHz)
* - DCB says we can
@@ -1587,15 +1658,15 @@ nv50_sor_enable(struct drm_encoder *encoder)
if (mode->clock >= 165000 &&
nv_encoder->dcb->duallink_possible &&
!drm_detect_hdmi_monitor(nv_connector->edid))
- proto |= 0x4;
+ proto = NV507D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS;
} else {
- proto = 0x2;
+ proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B;
}
nv50_hdmi_enable(&nv_encoder->base.base, mode);
break;
case DCB_OUTPUT_LVDS:
- proto = 0x0;
+ proto = NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM;
if (bios->fp_no_ddc) {
if (bios->fp.dual_link)
@@ -1629,9 +1700,9 @@ nv50_sor_enable(struct drm_encoder *encoder)
depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
if (nv_encoder->link & 1)
- proto = 0x8;
+ proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_A;
else
- proto = 0x9;
+ proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;
nv50_audio_enable(encoder, mode);
break;
@@ -1766,8 +1837,9 @@ nv50_pior_disable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
+ const u32 ctrl = NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, NONE);
if (nv_encoder->crtc)
- core->func->pior->ctrl(core, nv_encoder->or, 0x00000000, NULL);
+ core->func->pior->ctrl(core, nv_encoder->or, ctrl, NULL);
nv_encoder->crtc = NULL;
nv50_outp_release(nv_encoder);
}
@@ -1779,29 +1851,36 @@ nv50_pior_enable(struct drm_encoder *encoder)
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
- u8 owner = 1 << nv_crtc->index;
- u8 proto;
+ u32 ctrl = 0;
+
+ switch (nv_crtc->index) {
+ case 0: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD0); break;
+ case 1: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD1); break;
+ default:
+ WARN_ON(1);
+ break;
+ }
nv50_outp_acquire(nv_encoder, false);
switch (asyh->or.bpc) {
- case 10: asyh->or.depth = 0x6; break;
- case 8: asyh->or.depth = 0x5; break;
- case 6: asyh->or.depth = 0x2; break;
- default: asyh->or.depth = 0x0; break;
+ case 10: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444; break;
+ case 8: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444; break;
+ case 6: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444; break;
+ default: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT; break;
}
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_TMDS:
case DCB_OUTPUT_DP:
- proto = 0x0;
+ ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
break;
default:
BUG();
break;
}
- core->func->pior->ctrl(core, nv_encoder->or, (proto << 8) | owner, asyh);
+ core->func->pior->ctrl(core, nv_encoder->or, ctrl, asyh);
nv_encoder->crtc = encoder->crtc;
}
@@ -2168,8 +2247,10 @@ nv50_disp_atomic_commit(struct drm_device *dev,
int ret, i;
ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(dev->dev);
return ret;
+ }
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
@@ -2460,7 +2541,7 @@ nv50_display_destroy(struct drm_device *dev)
nv50_audio_component_fini(nouveau_drm(dev));
nvif_object_unmap(&disp->caps);
- nvif_object_fini(&disp->caps);
+ nvif_object_dtor(&disp->caps);
nv50_core_del(&disp->core);
nouveau_bo_unmap(disp->sync);
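A small functional fix also rides along in the nv50_disp_atomic_commit() hunk above: pm_runtime_get_sync() raises the usage count even when it fails, so the error path now drops that reference with pm_runtime_put_autosuspend() before returning. A minimal sketch of the balanced pattern (hypothetical function, not part of the patch):
#include <linux/pm_runtime.h>
static int
example_rpm_get(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		/* the failed get still took a reference; give it back */
		pm_runtime_put_autosuspend(dev);
		return ret;
	}
	return 0;
}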
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 1968c6921f9e..92bddc083617 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -2,6 +2,7 @@
#define __NV50_KMS_H__
#include <linux/workqueue.h>
#include <nvif/mem.h>
+#include <nvif/push.h>
#include "nouveau_display.h"
@@ -61,7 +62,8 @@ struct nv50_chan {
struct nv50_dmac {
struct nv50_chan base;
- struct nvif_mem push;
+ struct nvif_push _push;
+ struct nvif_push *push;
u32 *ptr;
struct nvif_object sync;
@@ -71,6 +73,10 @@ struct nv50_dmac {
* grabbed by evo_wait (if the pushbuf reservation is successful) and
* dropped again by evo_kick. */
struct mutex lock;
+
+ u32 cur;
+ u32 put;
+ u32 max;
};
struct nv50_outp_atom {
@@ -106,18 +112,4 @@ void evo_kick(u32 *, struct nv50_dmac *);
extern const u64 disp50xx_modifiers[];
extern const u64 disp90xx_modifiers[];
extern const u64 wndwc57e_modifiers[];
-
-#define evo_mthd(p, m, s) do { \
- const u32 _m = (m), _s = (s); \
- if (drm_debug_enabled(DRM_UT_KMS)) \
- pr_err("%04x %d %s\n", _m, _s, __func__); \
- *((p)++) = ((_s << 18) | _m); \
-} while(0)
-
-#define evo_data(p, d) do { \
- const u32 _d = (d); \
- if (drm_debug_enabled(DRM_UT_KMS)) \
- pr_err("\t%08x\n", _d); \
- *((p)++) = _d; \
-} while(0)
#endif
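The evo_mthd()/evo_data() macros removed above wrote raw method headers and data words straight into the push buffer; the header word is simply ((count << 18) | method). As a worked example (standalone, not part of the patch), the 507d cursor-control method used in head507d.c, 0x0880 + head * 0x400 with two data words, encodes for head 0 as:
#include <stdio.h>
int main(void)
{
	unsigned int method = 0x0880 + 0 * 0x400;	/* HEAD_SET_CONTROL_CURSOR, head 0 */
	unsigned int count  = 2;			/* two data words follow */
	unsigned int header = (count << 18) | method;
	printf("0x%08x\n", header);			/* prints 0x00080880 */
	return 0;
}
After the conversion, PUSH_MTHD() together with the nvhw class headers performs this encoding, so the hand-maintained offsets and shifts disappear from the display code.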
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 9a10ec267d1f..841edfaf5b9d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -106,9 +106,9 @@ nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
}
}
- asyh->dither.enable = mode;
- asyh->dither.bits = mode >> 1;
- asyh->dither.mode = mode >> 3;
+ asyh->dither.enable = NVVAL_GET(mode, NV507D, HEAD_SET_DITHER_CONTROL, ENABLE);
+ asyh->dither.bits = NVVAL_GET(mode, NV507D, HEAD_SET_DITHER_CONTROL, BITS);
+ asyh->dither.mode = NVVAL_GET(mode, NV507D, HEAD_SET_DITHER_CONTROL, MODE);
asyh->set.dither = true;
}
@@ -489,7 +489,7 @@ nv50_head_destroy(struct drm_crtc *crtc)
{
struct nv50_head *head = nv50_head(crtc);
- nvif_notify_fini(&head->base.vblank);
+ nvif_notify_dtor(&head->base.vblank);
nv50_lut_fini(&head->olut);
drm_crtc_cleanup(crtc);
kfree(head);
@@ -598,7 +598,7 @@ nv50_head_create(struct drm_device *dev, int index)
}
}
- ret = nvif_notify_init(&disp->disp->object, nv50_head_vblank_handler,
+ ret = nvif_notify_ctor(&disp->disp->object, "kmsVbl", nv50_head_vblank_handler,
false, NV04_DISP_NTFY_VBLANK,
&(struct nvif_notify_head_req_v0) {
.head = nv_crtc->index,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index 30501ad1824e..dae841dc05fd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -25,74 +25,72 @@ void nv50_head_flush_clr(struct nv50_head *head,
struct nv50_head_atom *asyh, bool flush);
struct nv50_head_func {
- void (*view)(struct nv50_head *, struct nv50_head_atom *);
- void (*mode)(struct nv50_head *, struct nv50_head_atom *);
+ int (*view)(struct nv50_head *, struct nv50_head_atom *);
+ int (*mode)(struct nv50_head *, struct nv50_head_atom *);
bool (*olut)(struct nv50_head *, struct nv50_head_atom *, int);
bool olut_identity;
int olut_size;
- void (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
- void (*olut_clr)(struct nv50_head *);
+ int (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
+ int (*olut_clr)(struct nv50_head *);
void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
- void (*core_set)(struct nv50_head *, struct nv50_head_atom *);
- void (*core_clr)(struct nv50_head *);
+ int (*core_set)(struct nv50_head *, struct nv50_head_atom *);
+ int (*core_clr)(struct nv50_head *);
int (*curs_layout)(struct nv50_head *, struct nv50_wndw_atom *,
struct nv50_head_atom *);
int (*curs_format)(struct nv50_head *, struct nv50_wndw_atom *,
struct nv50_head_atom *);
- void (*curs_set)(struct nv50_head *, struct nv50_head_atom *);
- void (*curs_clr)(struct nv50_head *);
- void (*base)(struct nv50_head *, struct nv50_head_atom *);
- void (*ovly)(struct nv50_head *, struct nv50_head_atom *);
- void (*dither)(struct nv50_head *, struct nv50_head_atom *);
- void (*procamp)(struct nv50_head *, struct nv50_head_atom *);
- void (*or)(struct nv50_head *, struct nv50_head_atom *);
+ int (*curs_set)(struct nv50_head *, struct nv50_head_atom *);
+ int (*curs_clr)(struct nv50_head *);
+ int (*base)(struct nv50_head *, struct nv50_head_atom *);
+ int (*ovly)(struct nv50_head *, struct nv50_head_atom *);
+ int (*dither)(struct nv50_head *, struct nv50_head_atom *);
+ int (*procamp)(struct nv50_head *, struct nv50_head_atom *);
+ int (*or)(struct nv50_head *, struct nv50_head_atom *);
void (*static_wndw_map)(struct nv50_head *, struct nv50_head_atom *);
};
extern const struct nv50_head_func head507d;
-void head507d_view(struct nv50_head *, struct nv50_head_atom *);
-void head507d_mode(struct nv50_head *, struct nv50_head_atom *);
+int head507d_view(struct nv50_head *, struct nv50_head_atom *);
+int head507d_mode(struct nv50_head *, struct nv50_head_atom *);
bool head507d_olut(struct nv50_head *, struct nv50_head_atom *, int);
void head507d_core_calc(struct nv50_head *, struct nv50_head_atom *);
-void head507d_core_clr(struct nv50_head *);
+int head507d_core_clr(struct nv50_head *);
int head507d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
struct nv50_head_atom *);
int head507d_curs_format(struct nv50_head *, struct nv50_wndw_atom *,
struct nv50_head_atom *);
-void head507d_base(struct nv50_head *, struct nv50_head_atom *);
-void head507d_ovly(struct nv50_head *, struct nv50_head_atom *);
-void head507d_dither(struct nv50_head *, struct nv50_head_atom *);
-void head507d_procamp(struct nv50_head *, struct nv50_head_atom *);
+int head507d_base(struct nv50_head *, struct nv50_head_atom *);
+int head507d_ovly(struct nv50_head *, struct nv50_head_atom *);
+int head507d_dither(struct nv50_head *, struct nv50_head_atom *);
+int head507d_procamp(struct nv50_head *, struct nv50_head_atom *);
extern const struct nv50_head_func head827d;
extern const struct nv50_head_func head907d;
-void head907d_view(struct nv50_head *, struct nv50_head_atom *);
-void head907d_mode(struct nv50_head *, struct nv50_head_atom *);
+int head907d_view(struct nv50_head *, struct nv50_head_atom *);
+int head907d_mode(struct nv50_head *, struct nv50_head_atom *);
bool head907d_olut(struct nv50_head *, struct nv50_head_atom *, int);
-void head907d_olut_set(struct nv50_head *, struct nv50_head_atom *);
-void head907d_olut_clr(struct nv50_head *);
-void head907d_core_set(struct nv50_head *, struct nv50_head_atom *);
-void head907d_core_clr(struct nv50_head *);
-void head907d_curs_set(struct nv50_head *, struct nv50_head_atom *);
-void head907d_curs_clr(struct nv50_head *);
-void head907d_ovly(struct nv50_head *, struct nv50_head_atom *);
-void head907d_procamp(struct nv50_head *, struct nv50_head_atom *);
-void head907d_or(struct nv50_head *, struct nv50_head_atom *);
+int head907d_olut_set(struct nv50_head *, struct nv50_head_atom *);
+int head907d_olut_clr(struct nv50_head *);
+int head907d_core_set(struct nv50_head *, struct nv50_head_atom *);
+int head907d_core_clr(struct nv50_head *);
+int head907d_curs_set(struct nv50_head *, struct nv50_head_atom *);
+int head907d_curs_clr(struct nv50_head *);
+int head907d_ovly(struct nv50_head *, struct nv50_head_atom *);
+int head907d_procamp(struct nv50_head *, struct nv50_head_atom *);
+int head907d_or(struct nv50_head *, struct nv50_head_atom *);
extern const struct nv50_head_func head917d;
int head917d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
struct nv50_head_atom *);
extern const struct nv50_head_func headc37d;
-void headc37d_view(struct nv50_head *, struct nv50_head_atom *);
-void headc37d_core_set(struct nv50_head *, struct nv50_head_atom *);
-void headc37d_core_clr(struct nv50_head *);
+int headc37d_view(struct nv50_head *, struct nv50_head_atom *);
int headc37d_curs_format(struct nv50_head *, struct nv50_wndw_atom *,
struct nv50_head_atom *);
-void headc37d_curs_set(struct nv50_head *, struct nv50_head_atom *);
-void headc37d_curs_clr(struct nv50_head *);
-void headc37d_dither(struct nv50_head *, struct nv50_head_atom *);
+int headc37d_curs_set(struct nv50_head *, struct nv50_head_atom *);
+int headc37d_curs_clr(struct nv50_head *);
+int headc37d_dither(struct nv50_head *, struct nv50_head_atom *);
void headc37d_static_wndw_map(struct nv50_head *, struct nv50_head_atom *);
extern const struct nv50_head_func headc57d;
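Every hook in struct nv50_head_func (and the per-chipset prototypes with it) switches from void to int here because a PUSH_WAIT() reservation can now fail, e.g. with -ETIMEDOUT, instead of the update being dropped silently. A hypothetical caller, using only the set.* flags visible elsewhere in this patch and assuming the head.h definitions, shows why the new return values matter (this is an illustration, not the driver's actual flush path):
static int
example_head_flush(struct nv50_head *head, struct nv50_head_atom *asyh,
		   const struct nv50_head_func *func)
{
	int ret;
	if (asyh->set.dither && (ret = func->dither(head, asyh)))
		return ret;
	if (asyh->set.olut && (ret = func->olut_set(head, asyh)))
		return ret;
	if (asyh->set.curs && (ret = func->curs_set(head, asyh)))
		return ret;
	return 0;	/* any error above propagates toward the atomic commit */
}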
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 66ccf36b56a2..0edd4e520c8e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -22,111 +22,141 @@
#include "head.h"
#include "core.h"
-void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl507d.h>
+
+int
head507d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
- evo_data(push, asyh->procamp.sat.sin << 20 |
- asyh->procamp.sat.cos << 8);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_PROCAMP(i),
+ NVDEF(NV507D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+ NVDEF(NV507D, HEAD_SET_PROCAMP, CHROMA_LPF, AUTO) |
+ NVVAL(NV507D, HEAD_SET_PROCAMP, SAT_COS, asyh->procamp.sat.cos) |
+ NVVAL(NV507D, HEAD_SET_PROCAMP, SAT_SINE, asyh->procamp.sat.sin) |
+ NVDEF(NV507D, HEAD_SET_PROCAMP, TRANSITION, HARD));
+ return 0;
}
-void
+int
head507d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
- evo_data(push, asyh->dither.mode << 3 |
- asyh->dither.bits << 1 |
- asyh->dither.enable);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_DITHER_CONTROL(i),
+ NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+ NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+ NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+ NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+ return 0;
}
-void
+int
head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
u32 bounds = 0;
- u32 *push;
+ int ret;
if (asyh->ovly.cpp) {
switch (asyh->ovly.cpp) {
- case 4: bounds |= 0x00000300; break;
- case 2: bounds |= 0x00000100; break;
+ case 4: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
+ case 2: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
default:
WARN_ON(1);
break;
}
- bounds |= 0x00000001;
+ bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
} else {
- bounds |= 0x00000100;
+ bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16);
}
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
- evo_data(push, bounds);
- evo_kick(push, core);
- }
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
+ return 0;
}
-void
+int
head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
u32 bounds = 0;
- u32 *push;
+ int ret;
if (asyh->base.cpp) {
switch (asyh->base.cpp) {
- case 8: bounds |= 0x00000500; break;
- case 4: bounds |= 0x00000300; break;
- case 2: bounds |= 0x00000100; break;
- case 1: bounds |= 0x00000000; break;
+ case 8: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
+ case 4: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
+ case 2: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
+ case 1: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_8); break;
default:
WARN_ON(1);
break;
}
- bounds |= 0x00000001;
+ bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
}
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
- evo_data(push, bounds);
- evo_kick(push, core);
- }
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
+ return 0;
}
-static void
+static int
head507d_curs_clr(struct nv50_head *head)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
- evo_data(push, 0x05000000);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+ NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8) |
+ NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, SIZE, W64_H64));
+ return 0;
}
-static void
+static int
head507d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 3))) {
- evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
- evo_data(push, 0x80000000 | asyh->curs.layout << 26 |
- asyh->curs.format << 24);
- evo_data(push, asyh->curs.offset >> 8);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 3)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+ NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND) |
+ NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, SUB_OWNER, NONE),
+
+ HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+ return 0;
}
int
@@ -134,7 +164,7 @@ head507d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
switch (asyw->image.format) {
- case 0xcf: asyh->curs.format = 1; break;
+ case 0xcf: asyh->curs.format = NV507D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8; break;
default:
WARN_ON(1);
return -EINVAL;
@@ -147,54 +177,70 @@ head507d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
switch (asyw->image.w) {
- case 32: asyh->curs.layout = 0; break;
- case 64: asyh->curs.layout = 1; break;
+ case 32: asyh->curs.layout = NV507D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32; break;
+ case 64: asyh->curs.layout = NV507D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64; break;
default:
return -EINVAL;
}
return 0;
}
-void
+int
head507d_core_clr(struct nv50_head *head)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_CONTEXT_DMA_ISO(i), 0x00000000);
+ return 0;
}
-static void
+static int
head507d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 9))) {
- evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
- evo_data(push, asyh->core.offset >> 8);
- evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
- evo_data(push, asyh->core.h << 16 | asyh->core.w);
- evo_data(push, asyh->core.layout << 20 |
- (asyh->core.pitch >> 8) << 8 |
- asyh->core.blocks << 8 |
- asyh->core.blockh);
- evo_data(push, asyh->core.kind << 16 |
- asyh->core.format << 8);
- evo_data(push, asyh->core.handle);
- evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
- evo_data(push, asyh->core.y << 16 | asyh->core.x);
- evo_kick(push, core);
-
- /* EVO will complain with INVALID_STATE if we have an
- * active cursor and (re)specify HeadSetContextDmaIso
- * without also updating HeadSetOffsetCursor.
- */
- asyh->set.curs = asyh->curs.visible;
- asyh->set.olut = asyh->olut.handle != 0;
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 9)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_OFFSET(i, 0),
+ NVVAL(NV507D, HEAD_SET_OFFSET, ORIGIN, asyh->core.offset >> 8));
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_SIZE(i),
+ NVVAL(NV507D, HEAD_SET_SIZE, WIDTH, asyh->core.w) |
+ NVVAL(NV507D, HEAD_SET_SIZE, HEIGHT, asyh->core.h),
+
+ HEAD_SET_STORAGE(i),
+ NVVAL(NV507D, HEAD_SET_STORAGE, BLOCK_HEIGHT, asyh->core.blockh) |
+ NVVAL(NV507D, HEAD_SET_STORAGE, PITCH, asyh->core.pitch >> 8) |
+ NVVAL(NV507D, HEAD_SET_STORAGE, PITCH, asyh->core.blocks) |
+ NVVAL(NV507D, HEAD_SET_STORAGE, MEMORY_LAYOUT, asyh->core.layout),
+
+ HEAD_SET_PARAMS(i),
+ NVVAL(NV507D, HEAD_SET_PARAMS, FORMAT, asyh->core.format) |
+ NVVAL(NV507D, HEAD_SET_PARAMS, KIND, asyh->core.kind) |
+ NVDEF(NV507D, HEAD_SET_PARAMS, PART_STRIDE, PARTSTRIDE_256),
+
+ HEAD_SET_CONTEXT_DMA_ISO(i),
+ NVVAL(NV507D, HEAD_SET_CONTEXT_DMA_ISO, HANDLE, asyh->core.handle));
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_VIEWPORT_POINT_IN(i, 0),
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_POINT_IN, X, asyh->core.x) |
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_POINT_IN, Y, asyh->core.y));
+
+ /* EVO will complain with INVALID_STATE if we have an
+ * active cursor and (re)specify HeadSetContextDmaIso
+ * without also updating HeadSetOffsetCursor.
+ */
+ asyh->set.curs = asyh->curs.visible;
+ asyh->set.olut = asyh->olut.handle != 0;
+ return 0;
}
void
@@ -221,37 +267,47 @@ head507d_core_calc(struct nv50_head *head, struct nv50_head_atom *asyh)
}
asyh->core.handle = disp->core->chan.vram.handle;
asyh->core.offset = 0;
- asyh->core.format = 0xcf;
- asyh->core.kind = 0;
- asyh->core.layout = 1;
- asyh->core.blockh = 0;
+ asyh->core.format = NV507D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8;
+ asyh->core.kind = NV507D_HEAD_SET_PARAMS_KIND_KIND_PITCH;
+ asyh->core.layout = NV507D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH;
+ asyh->core.blockh = NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB;
asyh->core.blocks = 0;
asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
}
-static void
+static int
head507d_olut_clr(struct nv50_head *head)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_BASE_LUT_LO(i),
+ NVDEF(NV507D, HEAD_SET_BASE_LUT_LO, ENABLE, DISABLE));
+ return 0;
}
-static void
+static int
head507d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 3))) {
- evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
- evo_data(push, 0x80000000 | asyh->olut.mode << 30);
- evo_data(push, asyh->olut.offset >> 8);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 3)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_BASE_LUT_LO(i),
+ NVDEF(NV507D, HEAD_SET_BASE_LUT_LO, ENABLE, ENABLE) |
+ NVVAL(NV507D, HEAD_SET_BASE_LUT_LO, MODE, asyh->olut.mode) |
+ NVVAL(NV507D, HEAD_SET_BASE_LUT_LO, ORIGIN, 0),
+
+ HEAD_SET_BASE_LUT_HI(i),
+ NVVAL(NV507D, HEAD_SET_BASE_LUT_HI, ORIGIN, asyh->olut.offset >> 8));
+ return 0;
}
static void
@@ -278,53 +334,97 @@ head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
return false;
if (asyh->base.cpp == 1)
- asyh->olut.mode = 0;
+ asyh->olut.mode = NV507D_HEAD_SET_BASE_LUT_LO_MODE_LORES;
else
- asyh->olut.mode = 1;
+ asyh->olut.mode = NV507D_HEAD_SET_BASE_LUT_LO_MODE_HIRES;
asyh->olut.load = head507d_olut_load;
return true;
}
-void
+int
head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
- u32 *push;
- if ((push = evo_wait(core, 13))) {
- evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
- evo_data(push, 0x00800000 | m->clock);
- evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
- evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
- evo_data(push, 0x00000000);
- evo_data(push, m->v.active << 16 | m->h.active );
- evo_data(push, m->v.synce << 16 | m->h.synce );
- evo_data(push, m->v.blanke << 16 | m->h.blanke );
- evo_data(push, m->v.blanks << 16 | m->h.blanks );
- evo_data(push, m->v.blank2e << 16 | m->v.blank2s);
- evo_data(push, asyh->mode.v.blankus);
- evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_kick(push, core);
- }
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 13)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_PIXEL_CLOCK(i),
+ NVVAL(NV507D, HEAD_SET_PIXEL_CLOCK, FREQUENCY, m->clock) |
+ NVDEF(NV507D, HEAD_SET_PIXEL_CLOCK, MODE, CLK_CUSTOM) |
+ NVDEF(NV507D, HEAD_SET_PIXEL_CLOCK, ADJ1000DIV1001, FALSE) |
+ NVDEF(NV507D, HEAD_SET_PIXEL_CLOCK, NOT_DRIVER, FALSE),
+
+ HEAD_SET_CONTROL(i),
+ NVVAL(NV507D, HEAD_SET_CONTROL, STRUCTURE, m->interlace));
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_OVERSCAN_COLOR(i),
+ NVVAL(NV507D, HEAD_SET_OVERSCAN_COLOR, RED, 0) |
+ NVVAL(NV507D, HEAD_SET_OVERSCAN_COLOR, GRN, 0) |
+ NVVAL(NV507D, HEAD_SET_OVERSCAN_COLOR, BLU, 0),
+
+ HEAD_SET_RASTER_SIZE(i),
+ NVVAL(NV507D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+ NVVAL(NV507D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+ HEAD_SET_RASTER_SYNC_END(i),
+ NVVAL(NV507D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+ NVVAL(NV507D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+ HEAD_SET_RASTER_BLANK_END(i),
+ NVVAL(NV507D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+ NVVAL(NV507D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+ HEAD_SET_RASTER_BLANK_START(i),
+ NVVAL(NV507D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+ NVVAL(NV507D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks),
+
+ HEAD_SET_RASTER_VERT_BLANK2(i),
+ NVVAL(NV507D, HEAD_SET_RASTER_VERT_BLANK2, YSTART, m->v.blank2s) |
+ NVVAL(NV507D, HEAD_SET_RASTER_VERT_BLANK2, YEND, m->v.blank2e),
+
+ HEAD_SET_RASTER_VERT_BLANK_DMI(i),
+ NVVAL(NV507D, HEAD_SET_RASTER_VERT_BLANK_DMI, DURATION, m->v.blankus));
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_DEFAULT_BASE_COLOR(i),
+ NVVAL(NV507D, HEAD_SET_DEFAULT_BASE_COLOR, RED, 0) |
+ NVVAL(NV507D, HEAD_SET_DEFAULT_BASE_COLOR, GREEN, 0) |
+ NVVAL(NV507D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0));
+ return 0;
}
-void
+int
head507d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 7))) {
- evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
- evo_data(push, asyh->view.iH << 16 | asyh->view.iW);
- evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
- evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
- evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 7)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER(i),
+ NVDEF(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, VERTICAL_TAPS, TAPS_1) |
+ NVDEF(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, HORIZONTAL_TAPS, TAPS_1) |
+ NVVAL(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, HRESPONSE_BIAS, 0) |
+ NVVAL(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, VRESPONSE_BIAS, 0));
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_VIEWPORT_SIZE_IN(i),
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
+
+ PUSH_MTHD(push, NV507D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH),
+
+ HEAD_SET_VIEWPORT_SIZE_OUT_MIN(i),
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, WIDTH, asyh->view.oW) |
+ NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, HEIGHT, asyh->view.oH));
+ return 0;
}
const struct nv50_head_func
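The NVVAL()/NVDEF() calls above replace magic shifts such as "asyh->procamp.sat.sin << 20" with named fields from the nvhw class headers; NVVAL() packs a numeric value into a field and NVDEF() does the same with a named enumerant. The underlying idea is DRF-style packing, where a field is described by a "high : low" bit pair. A rough standalone illustration follows; the field positions and macro names are invented for this example and are not the real nvif/nvhw definitions:
#include <stdio.h>
/* Pretend dither-control layout: ENABLE at bit 0, BITS at 2:1, MODE at 6:3. */
#define XDITHER_ENABLE	0 : 0
#define XDITHER_BITS	2 : 1
#define XDITHER_MODE	6 : 3
#define XHI(field)	(1 ? field)			/* high bit of "hi : lo" */
#define XLO(field)	(0 ? field)			/* low bit of "hi : lo" */
#define XMASK(field)	((1u << (XHI(field) - XLO(field) + 1)) - 1)
#define XVAL(field, v)	((((unsigned int)(v)) & XMASK(field)) << XLO(field))
int main(void)
{
	unsigned int ctrl = XVAL(XDITHER_ENABLE, 1) |
			    XVAL(XDITHER_BITS, 2) |
			    XVAL(XDITHER_MODE, 1);
	printf("0x%08x\n", ctrl);	/* prints 0x0000000d */
	return 0;
}
(The doubled HEAD_SET_STORAGE PITCH term in head507d_core_set() above is harmless for the core channel: head507d_core_calc() leaves asyh->core.blocks at zero, so only the pitch value contributes.)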
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
index 11877119eea4..194d1771c481 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head827d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
@@ -22,85 +22,128 @@
#include "head.h"
#include "core.h"
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl827d.h>
+
+static int
head827d_curs_clr(struct nv50_head *head)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 4))) {
- evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
- evo_data(push, 0x05000000);
- evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+ NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8) |
+ NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, SIZE, W64_H64));
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_CURSOR(i), 0x00000000);
+ return 0;
}
-static void
+static int
head827d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 5))) {
- evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
- evo_data(push, 0x80000000 | asyh->curs.layout << 26 |
- asyh->curs.format << 24);
- evo_data(push, asyh->curs.offset >> 8);
- evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
- evo_data(push, asyh->curs.handle);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+ NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND) |
+ NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, SUB_OWNER, NONE),
+
+ HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
+ return 0;
}
-static void
+static int
head827d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 9))) {
- evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
- evo_data(push, asyh->core.offset >> 8);
- evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
- evo_data(push, asyh->core.h << 16 | asyh->core.w);
- evo_data(push, asyh->core.layout << 20 |
- (asyh->core.pitch >> 8) << 8 |
- asyh->core.blocks << 8 |
- asyh->core.blockh);
- evo_data(push, asyh->core.format << 8);
- evo_data(push, asyh->core.handle);
- evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
- evo_data(push, asyh->core.y << 16 | asyh->core.x);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 9)))
+ return ret;
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_OFFSET(i, 0),
+ NVVAL(NV827D, HEAD_SET_OFFSET, ORIGIN, asyh->core.offset >> 8));
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_SIZE(i),
+ NVVAL(NV827D, HEAD_SET_SIZE, WIDTH, asyh->core.w) |
+ NVVAL(NV827D, HEAD_SET_SIZE, HEIGHT, asyh->core.h),
+
+ HEAD_SET_STORAGE(i),
+ NVVAL(NV827D, HEAD_SET_STORAGE, BLOCK_HEIGHT, asyh->core.blockh) |
+ NVVAL(NV827D, HEAD_SET_STORAGE, PITCH, asyh->core.pitch >> 8) |
+ NVVAL(NV827D, HEAD_SET_STORAGE, PITCH, asyh->core.blocks) |
+ NVVAL(NV827D, HEAD_SET_STORAGE, MEMORY_LAYOUT, asyh->core.layout),
+
+ HEAD_SET_PARAMS(i),
+ NVVAL(NV827D, HEAD_SET_PARAMS, FORMAT, asyh->core.format) |
+ NVDEF(NV827D, HEAD_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
+ NVDEF(NV827D, HEAD_SET_PARAMS, GAMMA, LINEAR),
+
+ HEAD_SET_CONTEXT_DMAS_ISO(i, 0),
+ NVVAL(NV827D, HEAD_SET_CONTEXT_DMAS_ISO, HANDLE, asyh->core.handle));
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_VIEWPORT_POINT_IN(i, 0),
+ NVVAL(NV827D, HEAD_SET_VIEWPORT_POINT_IN, X, asyh->core.x) |
+ NVVAL(NV827D, HEAD_SET_VIEWPORT_POINT_IN, Y, asyh->core.y));
+ return 0;
}
-static void
+static int
head827d_olut_clr(struct nv50_head *head)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 4))) {
- evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_BASE_LUT_LO(i),
+ NVDEF(NV827D, HEAD_SET_BASE_LUT_LO, ENABLE, DISABLE));
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_LUT(i), 0x00000000);
+ return 0;
}
-static void
+static int
head827d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 5))) {
- evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
- evo_data(push, 0x80000000 | asyh->olut.mode << 30);
- evo_data(push, asyh->olut.offset >> 8);
- evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
- evo_data(push, asyh->olut.handle);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_BASE_LUT_LO(i),
+ NVDEF(NV827D, HEAD_SET_BASE_LUT_LO, ENABLE, ENABLE) |
+ NVVAL(NV827D, HEAD_SET_BASE_LUT_LO, MODE, asyh->olut.mode) |
+ NVVAL(NV827D, HEAD_SET_BASE_LUT_LO, ORIGIN, 0),
+
+ HEAD_SET_BASE_LUT_HI(i),
+ NVVAL(NV827D, HEAD_SET_BASE_LUT_HI, ORIGIN, asyh->olut.offset >> 8));
+
+ PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_LUT(i), asyh->olut.handle);
+ return 0;
}
const struct nv50_head_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index 63a0b45d96d6..8f860e9c5224 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -29,196 +29,257 @@
#include "core.h"
#include "crc.h"
-void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907d.h>
+
+int
head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 3))) {
- evo_mthd(push, 0x0404 + (head->base.index * 0x300), 2);
- evo_data(push, asyh->or.depth << 6 |
- asyh->or.nvsync << 4 |
- asyh->or.nhsync << 3 |
- asyh->or.crc_raster);
- evo_data(push, 0x31ec6000 | head->base.index << 25 |
- asyh->mode.interlace);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 3)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
+ NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, asyh->or.depth),
+
+ HEAD_SET_CONTROL(i), 0x31ec6000 | head->base.index << 25 |
+ NVVAL(NV907D, HEAD_SET_CONTROL, STRUCTURE, asyh->mode.interlace));
+ return 0;
}
-void
+int
head907d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
- evo_data(push, asyh->procamp.sat.sin << 20 |
- asyh->procamp.sat.cos << 8);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_PROCAMP(i),
+ NVDEF(NV907D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+ NVDEF(NV907D, HEAD_SET_PROCAMP, CHROMA_LPF, AUTO) |
+ NVVAL(NV907D, HEAD_SET_PROCAMP, SAT_COS, asyh->procamp.sat.cos) |
+ NVVAL(NV907D, HEAD_SET_PROCAMP, SAT_SINE, asyh->procamp.sat.sin) |
+ NVDEF(NV907D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA) |
+ NVDEF(NV907D, HEAD_SET_PROCAMP, RANGE_COMPRESSION, DISABLE));
+ return 0;
}
-static void
+static int
head907d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
- evo_data(push, asyh->dither.mode << 3 |
- asyh->dither.bits << 1 |
- asyh->dither.enable);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_DITHER_CONTROL(i),
+ NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+ NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+ NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+ NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+ return 0;
}
-void
+int
head907d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
u32 bounds = 0;
- u32 *push;
+ int ret;
if (asyh->ovly.cpp) {
switch (asyh->ovly.cpp) {
- case 8: bounds |= 0x00000500; break;
- case 4: bounds |= 0x00000300; break;
- case 2: bounds |= 0x00000100; break;
+ case 8: bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
+ case 4: bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
+ case 2: bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
default:
WARN_ON(1);
break;
}
- bounds |= 0x00000001;
+ bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, USABLE, TRUE);
} else {
- bounds |= 0x00000100;
+ bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16);
}
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
- evo_data(push, bounds);
- evo_kick(push, core);
- }
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS(i), bounds);
+ return 0;
}
-static void
+static int
head907d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
u32 bounds = 0;
- u32 *push;
+ int ret;
if (asyh->base.cpp) {
switch (asyh->base.cpp) {
- case 8: bounds |= 0x00000500; break;
- case 4: bounds |= 0x00000300; break;
- case 2: bounds |= 0x00000100; break;
- case 1: bounds |= 0x00000000; break;
+ case 8: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
+ case 4: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
+ case 2: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
+ case 1: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_8); break;
default:
WARN_ON(1);
break;
}
- bounds |= 0x00000001;
+ bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
}
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
- evo_data(push, bounds);
- evo_kick(push, core);
- }
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
+ return 0;
}
-void
+int
head907d_curs_clr(struct nv50_head *head)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 4))) {
- evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
- evo_data(push, 0x05000000);
- evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+ NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8) |
+ NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, SIZE, W64_H64));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CURSOR(i), 0x00000000);
+ return 0;
}
-void
+int
head907d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 5))) {
- evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
- evo_data(push, 0x80000000 | asyh->curs.layout << 26 |
- asyh->curs.format << 24);
- evo_data(push, asyh->curs.offset >> 8);
- evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
- evo_data(push, asyh->curs.handle);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+ NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND),
+
+ HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
+ return 0;
}
-void
+int
head907d_core_clr(struct nv50_head *head)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMAS_ISO(i), 0x00000000);
+ return 0;
}
-void
+int
head907d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 9))) {
- evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
- evo_data(push, asyh->core.offset >> 8);
- evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
- evo_data(push, asyh->core.h << 16 | asyh->core.w);
- evo_data(push, asyh->core.layout << 24 |
- (asyh->core.pitch >> 8) << 8 |
- asyh->core.blocks << 8 |
- asyh->core.blockh);
- evo_data(push, asyh->core.format << 8);
- evo_data(push, asyh->core.handle);
- evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
- evo_data(push, asyh->core.y << 16 | asyh->core.x);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 9)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_OFFSET(i),
+ NVVAL(NV907D, HEAD_SET_OFFSET, ORIGIN, asyh->core.offset >> 8));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_SIZE(i),
+ NVVAL(NV907D, HEAD_SET_SIZE, WIDTH, asyh->core.w) |
+ NVVAL(NV907D, HEAD_SET_SIZE, HEIGHT, asyh->core.h),
+
+ HEAD_SET_STORAGE(i),
+ NVVAL(NV907D, HEAD_SET_STORAGE, BLOCK_HEIGHT, asyh->core.blockh) |
+ NVVAL(NV907D, HEAD_SET_STORAGE, PITCH, asyh->core.pitch >> 8) |
+ NVVAL(NV907D, HEAD_SET_STORAGE, PITCH, asyh->core.blocks) |
+ NVVAL(NV907D, HEAD_SET_STORAGE, MEMORY_LAYOUT, asyh->core.layout),
+
+ HEAD_SET_PARAMS(i),
+ NVVAL(NV907D, HEAD_SET_PARAMS, FORMAT, asyh->core.format) |
+ NVDEF(NV907D, HEAD_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
+ NVDEF(NV907D, HEAD_SET_PARAMS, GAMMA, LINEAR),
+
+ HEAD_SET_CONTEXT_DMAS_ISO(i),
+ NVVAL(NV907D, HEAD_SET_CONTEXT_DMAS_ISO, HANDLE, asyh->core.handle));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_VIEWPORT_POINT_IN(i),
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_POINT_IN, X, asyh->core.x) |
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_POINT_IN, Y, asyh->core.y));
+ return 0;
}
-void
+int
head907d_olut_clr(struct nv50_head *head)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 4))) {
- evo_mthd(push, 0x0448 + (head->base.index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_OUTPUT_LUT_LO(i),
+ NVDEF(NV907D, HEAD_SET_OUTPUT_LUT_LO, ENABLE, DISABLE));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_LUT(i), 0x00000000);
+ return 0;
}
-void
+int
head907d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 5))) {
- evo_mthd(push, 0x0448 + (head->base.index * 0x300), 2);
- evo_data(push, 0x80000000 | asyh->olut.mode << 24);
- evo_data(push, asyh->olut.offset >> 8);
- evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
- evo_data(push, asyh->olut.handle);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_OUTPUT_LUT_LO(i),
+ NVDEF(NV907D, HEAD_SET_OUTPUT_LUT_LO, ENABLE, ENABLE) |
+ NVVAL(NV907D, HEAD_SET_OUTPUT_LUT_LO, MODE, asyh->olut.mode) |
+ NVDEF(NV907D, HEAD_SET_OUTPUT_LUT_LO, NEVER_YIELD_TO_BASE, DISABLE),
+
+ HEAD_SET_OUTPUT_LUT_HI(i),
+ NVVAL(NV907D, HEAD_SET_OUTPUT_LUT_HI, ORIGIN, asyh->olut.offset >> 8));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_LUT(i), asyh->olut.handle);
+ return 0;
}
void
@@ -244,52 +305,110 @@ head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
if (size != 256 && size != 1024)
return false;
- asyh->olut.mode = size == 1024 ? 4 : 7;
+ if (size == 1024)
+ asyh->olut.mode = NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE;
+ else
+ asyh->olut.mode = NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE;
+
asyh->olut.load = head907d_olut_load;
return true;
}
-void
+int
head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
- u32 *push;
- if ((push = evo_wait(core, 14))) {
- evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
- evo_data(push, 0x00000000);
- evo_data(push, m->v.active << 16 | m->h.active );
- evo_data(push, m->v.synce << 16 | m->h.synce );
- evo_data(push, m->v.blanke << 16 | m->h.blanke );
- evo_data(push, m->v.blanks << 16 | m->h.blanks );
- evo_data(push, m->v.blank2e << 16 | m->v.blank2s);
- evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
- evo_data(push, 0x00000000); /* ??? */
- evo_data(push, 0xffffff00);
- evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
- evo_data(push, m->clock * 1000);
- evo_data(push, 0x00200000); /* ??? */
- evo_data(push, m->clock * 1000);
- evo_kick(push, core);
- }
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 14)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_OVERSCAN_COLOR(i),
+ NVVAL(NV907D, HEAD_SET_OVERSCAN_COLOR, RED, 0) |
+ NVVAL(NV907D, HEAD_SET_OVERSCAN_COLOR, GRN, 0) |
+ NVVAL(NV907D, HEAD_SET_OVERSCAN_COLOR, BLU, 0),
+
+ HEAD_SET_RASTER_SIZE(i),
+ NVVAL(NV907D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+ NVVAL(NV907D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+ HEAD_SET_RASTER_SYNC_END(i),
+ NVVAL(NV907D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+ NVVAL(NV907D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+ HEAD_SET_RASTER_BLANK_END(i),
+ NVVAL(NV907D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+ NVVAL(NV907D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+ HEAD_SET_RASTER_BLANK_START(i),
+ NVVAL(NV907D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+ NVVAL(NV907D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks),
+
+ HEAD_SET_RASTER_VERT_BLANK2(i),
+ NVVAL(NV907D, HEAD_SET_RASTER_VERT_BLANK2, YSTART, m->v.blank2s) |
+ NVVAL(NV907D, HEAD_SET_RASTER_VERT_BLANK2, YEND, m->v.blank2e));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_DEFAULT_BASE_COLOR(i),
+ NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, RED, 0) |
+ NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, GREEN, 0) |
+ NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0),
+
+ HEAD_SET_CRC_CONTROL(i),
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, TIMESTAMP_MODE, FALSE) |
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, NONE) |
+ NVDEF(NV907D, HEAD_SET_CRC_CONTROL, SECONDARY_OUTPUT, NONE));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
+ NVVAL(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000) |
+ NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, ADJ1000DIV1001, FALSE),
+
+ HEAD_SET_PIXEL_CLOCK_CONFIGURATION(i),
+ NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_CONFIGURATION, MODE, CLK_CUSTOM) |
+ NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_CONFIGURATION, NOT_DRIVER, FALSE) |
+ NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_CONFIGURATION, ENABLE_HOPPING, FALSE),
+
+ HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
+ NVVAL(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000) |
+ NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, ADJ1000DIV1001, FALSE));
+ return 0;
}
-void
+int
head907d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 8))) {
- evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
- evo_data(push, asyh->view.iH << 16 | asyh->view.iW);
- evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
- evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
- evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
- evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 8)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER(i),
+ NVDEF(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, VERTICAL_TAPS, TAPS_1) |
+ NVDEF(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, HORIZONTAL_TAPS, TAPS_1) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, HRESPONSE_BIAS, 0) |
+ NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, VRESPONSE_BIAS, 0));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_VIEWPORT_SIZE_IN(i),
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
+
+ PUSH_MTHD(push, NV907D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH),
+
+ HEAD_SET_VIEWPORT_SIZE_OUT_MIN(i),
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, WIDTH, asyh->view.oW) |
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, HEIGHT, asyh->view.oH),
+
+ HEAD_SET_VIEWPORT_SIZE_OUT_MAX(i),
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MAX, WIDTH, asyh->view.oW) |
+ NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MAX, HEIGHT, asyh->view.oH));
+ return 0;
}
const struct nv50_head_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index 76958cedd51f..a5d827403660 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -22,45 +22,55 @@
#include "head.h"
#include "core.h"
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl917d.h>
+
+static int
head917d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
- evo_data(push, asyh->dither.mode << 3 |
- asyh->dither.bits << 1 |
- asyh->dither.enable);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_DITHER_CONTROL(i),
+ NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+ NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+ NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+ NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+ return 0;
}
-static void
+static int
head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
u32 bounds = 0;
- u32 *push;
+ int ret;
if (asyh->base.cpp) {
switch (asyh->base.cpp) {
- case 8: bounds |= 0x00000500; break;
- case 4: bounds |= 0x00000300; break;
- case 2: bounds |= 0x00000100; break;
- case 1: bounds |= 0x00000000; break;
+ case 8: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
+ case 4: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
+ case 2: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
+ case 1: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_8); break;
default:
WARN_ON(1);
break;
}
- bounds |= 0x00020001;
+ bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
+ bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, BASE_LUT, USAGE_1025);
}
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
- evo_data(push, bounds);
- evo_kick(push, core);
- }
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
+ return 0;
}
int
@@ -68,10 +78,10 @@ head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
switch (asyw->state.fb->width) {
- case 32: asyh->curs.layout = 0; break;
- case 64: asyh->curs.layout = 1; break;
- case 128: asyh->curs.layout = 2; break;
- case 256: asyh->curs.layout = 3; break;
+ case 32: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32; break;
+ case 64: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64; break;
+ case 128: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128; break;
+ case 256: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256; break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
index 35fcdf8825b5..63adfeba50e5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -23,96 +23,131 @@
#include "atom.h"
#include "core.h"
-static void
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc37d.h>
+
+static int
headc37d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
u8 depth;
- u32 *push;
-
- if ((push = evo_wait(core, 2))) {
- /*XXX: This is a dirty hack until OR depth handling is
- * improved later for deep colour etc.
- */
- switch (asyh->or.depth) {
- case 6: depth = 5; break;
- case 5: depth = 4; break;
- case 2: depth = 1; break;
- case 0: depth = 4; break;
- default:
- depth = asyh->or.depth;
- WARN_ON(1);
- break;
- }
-
- evo_mthd(push, 0x2004 + (head->base.index * 0x400), 1);
- evo_data(push, depth << 4 |
- asyh->or.nvsync << 3 |
- asyh->or.nhsync << 2 |
- asyh->or.crc_raster);
- evo_kick(push, core);
+ int ret;
+
+ /*XXX: This is a dirty hack until OR depth handling is
+ * improved later for deep colour etc.
+ */
+ switch (asyh->or.depth) {
+ case 6: depth = 5; break;
+ case 5: depth = 4; break;
+ case 2: depth = 1; break;
+ case 0: depth = 4; break;
+ default:
+ depth = asyh->or.depth;
+ WARN_ON(1);
+ break;
}
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
+ NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, depth) |
+ NVDEF(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, COLOR_SPACE_OVERRIDE, DISABLE));
+ return 0;
}
-static void
+static int
headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x2000 + (head->base.index * 0x400), 1);
- evo_data(push, 0x80000000 |
- asyh->procamp.sat.sin << 16 |
- asyh->procamp.sat.cos << 4);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_PROCAMP(i),
+ NVDEF(NVC37D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+ NVDEF(NVC37D, HEAD_SET_PROCAMP, CHROMA_LPF, DISABLE) |
+ NVVAL(NVC37D, HEAD_SET_PROCAMP, SAT_COS, asyh->procamp.sat.cos) |
+ NVVAL(NVC37D, HEAD_SET_PROCAMP, SAT_SINE, asyh->procamp.sat.sin) |
+ NVDEF(NVC37D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA) |
+ NVDEF(NVC37D, HEAD_SET_PROCAMP, RANGE_COMPRESSION, DISABLE) |
+ NVDEF(NVC37D, HEAD_SET_PROCAMP, BLACK_LEVEL, GRAPHICS));
+ return 0;
}
-void
+int
headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x2018 + (head->base.index * 0x0400), 1);
- evo_data(push, asyh->dither.mode << 8 |
- asyh->dither.bits << 4 |
- asyh->dither.enable);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_DITHER_CONTROL(i),
+ NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+ NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+ NVDEF(NVC37D, HEAD_SET_DITHER_CONTROL, OFFSET_ENABLE, DISABLE) |
+ NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+ NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+ return 0;
}
-void
+int
headc37d_curs_clr(struct nv50_head *head)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 4))) {
- evo_mthd(push, 0x209c + head->base.index * 0x400, 1);
- evo_data(push, 0x000000cf);
- evo_mthd(push, 0x2088 + head->base.index * 0x400, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+ NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8));
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CURSOR(i, 0), 0x00000000);
+ return 0;
}
-void
+int
headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 7))) {
- evo_mthd(push, 0x209c + head->base.index * 0x400, 2);
- evo_data(push, 0x80000000 |
- asyh->curs.layout << 8 |
- asyh->curs.format << 0);
- evo_data(push, 0x000072ff);
- evo_mthd(push, 0x2088 + head->base.index * 0x400, 1);
- evo_data(push, asyh->curs.handle);
- evo_mthd(push, 0x2090 + head->base.index * 0x400, 1);
- evo_data(push, asyh->curs.offset >> 8);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 7)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+ NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, DE_GAMMA, NONE),
+
+ HEAD_SET_CONTROL_CURSOR_COMPOSITION(i),
+ NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, K1, 0xff) |
+ NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, CURSOR_COLOR_FACTOR_SELECT,
+ K1) |
+ NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, VIEWPORT_COLOR_FACTOR_SELECT,
+ NEG_K1_TIMES_SRC) |
+ NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, MODE, BLEND));
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CURSOR(i, 0), asyh->curs.handle);
+ PUSH_MTHD(push, NVC37D, HEAD_SET_OFFSET_CURSOR(i, 0), asyh->curs.offset >> 8);
+ return 0;
}
int
@@ -123,32 +158,38 @@ headc37d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
return 0;
}
-static void
+static int
headc37d_olut_clr(struct nv50_head *head)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x20ac + (head->base.index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(i), 0x00000000);
+ return 0;
}
-static void
+static int
headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 4))) {
- evo_mthd(push, 0x20a4 + (head->base.index * 0x400), 3);
- evo_data(push, asyh->olut.output_mode << 8 |
- asyh->olut.range << 4 |
- asyh->olut.size);
- evo_data(push, asyh->olut.offset >> 8);
- evo_data(push, asyh->olut.handle);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT(i),
+ NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT, SIZE, asyh->olut.size) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT, RANGE, asyh->olut.range) |
+ NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT, OUTPUT_MODE, asyh->olut.output_mode),
+
+ HEAD_SET_OFFSET_OUTPUT_LUT(i), asyh->olut.offset >> 8,
+ HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(i), asyh->olut.handle);
+ return 0;
}
static bool
@@ -157,51 +198,77 @@ headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
if (size != 256 && size != 1024)
return false;
- asyh->olut.mode = 2;
- asyh->olut.size = size == 1024 ? 2 : 0;
- asyh->olut.range = 0;
- asyh->olut.output_mode = 1;
+ asyh->olut.size = size == 1024 ? NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_1025 :
+ NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_257;
+ asyh->olut.range = NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_UNITY;
+ asyh->olut.output_mode = NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE_INTERPOLATE;
asyh->olut.load = head907d_olut_load;
return true;
}
-static void
+static int
headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
- u32 *push;
- if ((push = evo_wait(core, 13))) {
- evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
- evo_data(push, (m->v.active << 16) | m->h.active );
- evo_data(push, (m->v.synce << 16) | m->h.synce );
- evo_data(push, (m->v.blanke << 16) | m->h.blanke );
- evo_data(push, (m->v.blanks << 16) | m->h.blanks );
- evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
- evo_mthd(push, 0x2008 + (head->base.index * 0x400), 2);
- evo_data(push, m->interlace);
- evo_data(push, m->clock * 1000);
- evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
- evo_data(push, m->clock * 1000);
- /*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
- evo_mthd(push, 0x2030 + (head->base.index * 0x400), 1);
- evo_data(push, 0x00000124);
- evo_kick(push, core);
- }
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 15)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_RASTER_SIZE(i),
+ NVVAL(NVC37D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+ NVVAL(NVC37D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+ HEAD_SET_RASTER_SYNC_END(i),
+ NVVAL(NVC37D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+ NVVAL(NVC37D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+ HEAD_SET_RASTER_BLANK_END(i),
+ NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+ NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+ HEAD_SET_RASTER_BLANK_START(i),
+ NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+ NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks));
+
+ //XXX:
+ PUSH_NVSQ(push, NVC37D, 0x2074 + (i * 0x400), m->v.blank2e << 16 | m->v.blank2s);
+ PUSH_NVSQ(push, NVC37D, 0x2008 + (i * 0x400), m->interlace);
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
+ NVVAL(NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000));
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
+ NVVAL(NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000));
+
+ /*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
+ PUSH_MTHD(push, NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS(i),
+ NVDEF(NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS, CURSOR, USAGE_W256_H256) |
+ NVDEF(NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS, OUTPUT_LUT, USAGE_1025) |
+ NVDEF(NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS, UPSCALING_ALLOWED, TRUE));
+ return 0;
}
-void
+int
headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 4))) {
- evo_mthd(push, 0x204c + (head->base.index * 0x400), 1);
- evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
- evo_mthd(push, 0x2058 + (head->base.index * 0x400), 1);
- evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_VIEWPORT_SIZE_IN(i),
+ NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
+ NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
+
+ PUSH_MTHD(push, NVC37D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
+ NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
+ NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH));
+ return 0;
}
void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index c7d04dd935fd..fd51527b56b8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -23,83 +23,97 @@
#include "atom.h"
#include "core.h"
-static void
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57d.h>
+
+static int
headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
u8 depth;
- u32 *push;
-
- if ((push = evo_wait(core, 2))) {
- /*XXX: This is a dirty hack until OR depth handling is
- * improved later for deep colour etc.
- */
- switch (asyh->or.depth) {
- case 6: depth = 5; break;
- case 5: depth = 4; break;
- case 2: depth = 1; break;
- case 0: depth = 4; break;
- default:
- depth = asyh->or.depth;
- WARN_ON(1);
- break;
- }
+ int ret;
- evo_mthd(push, 0x2004 + (head->base.index * 0x400), 1);
- evo_data(push, 0xfc000000 |
- depth << 4 |
- asyh->or.nvsync << 3 |
- asyh->or.nhsync << 2 |
- asyh->or.crc_raster);
- evo_kick(push, core);
+ /*XXX: This is a dirty hack until OR depth handling is
+ * improved later for deep colour etc.
+ */
+ switch (asyh->or.depth) {
+ case 6: depth = 5; break;
+ case 5: depth = 4; break;
+ case 2: depth = 1; break;
+ case 0: depth = 4; break;
+ default:
+ depth = asyh->or.depth;
+ WARN_ON(1);
+ break;
}
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
+ NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
+ NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
+ NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
+ NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, depth) |
+ NVDEF(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, COLOR_SPACE_OVERRIDE, DISABLE) |
+ NVDEF(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, EXT_PACKET_WIN, NONE));
+ return 0;
}
-static void
+static int
headc57d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x2000 + (head->base.index * 0x400), 1);
-#if 0
- evo_data(push, 0x80000000 |
- asyh->procamp.sat.sin << 16 |
- asyh->procamp.sat.cos << 4);
-#else
- evo_data(push, 0);
-#endif
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ //TODO:
+ PUSH_MTHD(push, NVC57D, HEAD_SET_PROCAMP(i),
+ NVDEF(NVC57D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+ NVDEF(NVC57D, HEAD_SET_PROCAMP, CHROMA_LPF, DISABLE) |
+ NVDEF(NVC57D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA));
+ return 0;
}
-void
+static int
headc57d_olut_clr(struct nv50_head *head)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 2))) {
- evo_mthd(push, 0x2288 + (head->base.index * 0x400), 1);
- evo_data(push, 0x00000000);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57D, HEAD_SET_CONTEXT_DMA_OLUT(i), 0x00000000);
+ return 0;
}
-void
+static int
headc57d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
- u32 *push;
- if ((push = evo_wait(core, 4))) {
- evo_mthd(push, 0x2280 + (head->base.index * 0x400), 4);
- evo_data(push, asyh->olut.size << 8 |
- asyh->olut.mode << 2 |
- asyh->olut.output_mode);
- evo_data(push, 0xffffffff); /* FP_NORM_SCALE. */
- evo_data(push, asyh->olut.handle);
- evo_data(push, asyh->olut.offset >> 8);
- evo_kick(push, core);
- }
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57D, HEAD_SET_OLUT_CONTROL(i),
+ NVVAL(NVC57D, HEAD_SET_OLUT_CONTROL, INTERPOLATE, asyh->olut.output_mode) |
+ NVDEF(NVC57D, HEAD_SET_OLUT_CONTROL, MIRROR, DISABLE) |
+ NVVAL(NVC57D, HEAD_SET_OLUT_CONTROL, MODE, asyh->olut.mode) |
+ NVVAL(NVC57D, HEAD_SET_OLUT_CONTROL, SIZE, asyh->olut.size),
+
+ HEAD_SET_OLUT_FP_NORM_SCALE(i), 0xffffffff,
+ HEAD_SET_CONTEXT_DMA_OLUT(i), asyh->olut.handle,
+ HEAD_SET_OFFSET_OLUT(i), asyh->olut.offset >> 8);
+ return 0;
}
static void
@@ -161,9 +175,9 @@ headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
if (size != 0 && size != 256 && size != 1024)
return false;
- asyh->olut.mode = 2; /* DIRECT10 */
+ asyh->olut.mode = NVC57D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10;
asyh->olut.size = 4 /* VSS header. */ + 1024 + 1 /* Entries. */;
- asyh->olut.output_mode = 1; /* INTERPOLATE_ENABLE. */
+ asyh->olut.output_mode = NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE;
if (size == 256)
asyh->olut.load = headc57d_olut_load_8;
else
@@ -171,29 +185,50 @@ headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
return true;
}
-static void
+static int
headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
- u32 *push;
- if ((push = evo_wait(core, 13))) {
- evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
- evo_data(push, (m->v.active << 16) | m->h.active );
- evo_data(push, (m->v.synce << 16) | m->h.synce );
- evo_data(push, (m->v.blanke << 16) | m->h.blanke );
- evo_data(push, (m->v.blanks << 16) | m->h.blanks );
- evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
- evo_mthd(push, 0x2008 + (head->base.index * 0x400), 2);
- evo_data(push, m->interlace);
- evo_data(push, m->clock * 1000);
- evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
- evo_data(push, m->clock * 1000);
- /*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
- evo_mthd(push, 0x2030 + (head->base.index * 0x400), 1);
- evo_data(push, 0x00001014);
- evo_kick(push, core);
- }
+ const int i = head->base.index;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 15)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57D, HEAD_SET_RASTER_SIZE(i),
+ NVVAL(NVC57D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+ NVVAL(NVC57D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+ HEAD_SET_RASTER_SYNC_END(i),
+ NVVAL(NVC57D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+ NVVAL(NVC57D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+ HEAD_SET_RASTER_BLANK_END(i),
+ NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+ NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+ HEAD_SET_RASTER_BLANK_START(i),
+ NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+ NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks));
+
+ //XXX:
+ PUSH_NVSQ(push, NVC57D, 0x2074 + (i * 0x400), m->v.blank2e << 16 | m->v.blank2s);
+ PUSH_NVSQ(push, NVC57D, 0x2008 + (i * 0x400), m->interlace);
+
+ PUSH_MTHD(push, NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
+ NVVAL(NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000));
+
+ PUSH_MTHD(push, NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
+ NVVAL(NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000));
+
+ /*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
+ PUSH_MTHD(push, NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS(i),
+ NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, CURSOR, USAGE_W256_H256) |
+ NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, OLUT_ALLOWED, TRUE) |
+ NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, OUTPUT_SCALER_TAPS, TAPS_2) |
+ NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, UPSCALING_ALLOWED, TRUE));
+ return 0;
}
const struct nv50_head_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.c b/drivers/gpu/drm/nouveau/dispnv50/lut.c
index 4e95ca5604ab..6b2ad1e6eab9 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/lut.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.c
@@ -60,7 +60,7 @@ nv50_lut_fini(struct nv50_lut *lut)
{
int i;
for (i = 0; i < ARRAY_SIZE(lut->mem); i++)
- nvif_mem_fini(&lut->mem[i]);
+ nvif_mem_dtor(&lut->mem[i]);
}
int
@@ -70,8 +70,8 @@ nv50_lut_init(struct nv50_disp *disp, struct nvif_mmu *mmu,
const u32 size = disp->disp->object.oclass < GF110_DISP ? 257 : 1025;
int i;
for (i = 0; i < ARRAY_SIZE(lut->mem); i++) {
- int ret = nvif_mem_init_map(mmu, NVIF_MEM_VRAM, size * 8,
- &lut->mem[i]);
+ int ret = nvif_mem_ctor_map(mmu, "kmsLut", NVIF_MEM_VRAM,
+ size * 8, &lut->mem[i]);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c b/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
index 2ee404b3e19f..a6c3a9b95bdb 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
@@ -33,8 +33,8 @@ oimm507b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
struct nv50_disp *disp = nv50_disp(drm->dev);
int ret;
- ret = nvif_object_init(&disp->disp->object, 0, oclass, &args,
- sizeof(args), &wndw->wimm.base.user);
+ ret = nvif_object_ctor(&disp->disp->object, "kmsOvim", 0, oclass,
+ &args, sizeof(args), &wndw->wimm.base.user);
if (ret) {
NV_ERROR(drm, "oimm%04x allocation failed: %d\n", oclass, ret);
return ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly.h b/drivers/gpu/drm/nouveau/dispnv50/ovly.h
index 4869d52d1786..6ae1fbe12ca7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly.h
@@ -10,11 +10,7 @@ int ovly507e_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
struct nv50_head_atom *);
void ovly507e_release(struct nv50_wndw *, struct nv50_wndw_atom *,
struct nv50_head_atom *);
-void ovly507e_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void ovly507e_ntfy_clr(struct nv50_wndw *);
-void ovly507e_image_clr(struct nv50_wndw *);
-void ovly507e_scale_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void ovly507e_update(struct nv50_wndw *, u32 *);
+int ovly507e_scale_set(struct nv50_wndw *, struct nv50_wndw_atom *);
extern const u32 ovly827e_format[];
void ovly827e_ntfy_reset(struct nouveau_bo *, u32);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
index 4cce1078140a..afd6c7271de1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
@@ -28,91 +28,68 @@
#include <nvif/cl507e.h>
#include <nvif/event.h>
+#include <nvif/push507c.h>
-void
-ovly507e_update(struct nv50_wndw *wndw, u32 *interlock)
-{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 2))) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, interlock[NV50_DISP_INTERLOCK_CORE]);
- evo_kick(push, &wndw->wndw);
- }
-}
+#include <nvhw/class/cl507e.h>
-void
+int
ovly507e_scale_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 4))) {
- evo_mthd(push, 0x00e0, 3);
- evo_data(push, asyw->scale.sy << 16 | asyw->scale.sx);
- evo_data(push, asyw->scale.sh << 16 | asyw->scale.sw);
- evo_data(push, asyw->scale.dw);
- evo_kick(push, &wndw->wndw);
- }
-}
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
-void
-ovly507e_image_clr(struct nv50_wndw *wndw)
-{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 4))) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, &wndw->wndw);
- }
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NV507E, SET_POINT_IN,
+ NVVAL(NV507E, SET_POINT_IN, X, asyw->scale.sx) |
+ NVVAL(NV507E, SET_POINT_IN, Y, asyw->scale.sy),
+
+ SET_SIZE_IN,
+ NVVAL(NV507E, SET_SIZE_IN, WIDTH, asyw->scale.sw) |
+ NVVAL(NV507E, SET_SIZE_IN, HEIGHT, asyw->scale.sh),
+
+ SET_SIZE_OUT,
+ NVVAL(NV507E, SET_SIZE_OUT, WIDTH, asyw->scale.dw));
+ return 0;
}
-static void
+static int
ovly507e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 12))) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, asyw->image.interval << 4);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, asyw->image.handle[0]);
- evo_mthd(push, 0x0100, 1);
- evo_data(push, 0x00000002);
- evo_mthd(push, 0x0800, 1);
- evo_data(push, asyw->image.offset[0] >> 8);
- evo_mthd(push, 0x0808, 3);
- evo_data(push, asyw->image.h << 16 | asyw->image.w);
- evo_data(push, asyw->image.layout << 20 |
- (asyw->image.pitch[0] >> 8) << 8 |
- asyw->image.blocks[0] << 8 |
- asyw->image.blockh);
- evo_data(push, asyw->image.kind << 16 |
- asyw->image.format << 8 |
- asyw->image.colorspace);
- evo_kick(push, &wndw->wndw);
- }
-}
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
-void
-ovly507e_ntfy_clr(struct nv50_wndw *wndw)
-{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 2))) {
- evo_mthd(push, 0x00a4, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, &wndw->wndw);
- }
-}
+ if ((ret = PUSH_WAIT(push, 12)))
+ return ret;
-void
-ovly507e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
-{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 3))) {
- evo_mthd(push, 0x00a0, 2);
- evo_data(push, asyw->ntfy.awaken << 30 | asyw->ntfy.offset);
- evo_data(push, asyw->ntfy.handle);
- evo_kick(push, &wndw->wndw);
- }
+ PUSH_MTHD(push, NV507E, SET_PRESENT_CONTROL,
+ NVDEF(NV507E, SET_PRESENT_CONTROL, BEGIN_MODE, ASAP) |
+ NVVAL(NV507E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+ PUSH_MTHD(push, NV507E, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
+
+ PUSH_MTHD(push, NV507E, SET_COMPOSITION_CONTROL,
+ NVDEF(NV507E, SET_COMPOSITION_CONTROL, MODE, OPAQUE_SUSPEND_BASE));
+
+ PUSH_MTHD(push, NV507E, SURFACE_SET_OFFSET, asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NV507E, SURFACE_SET_SIZE,
+ NVVAL(NV507E, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NV507E, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+ SURFACE_SET_STORAGE,
+ NVVAL(NV507E, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+ NVVAL(NV507E, SURFACE_SET_STORAGE, PITCH, (asyw->image.pitch[0] >> 8)) |
+ NVVAL(NV507E, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NV507E, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+ SURFACE_SET_PARAMS,
+ NVVAL(NV507E, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+ NVVAL(NV507E, SURFACE_SET_PARAMS, COLOR_SPACE, asyw->image.colorspace) |
+ NVVAL(NV507E, SURFACE_SET_PARAMS, KIND, asyw->image.kind) |
+ NVDEF(NV507E, SURFACE_SET_PARAMS, PART_STRIDE, PARTSTRIDE_256));
+ return 0;
}
void
@@ -146,14 +123,14 @@ static const struct nv50_wndw_func
ovly507e = {
.acquire = ovly507e_acquire,
.release = ovly507e_release,
- .ntfy_set = ovly507e_ntfy_set,
- .ntfy_clr = ovly507e_ntfy_clr,
+ .ntfy_set = base507c_ntfy_set,
+ .ntfy_clr = base507c_ntfy_clr,
.ntfy_reset = base507c_ntfy_reset,
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.image_set = ovly507e_image_set,
- .image_clr = ovly507e_image_clr,
+ .image_clr = base507c_image_clr,
.scale_set = ovly507e_scale_set,
- .update = ovly507e_update,
+ .update = base507c_update,
};
static const u32
@@ -192,7 +169,8 @@ ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
return ret;
}
- ret = nvif_notify_init(&wndw->wndw.base.user, wndw->notify.func, false,
+ ret = nvif_notify_ctor(&wndw->wndw.base.user, "kmsOvlyNtfy",
+ wndw->notify.func, false,
NV50_DISP_OVERLAY_CHANNEL_DMA_V0_NTFY_UEVENT,
&(struct nvif_notify_uevent_req) {},
sizeof(struct nvif_notify_uevent_req),
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
index 4f7ce57f2036..02dc02d9260f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
@@ -24,31 +24,45 @@
#include <nouveau_bo.h>
+#include <nvif/push507c.h>
#include <nvif/timer.h>
-static void
+#include <nvhw/class/cl827e.h>
+
+static int
ovly827e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 12))) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, asyw->image.interval << 4);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, asyw->image.handle[0]);
- evo_mthd(push, 0x0100, 1);
- evo_data(push, 0x00000002);
- evo_mthd(push, 0x0800, 1);
- evo_data(push, asyw->image.offset[0] >> 8);
- evo_mthd(push, 0x0808, 3);
- evo_data(push, asyw->image.h << 16 | asyw->image.w);
- evo_data(push, asyw->image.layout << 20 |
- (asyw->image.pitch[0] >> 8) << 8 |
- asyw->image.blocks[0] << 8 |
- asyw->image.blockh);
- evo_data(push, asyw->image.format << 8 |
- asyw->image.colorspace);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 12)))
+ return ret;
+
+ PUSH_MTHD(push, NV827E, SET_PRESENT_CONTROL,
+ NVDEF(NV827E, SET_PRESENT_CONTROL, BEGIN_MODE, ASAP) |
+ NVVAL(NV827E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+ PUSH_MTHD(push, NV827E, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
+
+ PUSH_MTHD(push, NV827E, SET_COMPOSITION_CONTROL,
+ NVDEF(NV827E, SET_COMPOSITION_CONTROL, MODE, OPAQUE_SUSPEND_BASE));
+
+ PUSH_MTHD(push, NV827E, SURFACE_SET_OFFSET, asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NV827E, SURFACE_SET_SIZE,
+ NVVAL(NV827E, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NV827E, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+ SURFACE_SET_STORAGE,
+ NVVAL(NV827E, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+ NVVAL(NV827E, SURFACE_SET_STORAGE, PITCH, (asyw->image.pitch[0] >> 8)) |
+ NVVAL(NV827E, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NV827E, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+ SURFACE_SET_PARAMS,
+ NVVAL(NV827E, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+ NVVAL(NV827E, SURFACE_SET_PARAMS, COLOR_SPACE, asyw->image.colorspace));
+ return 0;
}
int
@@ -56,8 +70,7 @@ ovly827e_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
struct nvif_device *device)
{
s64 time = nvif_msec(device, 2000ULL,
- u32 data = nouveau_bo_rd32(bo, offset / 4 + 3);
- if ((data & 0xffff0000) == 0xffff0000)
+ if (NVBO_TD32(bo, offset, NV_DISP_NOTIFICATION_1, _3, STATUS, ==, BEGUN))
break;
usleep_range(1, 2);
);
@@ -67,24 +80,25 @@ ovly827e_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
void
ovly827e_ntfy_reset(struct nouveau_bo *bo, u32 offset)
{
- nouveau_bo_wr32(bo, offset / 4 + 0, 0x00000000);
- nouveau_bo_wr32(bo, offset / 4 + 1, 0x00000000);
- nouveau_bo_wr32(bo, offset / 4 + 2, 0x00000000);
- nouveau_bo_wr32(bo, offset / 4 + 3, 0x80000000);
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, TIME_STAMP_0, 0);
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, TIME_STAMP_1, 0);
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, _2, 0);
+ NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, _3,
+ NVDEF(NV_DISP_NOTIFICATION_1, _3, STATUS, NOT_BEGUN));
}
static const struct nv50_wndw_func
ovly827e = {
.acquire = ovly507e_acquire,
.release = ovly507e_release,
- .ntfy_set = ovly507e_ntfy_set,
- .ntfy_clr = ovly507e_ntfy_clr,
+ .ntfy_set = base507c_ntfy_set,
+ .ntfy_clr = base507c_ntfy_clr,
.ntfy_reset = ovly827e_ntfy_reset,
.ntfy_wait_begun = ovly827e_ntfy_wait_begun,
.image_set = ovly827e_image_set,
- .image_clr = ovly507e_image_clr,
+ .image_clr = base507c_image_clr,
.scale_set = ovly507e_scale_set,
- .update = ovly507e_update,
+ .update = base507c_update,
};
const u32
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
index 9efe5e9d5ce4..645130d18a99 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
@@ -22,43 +22,58 @@
#include "ovly.h"
#include "atom.h"
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl907e.h>
+
+static int
ovly907e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 12))) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, asyw->image.interval << 4);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, asyw->image.handle[0]);
- evo_mthd(push, 0x0100, 1);
- evo_data(push, 0x00000002);
- evo_mthd(push, 0x0400, 1);
- evo_data(push, asyw->image.offset[0] >> 8);
- evo_mthd(push, 0x0408, 3);
- evo_data(push, asyw->image.h << 16 | asyw->image.w);
- evo_data(push, asyw->image.layout << 24 |
- (asyw->image.pitch[0] >> 8) << 8 |
- asyw->image.blocks[0] << 8 |
- asyw->image.blockh);
- evo_data(push, asyw->image.format << 8 |
- asyw->image.colorspace);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 12)))
+ return ret;
+
+ PUSH_MTHD(push, NV907E, SET_PRESENT_CONTROL,
+ NVDEF(NV907E, SET_PRESENT_CONTROL, BEGIN_MODE, ASAP) |
+ NVVAL(NV907E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
+
+ PUSH_MTHD(push, NV907E, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
+
+ PUSH_MTHD(push, NV907E, SET_COMPOSITION_CONTROL,
+ NVDEF(NV907E, SET_COMPOSITION_CONTROL, MODE, OPAQUE));
+
+ PUSH_MTHD(push, NV907E, SURFACE_SET_OFFSET, asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NV907E, SURFACE_SET_SIZE,
+ NVVAL(NV907E, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NV907E, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
+
+ SURFACE_SET_STORAGE,
+ NVVAL(NV907E, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+ NVVAL(NV907E, SURFACE_SET_STORAGE, PITCH, (asyw->image.pitch[0] >> 8)) |
+ NVVAL(NV907E, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NV907E, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+ SURFACE_SET_PARAMS,
+ NVVAL(NV907E, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
+ NVVAL(NV907E, SURFACE_SET_PARAMS, COLOR_SPACE, asyw->image.colorspace));
+ return 0;
}
const struct nv50_wndw_func
ovly907e = {
.acquire = ovly507e_acquire,
.release = ovly507e_release,
- .ntfy_set = ovly507e_ntfy_set,
- .ntfy_clr = ovly507e_ntfy_clr,
+ .ntfy_set = base507c_ntfy_set,
+ .ntfy_clr = base507c_ntfy_clr,
.ntfy_reset = ovly827e_ntfy_reset,
.ntfy_wait_begun = ovly827e_ntfy_wait_begun,
.image_set = ovly907e_image_set,
- .image_clr = ovly507e_image_clr,
+ .image_clr = base507c_image_clr,
.scale_set = ovly507e_scale_set,
- .update = ovly507e_update,
+ .update = base507c_update,
};
static const u32
diff --git a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
index 45d8ce7d2c28..17d230256bdd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
@@ -21,21 +21,29 @@
*/
#include "core.h"
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl507d.h>
+#include <nvhw/class/cl837d.h>
+
+static int
pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- u32 *push;
- if ((push = evo_wait(&core->chan, 2))) {
- if (asyh) {
- ctrl |= asyh->or.depth << 16;
- ctrl |= asyh->or.nvsync << 13;
- ctrl |= asyh->or.nhsync << 12;
- }
- evo_mthd(push, 0x0700 + (or * 0x040), 1);
- evo_data(push, ctrl);
- evo_kick(push, &core->chan);
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if (asyh) {
+ ctrl |= NVVAL(NV507D, PIOR_SET_CONTROL, HSYNC_POLARITY, asyh->or.nhsync);
+ ctrl |= NVVAL(NV507D, PIOR_SET_CONTROL, VSYNC_POLARITY, asyh->or.nvsync);
+ ctrl |= NVVAL(NV837D, PIOR_SET_CONTROL, PIXEL_DEPTH, asyh->or.depth);
}
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, PIOR_SET_CONTROL(or), ctrl);
+ return 0;
}
static void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
index 9a59fa7da00d..ca73d7710885 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
@@ -21,21 +21,29 @@
*/
#include "core.h"
-static void
+#include <nvif/push507c.h>
+
+#include <nvhw/class/cl507d.h>
+#include <nvhw/class/cl837d.h>
+
+static int
sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- u32 *push;
- if ((push = evo_wait(&core->chan, 2))) {
- if (asyh) {
- ctrl |= asyh->or.depth << 16;
- ctrl |= asyh->or.nvsync << 13;
- ctrl |= asyh->or.nhsync << 12;
- }
- evo_mthd(push, 0x0600 + (or * 0x40), 1);
- evo_data(push, ctrl);
- evo_kick(push, &core->chan);
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if (asyh) {
+ ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, HSYNC_POLARITY, asyh->or.nhsync);
+ ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, VSYNC_POLARITY, asyh->or.nvsync);
+ ctrl |= NVVAL(NV837D, SOR_SET_CONTROL, PIXEL_DEPTH, asyh->or.depth);
}
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV507D, SOR_SET_CONTROL(or), ctrl);
+ return 0;
}
static void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
index 9577ccf1c809..c86cd8fa61d6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
@@ -21,28 +21,34 @@
*/
#include "core.h"
-#include <nouveau_bo.h>
#include <nvif/class.h>
+#include <nvif/push507c.h>
-static void
+#include <nvhw/class/cl907d.h>
+
+#include <nouveau_bo.h>
+
+static int
sor907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- u32 *push;
- if ((push = evo_wait(&core->chan, 2))) {
- evo_mthd(push, 0x0200 + (or * 0x20), 1);
- evo_data(push, ctrl);
- evo_kick(push, &core->chan);
- }
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NV907D, SOR_SET_CONTROL(or), ctrl);
+ return 0;
}
static void
sor907d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
{
+ struct nouveau_bo *bo = disp->sync;
const int off = or * 2;
- u32 tmp = nouveau_bo_rd32(disp->sync, 0x000014 + off);
-
- outp->caps.dp_interlace = !!(tmp & 0x04000000);
+ outp->caps.dp_interlace =
+ NVBO_RV32(bo, off, NV907D_CORE_NOTIFIER_3, CAPABILITIES_CAP_SOR0_20, DP_INTERLACE);
}
const struct nv50_outp_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
index c86ca955fdcd..9eaef34816da 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
@@ -21,16 +21,22 @@
*/
#include "core.h"
-static void
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc37d.h>
+
+static int
sorc37d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- u32 *push;
- if ((push = evo_wait(&core->chan, 2))) {
- evo_mthd(push, 0x0300 + (or * 0x20), 1);
- evo_data(push, ctrl);
- evo_kick(push, &core->chan);
- }
+ struct nvif_push *push = core->chan.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37D, SOR_SET_CONTROL(or), ctrl);
+ return 0;
}
static void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index f7dbd965e4e7..685b70871324 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -24,30 +24,38 @@
#include "wndw.h"
#include <nvif/clc37b.h>
+#include <nvif/pushc37b.h>
-static void
+#include <nvhw/class/clc37b.h>
+
+static int
wimmc37b_update(struct nv50_wndw *wndw, u32 *interlock)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wimm, 2))) {
- evo_mthd(push, 0x0200, 1);
- if (interlock[NV50_DISP_INTERLOCK_WNDW] & wndw->interlock.data)
- evo_data(push, 0x00000003);
- else
- evo_data(push, 0x00000001);
- evo_kick(push, &wndw->wimm);
- }
+ struct nvif_push *push = wndw->wimm.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37B, UPDATE, 0x00000001 |
+ NVVAL(NVC37B, UPDATE, INTERLOCK_WITH_WINDOW,
+ !!(interlock[NV50_DISP_INTERLOCK_WNDW] & wndw->interlock.data)));
+ return PUSH_KICK(push);
}
-static void
+static int
wimmc37b_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wimm, 2))) {
- evo_mthd(push, 0x0208, 1);
- evo_data(push, asyw->point.y << 16 | asyw->point.x);
- evo_kick(push, &wndw->wimm);
- }
+ struct nvif_push *push = wndw->wimm.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37B, SET_POINT_OUT(0),
+ NVVAL(NVC37B, SET_POINT_OUT, X, asyw->point.x) |
+ NVVAL(NVC37B, SET_POINT_OUT, Y, asyw->point.y));
+ return 0;
}
static const struct nv50_wimm_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 293ccfdba17e..447ecc9fec42 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -26,6 +26,10 @@
#include <nvif/class.h>
#include <nvif/cl0002.h>
+#include <nvhw/class/cl507c.h>
+#include <nvhw/class/cl507e.h>
+#include <nvhw/class/clc37e.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
@@ -35,7 +39,7 @@
static void
nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
{
- nvif_object_fini(&ctxdma->object);
+ nvif_object_dtor(&ctxdma->object);
list_del(&ctxdma->head);
kfree(ctxdma);
}
@@ -94,8 +98,8 @@ nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct drm_framebuffer *fb)
argc += sizeof(args.gf119);
}
- ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
- &args, argc, &ctxdma->object);
+ ret = nvif_object_ctor(wndw->ctxdma.parent, "kmsFbCtxDma", handle,
+ NV_DMA_IN_MEMORY, &args, argc, &ctxdma->object);
if (ret) {
nv50_wndw_ctxdma_del(ctxdma);
return ERR_PTR(ret);
@@ -137,7 +141,7 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
struct nv50_wndw_atom *asyw)
{
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
- asyw->image.mode = 0;
+ asyw->image.mode = NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING;
asyw->image.interval = 1;
}
@@ -201,13 +205,18 @@ static int
nv50_wndw_atomic_check_acquire_yuv(struct nv50_wndw_atom *asyw)
{
switch (asyw->state.fb->format->format) {
- case DRM_FORMAT_YUYV: asyw->image.format = 0x28; break;
- case DRM_FORMAT_UYVY: asyw->image.format = 0x29; break;
+ case DRM_FORMAT_YUYV:
+ asyw->image.format = NV507E_SURFACE_SET_PARAMS_FORMAT_VE8YO8UE8YE8;
+ break;
+ case DRM_FORMAT_UYVY:
+ asyw->image.format = NV507E_SURFACE_SET_PARAMS_FORMAT_YO8VE8YE8UE8;
+ break;
default:
WARN_ON(1);
return -EINVAL;
}
- asyw->image.colorspace = 1;
+
+ asyw->image.colorspace = NV507E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_601;
return 0;
}
@@ -215,24 +224,41 @@ static int
nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
{
switch (asyw->state.fb->format->format) {
- case DRM_FORMAT_C8 : asyw->image.format = 0x1e; break;
- case DRM_FORMAT_XRGB8888 :
- case DRM_FORMAT_ARGB8888 : asyw->image.format = 0xcf; break;
- case DRM_FORMAT_RGB565 : asyw->image.format = 0xe8; break;
- case DRM_FORMAT_XRGB1555 :
- case DRM_FORMAT_ARGB1555 : asyw->image.format = 0xe9; break;
- case DRM_FORMAT_XBGR2101010 :
- case DRM_FORMAT_ABGR2101010 : asyw->image.format = 0xd1; break;
- case DRM_FORMAT_XBGR8888 :
- case DRM_FORMAT_ABGR8888 : asyw->image.format = 0xd5; break;
- case DRM_FORMAT_XRGB2101010 :
- case DRM_FORMAT_ARGB2101010 : asyw->image.format = 0xdf; break;
+ case DRM_FORMAT_C8:
+ asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_I8;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8;
+ break;
+ case DRM_FORMAT_RGB565:
+ asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_R5G6B5;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ asyw->image.format = NVC37E_SET_PARAMS_FORMAT_A2R10G10B10;
+ break;
case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ABGR16161616F: asyw->image.format = 0xca; break;
+ case DRM_FORMAT_ABGR16161616F:
+ asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16;
+ break;
default:
return -EINVAL;
}
- asyw->image.colorspace = 0;
+
+ asyw->image.colorspace = NV507E_SURFACE_SET_PARAMS_COLOR_SPACE_RGB;
return 0;
}
@@ -265,7 +291,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
}
if (asyw->image.kind) {
- asyw->image.layout = 0;
+ asyw->image.layout = NV507C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR;
if (drm->client.device.info.chipset >= 0xc0)
asyw->image.blockh = tile_mode >> 4;
else
@@ -273,8 +299,8 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
asyw->image.blocks[0] = fb->pitches[0] / 64;
asyw->image.pitch[0] = 0;
} else {
- asyw->image.layout = 1;
- asyw->image.blockh = 0;
+ asyw->image.layout = NV507C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH;
+ asyw->image.blockh = NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB;
asyw->image.blocks[0] = 0;
asyw->image.pitch[0] = fb->pitches[0];
}
@@ -283,7 +309,12 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
asyw->image.interval = 1;
else
asyw->image.interval = 0;
- asyw->image.mode = asyw->image.interval ? 0 : 1;
+
+ if (asyw->image.interval)
+ asyw->image.mode = NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING;
+ else
+ asyw->image.mode = NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE;
+
asyw->set.image = wndw->func->image_set != NULL;
}
@@ -303,17 +334,17 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
asyw->blend.k1 = asyw->state.alpha >> 8;
switch (asyw->state.pixel_blend_mode) {
case DRM_MODE_BLEND_PREMULTI:
- asyw->blend.src_color = 2; /* K1 */
- asyw->blend.dst_color = 7; /* NEG_K1_TIMES_SRC */
+ asyw->blend.src_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1;
+ asyw->blend.dst_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC;
break;
case DRM_MODE_BLEND_COVERAGE:
- asyw->blend.src_color = 5; /* K1_TIMES_SRC */
- asyw->blend.dst_color = 7; /* NEG_K1_TIMES_SRC */
+ asyw->blend.src_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC;
+ asyw->blend.dst_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC;
break;
case DRM_MODE_BLEND_PIXEL_NONE:
default:
- asyw->blend.src_color = 2; /* K1 */
- asyw->blend.dst_color = 4; /* NEG_K1 */
+ asyw->blend.src_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1;
+ asyw->blend.dst_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1;
break;
}
if (memcmp(&armw->blend, &asyw->blend, sizeof(asyw->blend)))
@@ -609,7 +640,7 @@ nv50_wndw_destroy(struct drm_plane *plane)
nv50_wndw_ctxdma_del(ctxdma);
}
- nvif_notify_fini(&wndw->notify);
+ nvif_notify_dtor(&wndw->notify);
nv50_dmac_destroy(&wndw->wimm);
nv50_dmac_destroy(&wndw->wndw);
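The blend-factor switch in the wndw.c hunk above replaces the raw factor indices (2, 4, 5, 7) with the NVC37E_SET_COMPOSITION_FACTOR_SELECT_* names, where K1 is the constant plane alpha programmed from asyw->state.alpha and "SRC" in the factor names refers to the per-pixel source alpha. Under that reading, which follows the standard DRM definition of the three pixel blend modes rather than anything stated in this patch, the selected factors correspond to:

    PREMULTI:    out = K1 * src             + (1 - K1 * src_alpha) * dst
    COVERAGE:    out = K1 * src_alpha * src + (1 - K1 * src_alpha) * dst
    PIXEL_NONE:  out = K1 * src             + (1 - K1)             * dst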
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index a7412b9d3a98..3278e2880034 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -57,48 +57,59 @@ struct nv50_wndw_func {
void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
struct nv50_wndw_atom *asyw);
- void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
- void (*sema_clr)(struct nv50_wndw *);
+ int (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ int (*sema_clr)(struct nv50_wndw *);
void (*ntfy_reset)(struct nouveau_bo *, u32 offset);
- void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
- void (*ntfy_clr)(struct nv50_wndw *);
+ int (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ int (*ntfy_clr)(struct nv50_wndw *);
int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
bool (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *, int);
void (*csc)(struct nv50_wndw *, struct nv50_wndw_atom *,
const struct drm_color_ctm *);
- void (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
- void (*csc_clr)(struct nv50_wndw *);
+ int (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ int (*csc_clr)(struct nv50_wndw *);
bool ilut_identity;
int ilut_size;
bool olut_core;
- void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
- void (*xlut_clr)(struct nv50_wndw *);
- void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
- void (*image_clr)(struct nv50_wndw *);
- void (*scale_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
- void (*blend_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
-
- void (*update)(struct nv50_wndw *, u32 *interlock);
+ int (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ int (*xlut_clr)(struct nv50_wndw *);
+ int (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ int (*image_clr)(struct nv50_wndw *);
+ int (*scale_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ int (*blend_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+
+ int (*update)(struct nv50_wndw *, u32 *interlock);
};
extern const struct drm_plane_funcs nv50_wndw;
void base507c_ntfy_reset(struct nouveau_bo *, u32);
+int base507c_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int base507c_ntfy_clr(struct nv50_wndw *);
int base507c_ntfy_wait_begun(struct nouveau_bo *, u32, struct nvif_device *);
+int base507c_image_clr(struct nv50_wndw *);
+int base507c_update(struct nv50_wndw *, u32 *);
void base907c_csc(struct nv50_wndw *, struct nv50_wndw_atom *,
const struct drm_color_ctm *);
struct nv50_wimm_func {
- void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ int (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
- void (*update)(struct nv50_wndw *, u32 *interlock);
+ int (*update)(struct nv50_wndw *, u32 *interlock);
};
extern const struct nv50_wimm_func curs507a;
bool curs507a_space(struct nv50_wndw *);
+static inline __must_check int
+nvif_chan_wait(struct nv50_dmac *dmac, u32 size)
+{
+ struct nv50_wndw *wndw = container_of(dmac, typeof(*wndw), wimm);
+ return curs507a_space(wndw) ? 0 : -ETIMEDOUT;
+}
+
int wndwc37e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
int wndwc37e_new_(const struct nv50_wndw_func *, struct nouveau_drm *,
@@ -108,13 +119,13 @@ int wndwc37e_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
struct nv50_head_atom *);
void wndwc37e_release(struct nv50_wndw *, struct nv50_wndw_atom *,
struct nv50_head_atom *);
-void wndwc37e_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void wndwc37e_sema_clr(struct nv50_wndw *);
-void wndwc37e_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void wndwc37e_ntfy_clr(struct nv50_wndw *);
-void wndwc37e_image_clr(struct nv50_wndw *);
-void wndwc37e_blend_set(struct nv50_wndw *, struct nv50_wndw_atom *);
-void wndwc37e_update(struct nv50_wndw *, u32 *);
+int wndwc37e_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc37e_sema_clr(struct nv50_wndw *);
+int wndwc37e_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc37e_ntfy_clr(struct nv50_wndw *);
+int wndwc37e_image_clr(struct nv50_wndw *);
+int wndwc37e_blend_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc37e_update(struct nv50_wndw *, u32 *);
int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index bb84e4d54a33..57df997c5ff3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -27,48 +27,59 @@
#include <nouveau_bo.h>
#include <nvif/clc37e.h>
+#include <nvif/pushc37b.h>
-static void
+#include <nvhw/class/clc37e.h>
+
+static int
wndwc37e_csc_clr(struct nv50_wndw *wndw)
{
+ return 0;
}
-static void
+static int
wndwc37e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push, i;
- if ((push = evo_wait(&wndw->wndw, 13))) {
- evo_mthd(push, 0x02bc, 12);
- for (i = 0; i < 12; i++)
- evo_data(push, asyw->csc.matrix[i]);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 13)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_CSC_RED2RED, asyw->csc.matrix, 12);
+ return 0;
}
-static void
+static int
wndwc37e_ilut_clr(struct nv50_wndw *wndw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 2))) {
- evo_mthd(push, 0x02b8, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_INPUT_LUT, 0x00000000);
+ return 0;
}
-static void
+static int
wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 4))) {
- evo_mthd(push, 0x02b0, 3);
- evo_data(push, asyw->xlut.i.output_mode << 8 |
- asyw->xlut.i.range << 4 |
- asyw->xlut.i.size);
- evo_data(push, asyw->xlut.i.offset >> 8);
- evo_data(push, asyw->xlut.handle);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_CONTROL_INPUT_LUT,
+ NVVAL(NVC37E, SET_CONTROL_INPUT_LUT, OUTPUT_MODE, asyw->xlut.i.output_mode) |
+ NVVAL(NVC37E, SET_CONTROL_INPUT_LUT, RANGE, asyw->xlut.i.range) |
+ NVVAL(NVC37E, SET_CONTROL_INPUT_LUT, SIZE, asyw->xlut.i.size),
+
+ SET_OFFSET_INPUT_LUT, asyw->xlut.i.offset >> 8,
+ SET_CONTEXT_DMA_INPUT_LUT, asyw->xlut.handle);
+ return 0;
}
static bool
@@ -77,144 +88,206 @@ wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
if (size != 256 && size != 1024)
return false;
- asyw->xlut.i.mode = 2;
- asyw->xlut.i.size = size == 1024 ? 2 : 0;
- asyw->xlut.i.range = 0;
- asyw->xlut.i.output_mode = 1;
+ asyw->xlut.i.size = size == 1024 ? NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_1025 :
+ NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_257;
+ asyw->xlut.i.range = NVC37E_SET_CONTROL_INPUT_LUT_RANGE_UNITY;
+ asyw->xlut.i.output_mode = NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INTERPOLATE;
asyw->xlut.i.load = head907d_olut_load;
return true;
}
-void
+int
wndwc37e_blend_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 8))) {
- evo_mthd(push, 0x02ec, 7);
- evo_data(push, asyw->blend.depth << 4);
- evo_data(push, asyw->blend.k1);
- evo_data(push, asyw->blend.dst_color << 12 |
- asyw->blend.dst_color << 8 |
- asyw->blend.src_color << 4 |
- asyw->blend.src_color);
- evo_data(push, 0xffff0000);
- evo_data(push, 0xffff0000);
- evo_data(push, 0xffff0000);
- evo_data(push, 0xffff0000);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 8)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_COMPOSITION_CONTROL,
+ NVDEF(NVC37E, SET_COMPOSITION_CONTROL, COLOR_KEY_SELECT, DISABLE) |
+ NVVAL(NVC37E, SET_COMPOSITION_CONTROL, DEPTH, asyw->blend.depth),
+
+ SET_COMPOSITION_CONSTANT_ALPHA,
+ NVVAL(NVC37E, SET_COMPOSITION_CONSTANT_ALPHA, K1, asyw->blend.k1) |
+ NVVAL(NVC37E, SET_COMPOSITION_CONSTANT_ALPHA, K2, 0),
+
+ SET_COMPOSITION_FACTOR_SELECT,
+ NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, SRC_COLOR_FACTOR_MATCH_SELECT,
+ asyw->blend.src_color) |
+ NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, SRC_COLOR_FACTOR_NO_MATCH_SELECT,
+ asyw->blend.src_color) |
+ NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, DST_COLOR_FACTOR_MATCH_SELECT,
+ asyw->blend.dst_color) |
+ NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, DST_COLOR_FACTOR_NO_MATCH_SELECT,
+ asyw->blend.dst_color),
+
+ SET_KEY_ALPHA,
+ NVVAL(NVC37E, SET_KEY_ALPHA, MIN, 0x0000) |
+ NVVAL(NVC37E, SET_KEY_ALPHA, MAX, 0xffff),
+
+ SET_KEY_RED_CR,
+ NVVAL(NVC37E, SET_KEY_RED_CR, MIN, 0x0000) |
+ NVVAL(NVC37E, SET_KEY_RED_CR, MAX, 0xffff),
+
+ SET_KEY_GREEN_Y,
+ NVVAL(NVC37E, SET_KEY_GREEN_Y, MIN, 0x0000) |
+ NVVAL(NVC37E, SET_KEY_GREEN_Y, MAX, 0xffff),
+
+ SET_KEY_BLUE_CB,
+ NVVAL(NVC37E, SET_KEY_BLUE_CB, MIN, 0x0000) |
+ NVVAL(NVC37E, SET_KEY_BLUE_CB, MAX, 0xffff));
+ return 0;
}
-void
+int
wndwc37e_image_clr(struct nv50_wndw *wndw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 4))) {
- evo_mthd(push, 0x0308, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0240, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_PRESENT_CONTROL,
+ NVVAL(NVC37E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, 0) |
+ NVDEF(NVC37E, SET_PRESENT_CONTROL, BEGIN_MODE, NON_TEARING));
+
+ PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_ISO(0), 0x00000000);
+ return 0;
}
-static void
+static int
wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
-
- if (!(push = evo_wait(&wndw->wndw, 17)))
- return;
-
- evo_mthd(push, 0x0308, 1);
- evo_data(push, asyw->image.mode << 4 | asyw->image.interval);
- evo_mthd(push, 0x0224, 4);
- evo_data(push, asyw->image.h << 16 | asyw->image.w);
- evo_data(push, asyw->image.layout << 4 | asyw->image.blockh);
- evo_data(push, asyw->csc.valid << 17 |
- asyw->image.colorspace << 8 |
- asyw->image.format);
- evo_data(push, asyw->image.blocks[0] | (asyw->image.pitch[0] >> 6));
- evo_mthd(push, 0x0240, 1);
- evo_data(push, asyw->image.handle[0]);
- evo_mthd(push, 0x0260, 1);
- evo_data(push, asyw->image.offset[0] >> 8);
- evo_mthd(push, 0x0290, 1);
- evo_data(push, (asyw->state.src_y >> 16) << 16 |
- (asyw->state.src_x >> 16));
- evo_mthd(push, 0x0298, 1);
- evo_data(push, (asyw->state.src_h >> 16) << 16 |
- (asyw->state.src_w >> 16));
- evo_mthd(push, 0x02a4, 1);
- evo_data(push, asyw->state.crtc_h << 16 |
- asyw->state.crtc_w);
- evo_kick(push, &wndw->wndw);
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 17)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_PRESENT_CONTROL,
+ NVVAL(NVC37E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+ NVVAL(NVC37E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVDEF(NVC37E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+ PUSH_MTHD(push, NVC37E, SET_SIZE,
+ NVVAL(NVC37E, SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NVC37E, SET_SIZE, HEIGHT, asyw->image.h),
+
+ SET_STORAGE,
+ NVVAL(NVC37E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+ NVVAL(NVC37E, SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+ SET_PARAMS,
+ NVVAL(NVC37E, SET_PARAMS, FORMAT, asyw->image.format) |
+ NVVAL(NVC37E, SET_PARAMS, COLOR_SPACE, asyw->image.colorspace) |
+ NVDEF(NVC37E, SET_PARAMS, INPUT_RANGE, BYPASS) |
+ NVDEF(NVC37E, SET_PARAMS, UNDERREPLICATE, DISABLE) |
+ NVDEF(NVC37E, SET_PARAMS, DE_GAMMA, NONE) |
+ NVVAL(NVC37E, SET_PARAMS, CSC, asyw->csc.valid) |
+ NVDEF(NVC37E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+ NVDEF(NVC37E, SET_PARAMS, SWAP_UV, DISABLE),
+
+ SET_PLANAR_STORAGE(0),
+ NVVAL(NVC37E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NVC37E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+ PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
+ PUSH_MTHD(push, NVC37E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NVC37E, SET_POINT_IN(0),
+ NVVAL(NVC37E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+ NVVAL(NVC37E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+ PUSH_MTHD(push, NVC37E, SET_SIZE_IN,
+ NVVAL(NVC37E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+ NVVAL(NVC37E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+ PUSH_MTHD(push, NVC37E, SET_SIZE_OUT,
+ NVVAL(NVC37E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+ NVVAL(NVC37E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+ return 0;
}
-void
+int
wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 2))) {
- evo_mthd(push, 0x021c, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_NOTIFIER, 0x00000000);
+ return 0;
}
-void
+int
wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 3))) {
- evo_mthd(push, 0x021c, 2);
- evo_data(push, asyw->ntfy.handle);
- evo_data(push, asyw->ntfy.offset | asyw->ntfy.awaken);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 3)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_NOTIFIER, asyw->ntfy.handle,
+
+ SET_NOTIFIER_CONTROL,
+ NVVAL(NVC37E, SET_NOTIFIER_CONTROL, MODE, asyw->ntfy.awaken) |
+ NVVAL(NVC37E, SET_NOTIFIER_CONTROL, OFFSET, asyw->ntfy.offset >> 4));
+ return 0;
}
-void
+int
wndwc37e_sema_clr(struct nv50_wndw *wndw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 2))) {
- evo_mthd(push, 0x0218, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_SEMAPHORE, 0x00000000);
+ return 0;
}
-void
+int
wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 5))) {
- evo_mthd(push, 0x020c, 4);
- evo_data(push, asyw->sema.offset);
- evo_data(push, asyw->sema.acquire);
- evo_data(push, asyw->sema.release);
- evo_data(push, asyw->sema.handle);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_SEMAPHORE_CONTROL, asyw->sema.offset,
+ SET_SEMAPHORE_ACQUIRE, asyw->sema.acquire,
+ SET_SEMAPHORE_RELEASE, asyw->sema.release,
+ SET_CONTEXT_DMA_SEMAPHORE, asyw->sema.handle);
+ return 0;
}
-void
+int
wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 5))) {
- evo_mthd(push, 0x0370, 2);
- evo_data(push, interlock[NV50_DISP_INTERLOCK_CURS] << 1 |
- interlock[NV50_DISP_INTERLOCK_CORE]);
- evo_data(push, interlock[NV50_DISP_INTERLOCK_WNDW]);
- evo_mthd(push, 0x0200, 1);
- if (interlock[NV50_DISP_INTERLOCK_WIMM] & wndw->interlock.data)
- evo_data(push, 0x00001001);
- else
- evo_data(push, 0x00000001);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 5)))
+ return ret;
+
+ PUSH_MTHD(push, NVC37E, SET_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_CURS] << 1 |
+ interlock[NV50_DISP_INTERLOCK_CORE],
+ SET_WINDOW_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_WNDW]);
+
+ PUSH_MTHD(push, NVC37E, UPDATE, 0x00000001 |
+ NVVAL(NVC37E, UPDATE, INTERLOCK_WITH_WIN_IMM,
+ !!(interlock[NV50_DISP_INTERLOCK_WIMM] & wndw->interlock.data)));
+
+ return PUSH_KICK(push);
}
void
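The wndwc37e.c conversion above shows the pattern used throughout this series: the old evo_wait()/evo_mthd()/evo_data()/evo_kick() sequences silently dropped their methods whenever push-buffer space could not be obtained, while the new PUSH_WAIT()/PUSH_MTHD() calls return an error code, so every window callback now returns int and failures can propagate back to the atomic commit path. A minimal standalone sketch of that shape follows; the toy_* names and the struct are hypothetical stand-ins, not the real nvif push API.

#include <errno.h>
#include <stdint.h>

struct toy_push { uint32_t *cur, *end; };   /* stand-in for struct nvif_push */

/* Like PUSH_WAIT(): reserve room for 'dwords' words or fail. */
static int toy_wait(struct toy_push *p, int dwords)
{
	return (p->end - p->cur) >= dwords ? 0 : -ETIMEDOUT;
}

/* Like PUSH_MTHD(): emit one method header and one data word. */
static void toy_mthd(struct toy_push *p, uint32_t mthd, uint32_t data)
{
	*p->cur++ = mthd;
	*p->cur++ = data;
}

/* New-style callback: reserve space first and return the error instead of
 * silently skipping the methods, as the evo_wait() pattern used to do. */
static int toy_image_clr(struct toy_push *p)
{
	int ret;

	if ((ret = toy_wait(p, 2)))
		return ret;

	toy_mthd(p, 0x0240, 0x00000000);   /* illustrative method offset */
	return 0;
}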
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index 1d64741595ba..429be0bb0222 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -27,97 +27,120 @@
#include <nouveau_bo.h>
#include <nvif/clc37e.h>
+#include <nvif/pushc37b.h>
-static void
+#include <nvhw/class/clc57e.h>
+
+static int
wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
-
- if (!(push = evo_wait(&wndw->wndw, 17)))
- return;
-
- evo_mthd(push, 0x0308, 1);
- evo_data(push, asyw->image.mode << 4 | asyw->image.interval);
- evo_mthd(push, 0x0224, 4);
- evo_data(push, asyw->image.h << 16 | asyw->image.w);
- evo_data(push, asyw->image.layout << 4 | asyw->image.blockh);
- evo_data(push, asyw->image.colorspace << 8 |
- asyw->image.format);
- evo_data(push, asyw->image.blocks[0] | (asyw->image.pitch[0] >> 6));
- evo_mthd(push, 0x0240, 1);
- evo_data(push, asyw->image.handle[0]);
- evo_mthd(push, 0x0260, 1);
- evo_data(push, asyw->image.offset[0] >> 8);
- evo_mthd(push, 0x0290, 1);
- evo_data(push, (asyw->state.src_y >> 16) << 16 |
- (asyw->state.src_x >> 16));
- evo_mthd(push, 0x0298, 1);
- evo_data(push, (asyw->state.src_h >> 16) << 16 |
- (asyw->state.src_w >> 16));
- evo_mthd(push, 0x02a4, 1);
- evo_data(push, asyw->state.crtc_h << 16 |
- asyw->state.crtc_w);
- evo_kick(push, &wndw->wndw);
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 17)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57E, SET_PRESENT_CONTROL,
+ NVVAL(NVC57E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+ NVVAL(NVC57E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVDEF(NVC57E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE,
+ NVVAL(NVC57E, SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NVC57E, SET_SIZE, HEIGHT, asyw->image.h),
+
+ SET_STORAGE,
+ NVVAL(NVC57E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
+ NVVAL(NVC57E, SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
+
+ SET_PARAMS,
+ NVVAL(NVC57E, SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NVC57E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+ NVDEF(NVC57E, SET_PARAMS, SWAP_UV, DISABLE) |
+ NVDEF(NVC57E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
+
+ SET_PLANAR_STORAGE(0),
+ NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+ PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
+ PUSH_MTHD(push, NVC57E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
+
+ PUSH_MTHD(push, NVC57E, SET_POINT_IN(0),
+ NVVAL(NVC57E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+ NVVAL(NVC57E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE_IN,
+ NVVAL(NVC57E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+ NVVAL(NVC57E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+ PUSH_MTHD(push, NVC57E, SET_SIZE_OUT,
+ NVVAL(NVC57E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+ NVVAL(NVC57E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+ return 0;
}
-static void
+static int
wndwc57e_csc_clr(struct nv50_wndw *wndw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 13))) {
- evo_mthd(push, 0x0400, 12);
- evo_data(push, 0x00010000);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00010000);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00010000);
- evo_data(push, 0x00000000);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ const u32 identity[12] = {
+ 0x00010000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00010000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00010000, 0x00000000,
+ };
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 1 + ARRAY_SIZE(identity))))
+ return ret;
+
+ PUSH_MTHD(push, NVC57E, SET_FMT_COEFFICIENT_C00, identity, ARRAY_SIZE(identity));
+ return 0;
}
-static void
+static int
wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push, i;
- if ((push = evo_wait(&wndw->wndw, 13))) {
- evo_mthd(push, 0x0400, 12);
- for (i = 0; i < 12; i++)
- evo_data(push, asyw->csc.matrix[i]);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 13)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57E, SET_FMT_COEFFICIENT_C00, asyw->csc.matrix, 12);
+ return 0;
}
-static void
+static int
wndwc57e_ilut_clr(struct nv50_wndw *wndw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 2))) {
- evo_mthd(push, 0x0444, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ILUT, 0x00000000);
+ return 0;
}
-static void
+static int
wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- u32 *push;
- if ((push = evo_wait(&wndw->wndw, 4))) {
- evo_mthd(push, 0x0440, 3);
- evo_data(push, asyw->xlut.i.size << 8 |
- asyw->xlut.i.mode << 2 |
- asyw->xlut.i.output_mode);
- evo_data(push, asyw->xlut.handle);
- evo_data(push, asyw->xlut.i.offset >> 8);
- evo_kick(push, &wndw->wndw);
- }
+ struct nvif_push *push = wndw->wndw.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 4)))
+ return ret;
+
+ PUSH_MTHD(push, NVC57E, SET_ILUT_CONTROL,
+ NVVAL(NVC57E, SET_ILUT_CONTROL, SIZE, asyw->xlut.i.size) |
+ NVVAL(NVC57E, SET_ILUT_CONTROL, MODE, asyw->xlut.i.mode) |
+ NVVAL(NVC57E, SET_ILUT_CONTROL, INTERPOLATE, asyw->xlut.i.output_mode),
+
+ SET_CONTEXT_DMA_ILUT, asyw->xlut.handle,
+ SET_OFFSET_ILUT, asyw->xlut.i.offset >> 8);
+ return 0;
}
static u16
@@ -162,13 +185,13 @@ wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
if (size = size ? size : 1024, size != 256 && size != 1024)
return false;
- if (size == 256) {
- asyw->xlut.i.mode = 1; /* DIRECT8. */
- } else {
- asyw->xlut.i.mode = 2; /* DIRECT10. */
- }
+ if (size == 256)
+ asyw->xlut.i.mode = NVC57E_SET_ILUT_CONTROL_MODE_DIRECT8;
+ else
+ asyw->xlut.i.mode = NVC57E_SET_ILUT_CONTROL_MODE_DIRECT10;
+
asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */;
- asyw->xlut.i.output_mode = 0; /* INTERPOLATE_DISABLE. */
+ asyw->xlut.i.output_mode = NVC57E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE;
asyw->xlut.i.load = wndwc57e_ilut_load;
return true;
}
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/pmu.h b/drivers/gpu/drm/nouveau/include/nvfw/pmu.h
index 452ed7d03827..64a51e7c4543 100644
--- a/drivers/gpu/drm/nouveau/include/nvfw/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvfw/pmu.h
@@ -25,7 +25,7 @@ struct nv_pmu_args {
#define NV_PMU_UNIT_ACR 0x0a
struct nv_pmu_init_msg {
- struct nv_falcon_msg hdr;
+ struct nvfw_falcon_msg hdr;
#define NV_PMU_INIT_MSG_INIT 0x00
u8 msg_type;
@@ -44,7 +44,7 @@ struct nv_pmu_init_msg {
};
struct nv_pmu_acr_cmd {
- struct nv_falcon_cmd hdr;
+ struct nvfw_falcon_cmd hdr;
#define NV_PMU_ACR_CMD_INIT_WPR_REGION 0x00
#define NV_PMU_ACR_CMD_BOOTSTRAP_FALCON 0x01
#define NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS 0x03
@@ -52,7 +52,7 @@ struct nv_pmu_acr_cmd {
};
struct nv_pmu_acr_msg {
- struct nv_falcon_cmd hdr;
+ struct nvfw_falcon_cmd hdr;
u8 msg_type;
};
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/sec2.h b/drivers/gpu/drm/nouveau/include/nvfw/sec2.h
index 03496558b775..9a37ad4179cb 100644
--- a/drivers/gpu/drm/nouveau/include/nvfw/sec2.h
+++ b/drivers/gpu/drm/nouveau/include/nvfw/sec2.h
@@ -13,7 +13,7 @@ struct nv_sec2_args {
#define NV_SEC2_UNIT_ACR 0x08
struct nv_sec2_init_msg {
- struct nv_falcon_msg hdr;
+ struct nvfw_falcon_msg hdr;
#define NV_SEC2_INIT_MSG_INIT 0x00
u8 msg_type;
@@ -34,13 +34,13 @@ struct nv_sec2_init_msg {
};
struct nv_sec2_acr_cmd {
- struct nv_falcon_cmd hdr;
+ struct nvfw_falcon_cmd hdr;
#define NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON 0x00
u8 cmd_type;
};
struct nv_sec2_acr_msg {
- struct nv_falcon_cmd hdr;
+ struct nvfw_falcon_cmd hdr;
u8 msg_type;
};
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl0039.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl0039.h
new file mode 100644
index 000000000000..5386ed64ab72
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl0039.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2001-2001, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl0039_h_
+#define _cl0039_h_
+
+/* dma method offsets, fields, and values */
+#define NV039_SET_OBJECT (0x00000000)
+#define NV039_NO_OPERATION (0x00000100)
+#define NV039_SET_CONTEXT_DMA_NOTIFIES (0x00000180)
+#define NV039_SET_CONTEXT_DMA_BUFFER_IN (0x00000184)
+#define NV039_SET_CONTEXT_DMA_BUFFER_OUT (0x00000188)
+
+#define NV039_OFFSET_IN (0x0000030C)
+#define NV039_OFFSET_OUT (0x00000310)
+#define NV039_PITCH_IN (0x00000314)
+#define NV039_PITCH_OUT (0x00000318)
+#define NV039_LINE_LENGTH_IN (0x0000031C)
+#define NV039_LINE_COUNT (0x00000320)
+#define NV039_FORMAT (0x00000324)
+#define NV039_FORMAT_IN 7:0
+#define NV039_FORMAT_OUT 31:8
+#define NV039_BUFFER_NOTIFY (0x00000328)
+#define NV039_BUFFER_NOTIFY_WRITE_ONLY (0x00000000)
+#define NV039_BUFFER_NOTIFY_WRITE_THEN_AWAKEN (0x00000001)
+#endif /* _cl0039_h_ */
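cl0039.h above, like the other nvhw class headers added by this patch, describes each method field as a HIGH:LOW bit range (for example NV039_FORMAT_IN is 7:0 and NV039_FORMAT_OUT is 31:8) rather than as pre-shifted masks. The NVVAL()/NVDEF() macros used in the wndwc37e.c and wndwc57e.c hunks presumably resolve field names to ranges of this kind and perform the shifting and masking; the sketch below shows how such a pair maps onto a mask-and-shift, using hypothetical FIELD_* macros rather than the kernel's actual helpers.

#include <stdint.h>
#include <stdio.h>

/* Build a mask, packed value, or extracted value from a HIGH:LOW pair. */
#define FIELD_MASK(hi, lo)   ((~0u >> (31 - (hi) + (lo))) << (lo))
#define FIELD_VAL(hi, lo, v) (((uint32_t)(v) << (lo)) & FIELD_MASK(hi, lo))
#define FIELD_GET(hi, lo, r) (((r) & FIELD_MASK(hi, lo)) >> (lo))

int main(void)
{
	/* Pack an NV039_FORMAT word: FORMAT_IN (7:0) = 1, FORMAT_OUT (31:8) = 1. */
	uint32_t fmt = FIELD_VAL(7, 0, 1) | FIELD_VAL(31, 8, 1);

	printf("NV039_FORMAT word: 0x%08x (in=%u out=%u)\n",
	       fmt, FIELD_GET(7, 0, fmt), FIELD_GET(31, 8, fmt));
	return 0;
}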
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl006c.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl006c.h
new file mode 100644
index 000000000000..9ab2a22659e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl006c.h
@@ -0,0 +1,46 @@
+/*******************************************************************************
+ Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+#ifndef _cl006c_h_
+#define _cl006c_h_
+
+/* fields and values */
+#define NV06C_PUT (0x00000040)
+#define NV06C_PUT_PTR 31:2
+#define NV06C_GET (0x00000044)
+#define NV06C_GET_PTR 31:2
+
+/* dma method descriptor format */
+#define NV06C_METHOD_ADDRESS 12:2
+#define NV06C_METHOD_SUBCHANNEL 15:13
+#define NV06C_METHOD_COUNT 28:18
+#define NV06C_OPCODE 31:29
+#define NV06C_OPCODE_METHOD (0x00000000)
+#define NV06C_OPCODE_NONINC_METHOD (0x00000002)
+
+/* dma data format */
+#define NV06C_DATA 31:0
+
+/* dma jump format */
+#define NV06C_OPCODE_JUMP (0x00000001)
+#define NV06C_JUMP_OFFSET 28:2
+#endif /* _cl006c_h_ */
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl006e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl006e.h
new file mode 100644
index 000000000000..8cfb59662038
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl006e.h
@@ -0,0 +1,30 @@
+/*******************************************************************************
+ Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+#ifndef _cl006e_h_
+#define _cl006e_h_
+
+/* fields and values */
+#define NV06E_SET_OBJECT (0x00000000)
+#define NV06E_REFERENCE (0x00000048)
+#define NV06E_SET_REFERENCE (0x00000050)
+#endif /* _cl006e_h_ */
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl176e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl176e.h
new file mode 100644
index 000000000000..fa09725c8aea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl176e.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef _cl176e_h_
+#define _cl176e_h_
+
+#define NV176E_SET_OBJECT (0x00000000)
+#define NV176E_SET_CONTEXT_DMA_SEMAPHORE (0x00000060)
+#define NV176E_SEMAPHORE_OFFSET (0x00000064)
+#define NV176E_SEMAPHORE_ACQUIRE (0x00000068)
+#define NV176E_SEMAPHORE_RELEASE (0x0000006c)
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl206e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl206e.h
new file mode 100644
index 000000000000..27313c7c43cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl206e.h
@@ -0,0 +1,35 @@
+/*******************************************************************************
+ Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+#ifndef _cl206e_h_
+#define _cl206e_h_
+
+/* dma opcode2 format */
+#define NV206E_DMA_OPCODE2 1:0
+#define NV206E_DMA_OPCODE2_NONE (0x00000000)
+/* dma jump_long format */
+#define NV206E_DMA_OPCODE2_JUMP_LONG (0x00000001)
+#define NV206E_DMA_JUMP_LONG_OFFSET 31:2
+/* dma call format */
+#define NV206E_DMA_OPCODE2_CALL (0x00000002)
+#define NV206E_DMA_CALL_OFFSET 31:2
+#endif /* _cl206e_h_ */
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl502d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl502d.h
new file mode 100644
index 000000000000..47fe91b67e74
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl502d.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2003 - 2004, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl_nv50_twod_h_
+#define _cl_nv50_twod_h_
+
+#define NV502D_SET_OBJECT 0x0000
+#define NV502D_SET_OBJECT_POINTER 15:0
+
+#define NV502D_WAIT_FOR_IDLE 0x0110
+#define NV502D_WAIT_FOR_IDLE_V 31:0
+
+#define NV502D_SET_DST_CONTEXT_DMA 0x0184
+#define NV502D_SET_DST_CONTEXT_DMA_HANDLE 31:0
+
+#define NV502D_SET_SRC_CONTEXT_DMA 0x0188
+#define NV502D_SET_SRC_CONTEXT_DMA_HANDLE 31:0
+
+#define NV502D_SET_SEMAPHORE_CONTEXT_DMA 0x018c
+#define NV502D_SET_SEMAPHORE_CONTEXT_DMA_HANDLE 31:0
+
+#define NV502D_SET_DST_FORMAT 0x0200
+#define NV502D_SET_DST_FORMAT_V 7:0
+#define NV502D_SET_DST_FORMAT_V_A8R8G8B8 0x000000CF
+#define NV502D_SET_DST_FORMAT_V_A8RL8GL8BL8 0x000000D0
+#define NV502D_SET_DST_FORMAT_V_A2R10G10B10 0x000000DF
+#define NV502D_SET_DST_FORMAT_V_A8B8G8R8 0x000000D5
+#define NV502D_SET_DST_FORMAT_V_A8BL8GL8RL8 0x000000D6
+#define NV502D_SET_DST_FORMAT_V_A2B10G10R10 0x000000D1
+#define NV502D_SET_DST_FORMAT_V_X8R8G8B8 0x000000E6
+#define NV502D_SET_DST_FORMAT_V_X8RL8GL8BL8 0x000000E7
+#define NV502D_SET_DST_FORMAT_V_X8B8G8R8 0x000000F9
+#define NV502D_SET_DST_FORMAT_V_X8BL8GL8RL8 0x000000FA
+#define NV502D_SET_DST_FORMAT_V_R5G6B5 0x000000E8
+#define NV502D_SET_DST_FORMAT_V_A1R5G5B5 0x000000E9
+#define NV502D_SET_DST_FORMAT_V_X1R5G5B5 0x000000F8
+#define NV502D_SET_DST_FORMAT_V_Y8 0x000000F3
+#define NV502D_SET_DST_FORMAT_V_Y16 0x000000EE
+#define NV502D_SET_DST_FORMAT_V_Y32 0x000000FF
+#define NV502D_SET_DST_FORMAT_V_Z1R5G5B5 0x000000FB
+#define NV502D_SET_DST_FORMAT_V_O1R5G5B5 0x000000FC
+#define NV502D_SET_DST_FORMAT_V_Z8R8G8B8 0x000000FD
+#define NV502D_SET_DST_FORMAT_V_O8R8G8B8 0x000000FE
+#define NV502D_SET_DST_FORMAT_V_Y1_8X8 0x0000001C
+#define NV502D_SET_DST_FORMAT_V_RF16 0x000000F2
+#define NV502D_SET_DST_FORMAT_V_RF32 0x000000E5
+#define NV502D_SET_DST_FORMAT_V_RF32_GF32 0x000000CB
+#define NV502D_SET_DST_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA
+#define NV502D_SET_DST_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE
+#define NV502D_SET_DST_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0
+#define NV502D_SET_DST_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3
+
+#define NV502D_SET_DST_MEMORY_LAYOUT 0x0204
+#define NV502D_SET_DST_MEMORY_LAYOUT_V 0:0
+#define NV502D_SET_DST_MEMORY_LAYOUT_V_BLOCKLINEAR 0x00000000
+#define NV502D_SET_DST_MEMORY_LAYOUT_V_PITCH 0x00000001
+
+#define NV502D_SET_DST_PITCH 0x0214
+#define NV502D_SET_DST_PITCH_V 31:0
+
+#define NV502D_SET_DST_WIDTH 0x0218
+#define NV502D_SET_DST_WIDTH_V 31:0
+
+#define NV502D_SET_DST_HEIGHT 0x021c
+#define NV502D_SET_DST_HEIGHT_V 31:0
+
+#define NV502D_SET_DST_OFFSET_UPPER 0x0220
+#define NV502D_SET_DST_OFFSET_UPPER_V 7:0
+
+#define NV502D_SET_DST_OFFSET_LOWER 0x0224
+#define NV502D_SET_DST_OFFSET_LOWER_V 31:0
+
+#define NV502D_SET_SRC_FORMAT 0x0230
+#define NV502D_SET_SRC_FORMAT_V 7:0
+#define NV502D_SET_SRC_FORMAT_V_A8R8G8B8 0x000000CF
+#define NV502D_SET_SRC_FORMAT_V_A8RL8GL8BL8 0x000000D0
+#define NV502D_SET_SRC_FORMAT_V_A2R10G10B10 0x000000DF
+#define NV502D_SET_SRC_FORMAT_V_A8B8G8R8 0x000000D5
+#define NV502D_SET_SRC_FORMAT_V_A8BL8GL8RL8 0x000000D6
+#define NV502D_SET_SRC_FORMAT_V_A2B10G10R10 0x000000D1
+#define NV502D_SET_SRC_FORMAT_V_X8R8G8B8 0x000000E6
+#define NV502D_SET_SRC_FORMAT_V_X8RL8GL8BL8 0x000000E7
+#define NV502D_SET_SRC_FORMAT_V_X8B8G8R8 0x000000F9
+#define NV502D_SET_SRC_FORMAT_V_X8BL8GL8RL8 0x000000FA
+#define NV502D_SET_SRC_FORMAT_V_R5G6B5 0x000000E8
+#define NV502D_SET_SRC_FORMAT_V_A1R5G5B5 0x000000E9
+#define NV502D_SET_SRC_FORMAT_V_X1R5G5B5 0x000000F8
+#define NV502D_SET_SRC_FORMAT_V_Y8 0x000000F3
+#define NV502D_SET_SRC_FORMAT_V_AY8 0x0000001D
+#define NV502D_SET_SRC_FORMAT_V_Y16 0x000000EE
+#define NV502D_SET_SRC_FORMAT_V_Y32 0x000000FF
+#define NV502D_SET_SRC_FORMAT_V_Z1R5G5B5 0x000000FB
+#define NV502D_SET_SRC_FORMAT_V_O1R5G5B5 0x000000FC
+#define NV502D_SET_SRC_FORMAT_V_Z8R8G8B8 0x000000FD
+#define NV502D_SET_SRC_FORMAT_V_O8R8G8B8 0x000000FE
+#define NV502D_SET_SRC_FORMAT_V_Y1_8X8 0x0000001C
+#define NV502D_SET_SRC_FORMAT_V_RF16 0x000000F2
+#define NV502D_SET_SRC_FORMAT_V_RF32 0x000000E5
+#define NV502D_SET_SRC_FORMAT_V_RF32_GF32 0x000000CB
+#define NV502D_SET_SRC_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA
+#define NV502D_SET_SRC_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE
+#define NV502D_SET_SRC_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0
+#define NV502D_SET_SRC_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3
+
+#define NV502D_SET_SRC_MEMORY_LAYOUT 0x0234
+#define NV502D_SET_SRC_MEMORY_LAYOUT_V 0:0
+#define NV502D_SET_SRC_MEMORY_LAYOUT_V_BLOCKLINEAR 0x00000000
+#define NV502D_SET_SRC_MEMORY_LAYOUT_V_PITCH 0x00000001
+
+#define NV502D_SET_SRC_PITCH 0x0244
+#define NV502D_SET_SRC_PITCH_V 31:0
+
+#define NV502D_SET_SRC_WIDTH 0x0248
+#define NV502D_SET_SRC_WIDTH_V 31:0
+
+#define NV502D_SET_SRC_HEIGHT 0x024c
+#define NV502D_SET_SRC_HEIGHT_V 31:0
+
+#define NV502D_SET_SRC_OFFSET_UPPER 0x0250
+#define NV502D_SET_SRC_OFFSET_UPPER_V 7:0
+
+#define NV502D_SET_SRC_OFFSET_LOWER 0x0254
+#define NV502D_SET_SRC_OFFSET_LOWER_V 31:0
+
+#define NV502D_SET_CLIP_ENABLE 0x0290
+#define NV502D_SET_CLIP_ENABLE_V 0:0
+#define NV502D_SET_CLIP_ENABLE_V_FALSE 0x00000000
+#define NV502D_SET_CLIP_ENABLE_V_TRUE 0x00000001
+
+#define NV502D_SET_ROP 0x02a0
+#define NV502D_SET_ROP_V 7:0
+
+#define NV502D_SET_OPERATION 0x02ac
+#define NV502D_SET_OPERATION_V 2:0
+#define NV502D_SET_OPERATION_V_SRCCOPY_AND 0x00000000
+#define NV502D_SET_OPERATION_V_ROP_AND 0x00000001
+#define NV502D_SET_OPERATION_V_BLEND_AND 0x00000002
+#define NV502D_SET_OPERATION_V_SRCCOPY 0x00000003
+#define NV502D_SET_OPERATION_V_ROP 0x00000004
+#define NV502D_SET_OPERATION_V_SRCCOPY_PREMULT 0x00000005
+#define NV502D_SET_OPERATION_V_BLEND_PREMULT 0x00000006
+
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT 0x02e8
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V 2:0
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8X8R5G6B5 0x00000000
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A1R5G5B5 0x00000001
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8R8G8B8 0x00000002
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8Y8 0x00000003
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8X8Y16 0x00000004
+#define NV502D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_Y32 0x00000005
+
+#define NV502D_SET_MONOCHROME_PATTERN_FORMAT 0x02ec
+#define NV502D_SET_MONOCHROME_PATTERN_FORMAT_V 0:0
+#define NV502D_SET_MONOCHROME_PATTERN_FORMAT_V_CGA6_M1 0x00000000
+#define NV502D_SET_MONOCHROME_PATTERN_FORMAT_V_LE_M1 0x00000001
+
+#define NV502D_RENDER_SOLID_PRIM_MODE 0x0580
+#define NV502D_RENDER_SOLID_PRIM_MODE_V 2:0
+#define NV502D_RENDER_SOLID_PRIM_MODE_V_POINTS 0x00000000
+#define NV502D_RENDER_SOLID_PRIM_MODE_V_LINES 0x00000001
+#define NV502D_RENDER_SOLID_PRIM_MODE_V_POLYLINE 0x00000002
+#define NV502D_RENDER_SOLID_PRIM_MODE_V_TRIANGLES 0x00000003
+#define NV502D_RENDER_SOLID_PRIM_MODE_V_RECTS 0x00000004
+
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT 0x0584
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V 7:0
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A8R8G8B8 0x000000CF
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A2R10G10B10 0x000000DF
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A8B8G8R8 0x000000D5
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A2B10G10R10 0x000000D1
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X8R8G8B8 0x000000E6
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X8B8G8R8 0x000000F9
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_R5G6B5 0x000000E8
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A1R5G5B5 0x000000E9
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X1R5G5B5 0x000000F8
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y8 0x000000F3
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y16 0x000000EE
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y32 0x000000FF
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Z1R5G5B5 0x000000FB
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_O1R5G5B5 0x000000FC
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Z8R8G8B8 0x000000FD
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_O8R8G8B8 0x000000FE
+
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR 0x0588
+#define NV502D_SET_RENDER_SOLID_PRIM_COLOR_V 31:0
+
+#define NV502D_RENDER_SOLID_PRIM_POINT_SET_X(j) (0x0600+(j)*8)
+#define NV502D_RENDER_SOLID_PRIM_POINT_SET_X_V 31:0
+
+#define NV502D_RENDER_SOLID_PRIM_POINT_Y(j) (0x0604+(j)*8)
+#define NV502D_RENDER_SOLID_PRIM_POINT_Y_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DATA_TYPE 0x0800
+#define NV502D_SET_PIXELS_FROM_CPU_DATA_TYPE_V 0:0
+#define NV502D_SET_PIXELS_FROM_CPU_DATA_TYPE_V_COLOR 0x00000000
+#define NV502D_SET_PIXELS_FROM_CPU_DATA_TYPE_V_INDEX 0x00000001
+
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT 0x0804
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V 7:0
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A8R8G8B8 0x000000CF
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A2R10G10B10 0x000000DF
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A8B8G8R8 0x000000D5
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A2B10G10R10 0x000000D1
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X8R8G8B8 0x000000E6
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X8B8G8R8 0x000000F9
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_R5G6B5 0x000000E8
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A1R5G5B5 0x000000E9
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X1R5G5B5 0x000000F8
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y8 0x000000F3
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y16 0x000000EE
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y32 0x000000FF
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Z1R5G5B5 0x000000FB
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_O1R5G5B5 0x000000FC
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Z8R8G8B8 0x000000FD
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_O8R8G8B8 0x000000FE
+
+#define NV502D_SET_PIXELS_FROM_CPU_INDEX_FORMAT 0x0808
+#define NV502D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V 1:0
+#define NV502D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I1 0x00000000
+#define NV502D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I4 0x00000001
+#define NV502D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I8 0x00000002
+
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_FORMAT 0x080c
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V 0:0
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V_CGA6_M1 0x00000000
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V_LE_M1 0x00000001
+
+#define NV502D_SET_PIXELS_FROM_CPU_WRAP 0x0810
+#define NV502D_SET_PIXELS_FROM_CPU_WRAP_V 1:0
+#define NV502D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_PIXEL 0x00000000
+#define NV502D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_BYTE 0x00000001
+#define NV502D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_DWORD 0x00000002
+
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR0 0x0814
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR0_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR1 0x0818
+#define NV502D_SET_PIXELS_FROM_CPU_COLOR1_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_OPACITY 0x081c
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V 0:0
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V_TRANSPARENT 0x00000000
+#define NV502D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V_OPAQUE 0x00000001
+
+#define NV502D_SET_PIXELS_FROM_CPU_SRC_WIDTH 0x0838
+#define NV502D_SET_PIXELS_FROM_CPU_SRC_WIDTH_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_SRC_HEIGHT 0x083c
+#define NV502D_SET_PIXELS_FROM_CPU_SRC_HEIGHT_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DX_DU_FRAC 0x0840
+#define NV502D_SET_PIXELS_FROM_CPU_DX_DU_FRAC_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DX_DU_INT 0x0844
+#define NV502D_SET_PIXELS_FROM_CPU_DX_DU_INT_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DY_DV_FRAC 0x0848
+#define NV502D_SET_PIXELS_FROM_CPU_DY_DV_FRAC_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DY_DV_INT 0x084c
+#define NV502D_SET_PIXELS_FROM_CPU_DY_DV_INT_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DST_X0_FRAC 0x0850
+#define NV502D_SET_PIXELS_FROM_CPU_DST_X0_FRAC_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DST_X0_INT 0x0854
+#define NV502D_SET_PIXELS_FROM_CPU_DST_X0_INT_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DST_Y0_FRAC 0x0858
+#define NV502D_SET_PIXELS_FROM_CPU_DST_Y0_FRAC_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_CPU_DST_Y0_INT 0x085c
+#define NV502D_SET_PIXELS_FROM_CPU_DST_Y0_INT_V 31:0
+
+#define NV502D_PIXELS_FROM_CPU_DATA 0x0860
+#define NV502D_PIXELS_FROM_CPU_DATA_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP 0x0888
+#define NV502D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V 0:0
+#define NV502D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V_FALSE 0x00000000
+#define NV502D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V_TRUE 0x00000001
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_X0 0x08b0
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_X0_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_Y0 0x08b4
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_Y0_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_WIDTH 0x08b8
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_WIDTH_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_HEIGHT 0x08bc
+#define NV502D_SET_PIXELS_FROM_MEMORY_DST_HEIGHT_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DU_DX_FRAC 0x08c0
+#define NV502D_SET_PIXELS_FROM_MEMORY_DU_DX_FRAC_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DU_DX_INT 0x08c4
+#define NV502D_SET_PIXELS_FROM_MEMORY_DU_DX_INT_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DV_DY_FRAC 0x08c8
+#define NV502D_SET_PIXELS_FROM_MEMORY_DV_DY_FRAC_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_DV_DY_INT 0x08cc
+#define NV502D_SET_PIXELS_FROM_MEMORY_DV_DY_INT_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC 0x08d0
+#define NV502D_SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_SRC_X0_INT 0x08d4
+#define NV502D_SET_PIXELS_FROM_MEMORY_SRC_X0_INT_V 31:0
+
+#define NV502D_SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC 0x08d8
+#define NV502D_SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC_V 31:0
+
+#define NV502D_PIXELS_FROM_MEMORY_SRC_Y0_INT 0x08dc
+#define NV502D_PIXELS_FROM_MEMORY_SRC_Y0_INT_V 31:0
+#endif /* _cl_nv50_twod_h_ */
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl5039.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl5039.h
new file mode 100644
index 000000000000..5b2ca337cf2b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl5039.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2003-2004, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl_nv50_memory_to_memory_format_h_
+#define _cl_nv50_memory_to_memory_format_h_
+
+#define NV5039_SET_OBJECT 0x0000
+#define NV5039_SET_OBJECT_POINTER 15:0
+
+#define NV5039_NO_OPERATION 0x0100
+#define NV5039_NO_OPERATION_V 31:0
+
+#define NV5039_SET_CONTEXT_DMA_NOTIFY 0x0180
+#define NV5039_SET_CONTEXT_DMA_NOTIFY_HANDLE 31:0
+
+#define NV5039_SET_CONTEXT_DMA_BUFFER_IN 0x0184
+#define NV5039_SET_CONTEXT_DMA_BUFFER_IN_HANDLE 31:0
+
+#define NV5039_SET_CONTEXT_DMA_BUFFER_OUT 0x0188
+#define NV5039_SET_CONTEXT_DMA_BUFFER_OUT_HANDLE 31:0
+
+#define NV5039_SET_SRC_MEMORY_LAYOUT 0x0200
+#define NV5039_SET_SRC_MEMORY_LAYOUT_V 0:0
+#define NV5039_SET_SRC_MEMORY_LAYOUT_V_BLOCKLINEAR 0x00000000
+#define NV5039_SET_SRC_MEMORY_LAYOUT_V_PITCH 0x00000001
+
+#define NV5039_SET_SRC_BLOCK_SIZE 0x0204
+#define NV5039_SET_SRC_BLOCK_SIZE_WIDTH 3:0
+#define NV5039_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NV5039_SET_SRC_BLOCK_SIZE_HEIGHT 7:4
+#define NV5039_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NV5039_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NV5039_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NV5039_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NV5039_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NV5039_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NV5039_SET_SRC_BLOCK_SIZE_DEPTH 11:8
+#define NV5039_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+#define NV5039_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001
+#define NV5039_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002
+#define NV5039_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003
+#define NV5039_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NV5039_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005
+
+#define NV5039_SET_SRC_WIDTH 0x0208
+#define NV5039_SET_SRC_WIDTH_V 31:0
+
+#define NV5039_SET_SRC_HEIGHT 0x020c
+#define NV5039_SET_SRC_HEIGHT_V 31:0
+
+#define NV5039_SET_SRC_DEPTH 0x0210
+#define NV5039_SET_SRC_DEPTH_V 31:0
+
+#define NV5039_SET_SRC_LAYER 0x0214
+#define NV5039_SET_SRC_LAYER_V 31:0
+
+#define NV5039_SET_SRC_ORIGIN 0x0218
+#define NV5039_SET_SRC_ORIGIN_X 15:0
+#define NV5039_SET_SRC_ORIGIN_Y 31:16
+
+#define NV5039_SET_DST_MEMORY_LAYOUT 0x021c
+#define NV5039_SET_DST_MEMORY_LAYOUT_V 0:0
+#define NV5039_SET_DST_MEMORY_LAYOUT_V_BLOCKLINEAR 0x00000000
+#define NV5039_SET_DST_MEMORY_LAYOUT_V_PITCH 0x00000001
+
+#define NV5039_SET_DST_BLOCK_SIZE 0x0220
+#define NV5039_SET_DST_BLOCK_SIZE_WIDTH 3:0
+#define NV5039_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000
+#define NV5039_SET_DST_BLOCK_SIZE_HEIGHT 7:4
+#define NV5039_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000
+#define NV5039_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001
+#define NV5039_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002
+#define NV5039_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003
+#define NV5039_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004
+#define NV5039_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005
+#define NV5039_SET_DST_BLOCK_SIZE_DEPTH 11:8
+#define NV5039_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000
+#define NV5039_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001
+#define NV5039_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002
+#define NV5039_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003
+#define NV5039_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004
+#define NV5039_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005
+
+#define NV5039_SET_DST_WIDTH 0x0224
+#define NV5039_SET_DST_WIDTH_V 31:0
+
+#define NV5039_SET_DST_HEIGHT 0x0228
+#define NV5039_SET_DST_HEIGHT_V 31:0
+
+#define NV5039_SET_DST_DEPTH 0x022c
+#define NV5039_SET_DST_DEPTH_V 31:0
+
+#define NV5039_SET_DST_LAYER 0x0230
+#define NV5039_SET_DST_LAYER_V 31:0
+
+#define NV5039_SET_DST_ORIGIN 0x0234
+#define NV5039_SET_DST_ORIGIN_X 15:0
+#define NV5039_SET_DST_ORIGIN_Y 31:16
+
+#define NV5039_OFFSET_IN_UPPER 0x0238
+#define NV5039_OFFSET_IN_UPPER_VALUE 7:0
+
+#define NV5039_OFFSET_OUT_UPPER 0x023c
+#define NV5039_OFFSET_OUT_UPPER_VALUE 7:0
+
+#define NV5039_OFFSET_IN 0x030c
+#define NV5039_OFFSET_IN_VALUE 31:0
+
+#define NV5039_OFFSET_OUT 0x0310
+#define NV5039_OFFSET_OUT_VALUE 31:0
+
+#define NV5039_PITCH_IN 0x0314
+#define NV5039_PITCH_IN_VALUE 31:0
+
+#define NV5039_PITCH_OUT 0x0318
+#define NV5039_PITCH_OUT_VALUE 31:0
+
+#define NV5039_LINE_LENGTH_IN 0x031c
+#define NV5039_LINE_LENGTH_IN_VALUE 31:0
+
+#define NV5039_LINE_COUNT 0x0320
+#define NV5039_LINE_COUNT_VALUE 31:0
+
+#define NV5039_FORMAT 0x0324
+#define NV5039_FORMAT_IN 7:0
+#define NV5039_FORMAT_IN_ONE 0x00000001
+#define NV5039_FORMAT_OUT 15:8
+#define NV5039_FORMAT_OUT_ONE 0x00000001
+
+#define NV5039_BUFFER_NOTIFY 0x0328
+#define NV5039_BUFFER_NOTIFY_TYPE 31:0
+#define NV5039_BUFFER_NOTIFY_TYPE_WRITE_ONLY 0x00000000
+#define NV5039_BUFFER_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001
+#endif /* _cl_nv50_memory_to_memory_format_h_ */
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507a.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507a.h
new file mode 100644
index 000000000000..a97bcec1ab9a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507a.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl507a_h_
+#define _cl507a_h_
+
+#define NV507A_FREE (0x00000008)
+#define NV507A_FREE_COUNT 5:0
+#define NV507A_UPDATE (0x00000080)
+#define NV507A_UPDATE_INTERLOCK_WITH_CORE 0:0
+#define NV507A_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NV507A_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NV507A_SET_CURSOR_HOT_SPOT_POINT_OUT (0x00000084)
+#define NV507A_SET_CURSOR_HOT_SPOT_POINT_OUT_X 15:0
+#define NV507A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y 31:16
+#endif // _cl507a_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507c.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507c.h
new file mode 100644
index 000000000000..ada17015daf2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507c.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl507c_h_
+#define _cl507c_h_
+
+#define NV_DISP_BASE_NOTIFIER_1 0x00000000
+#define NV_DISP_BASE_NOTIFIER_1_SIZEOF 0x00000004
+#define NV_DISP_BASE_NOTIFIER_1__0 0x00000000
+#define NV_DISP_BASE_NOTIFIER_1__0_PRESENTATION_COUNT 15:0
+#define NV_DISP_BASE_NOTIFIER_1__0_TIMESTAMP 29:16
+#define NV_DISP_BASE_NOTIFIER_1__0_STATUS 31:30
+#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_NOT_BEGUN 0x00000000
+#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_BEGUN 0x00000001
+#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_FINISHED 0x00000002
+
+
+// dma opcode instructions
+#define NV507C_DMA 0x00000000
+#define NV507C_DMA_OPCODE 31:29
+#define NV507C_DMA_OPCODE_METHOD 0x00000000
+#define NV507C_DMA_OPCODE_JUMP 0x00000001
+#define NV507C_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NV507C_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NV507C_DMA_OPCODE 31:29
+#define NV507C_DMA_OPCODE_METHOD 0x00000000
+#define NV507C_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NV507C_DMA_METHOD_COUNT 27:18
+#define NV507C_DMA_METHOD_OFFSET 11:2
+#define NV507C_DMA_DATA 31:0
+#define NV507C_DMA_NOP 0x00000000
+#define NV507C_DMA_OPCODE 31:29
+#define NV507C_DMA_OPCODE_JUMP 0x00000001
+#define NV507C_DMA_JUMP_OFFSET 11:2
+#define NV507C_DMA_OPCODE 31:29
+#define NV507C_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NV507C_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NV507C_PUT (0x00000000)
+#define NV507C_PUT_PTR 11:2
+#define NV507C_GET (0x00000004)
+#define NV507C_GET_PTR 11:2
+#define NV507C_UPDATE (0x00000080)
+#define NV507C_UPDATE_INTERLOCK_WITH_CORE 0:0
+#define NV507C_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NV507C_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NV507C_SET_PRESENT_CONTROL (0x00000084)
+#define NV507C_SET_PRESENT_CONTROL_BEGIN_MODE 9:8
+#define NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000)
+#define NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001)
+#define NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_ON_LINE (0x00000002)
+#define NV507C_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 7:4
+#define NV507C_SET_PRESENT_CONTROL_BEGIN_LINE 30:16
+#define NV507C_SET_PRESENT_CONTROL_ON_LINE_MARGIN 15:10
+#define NV507C_SET_SEMAPHORE_CONTROL (0x00000088)
+#define NV507C_SET_SEMAPHORE_CONTROL_OFFSET 11:2
+#define NV507C_SET_SEMAPHORE_ACQUIRE (0x0000008C)
+#define NV507C_SET_SEMAPHORE_ACQUIRE_VALUE 31:0
+#define NV507C_SET_SEMAPHORE_RELEASE (0x00000090)
+#define NV507C_SET_SEMAPHORE_RELEASE_VALUE 31:0
+#define NV507C_SET_CONTEXT_DMA_SEMAPHORE (0x00000094)
+#define NV507C_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0
+#define NV507C_SET_NOTIFIER_CONTROL (0x000000A0)
+#define NV507C_SET_NOTIFIER_CONTROL_MODE 30:30
+#define NV507C_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NV507C_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NV507C_SET_NOTIFIER_CONTROL_OFFSET 11:2
+#define NV507C_SET_CONTEXT_DMA_NOTIFIER (0x000000A4)
+#define NV507C_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NV507C_SET_CONTEXT_DMA_ISO (0x000000C0)
+#define NV507C_SET_CONTEXT_DMA_ISO_HANDLE 31:0
+#define NV507C_SET_BASE_LUT_LO (0x000000E0)
+#define NV507C_SET_BASE_LUT_LO_ENABLE 31:30
+#define NV507C_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV507C_SET_BASE_LUT_LO_ENABLE_USE_CORE_LUT (0x00000001)
+#define NV507C_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000003)
+#define NV507C_SET_BASE_LUT_LO_MODE 29:29
+#define NV507C_SET_BASE_LUT_LO_MODE_LORES (0x00000000)
+#define NV507C_SET_BASE_LUT_LO_MODE_HIRES (0x00000001)
+#define NV507C_SET_BASE_LUT_LO_ORIGIN 7:2
+#define NV507C_SET_PROCESSING (0x00000110)
+#define NV507C_SET_PROCESSING_USE_GAIN_OFS 0:0
+#define NV507C_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000)
+#define NV507C_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001)
+#define NV507C_SET_CONVERSION (0x00000114)
+#define NV507C_SET_CONVERSION_GAIN 15:0
+#define NV507C_SET_CONVERSION_OFS 31:16
+
+#define NV507C_SURFACE_SET_OFFSET(a,b) (0x00000800 + (a)*0x00000020 + (b)*0x00000004)
+#define NV507C_SURFACE_SET_OFFSET_ORIGIN 31:0
+#define NV507C_SURFACE_SET_SIZE(a) (0x00000808 + (a)*0x00000020)
+#define NV507C_SURFACE_SET_SIZE_WIDTH 14:0
+#define NV507C_SURFACE_SET_SIZE_HEIGHT 30:16
+#define NV507C_SURFACE_SET_STORAGE(a) (0x0000080C + (a)*0x00000020)
+#define NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV507C_SURFACE_SET_STORAGE_PITCH 17:8
+#define NV507C_SURFACE_SET_STORAGE_MEMORY_LAYOUT 20:20
+#define NV507C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV507C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV507C_SURFACE_SET_PARAMS(a) (0x00000810 + (a)*0x00000020)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT 15:8
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_VOID16 (0x0000001F)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_VOID32 (0x0000002E)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NV507C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV507C_SURFACE_SET_PARAMS_SUPER_SAMPLE 1:0
+#define NV507C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV507C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV507C_SURFACE_SET_PARAMS_GAMMA 2:2
+#define NV507C_SURFACE_SET_PARAMS_GAMMA_LINEAR (0x00000000)
+#define NV507C_SURFACE_SET_PARAMS_GAMMA_SRGB (0x00000001)
+#define NV507C_SURFACE_SET_PARAMS_LAYOUT 5:4
+#define NV507C_SURFACE_SET_PARAMS_LAYOUT_FRM (0x00000000)
+#define NV507C_SURFACE_SET_PARAMS_LAYOUT_FLD1 (0x00000001)
+#define NV507C_SURFACE_SET_PARAMS_LAYOUT_FLD2 (0x00000002)
+#define NV507C_SURFACE_SET_PARAMS_KIND 22:16
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_PITCH (0x00000000)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_8BX2 (0x00000070)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_8BX2_BANKSWIZ (0x00000072)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_16BX1 (0x00000074)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_16BX1_BANKSWIZ (0x00000076)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_C32_MS4 (0x00000078)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_C32_MS8 (0x00000079)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_C32_MS4_BANKSWIZ (0x0000007A)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_C32_MS8_BANKSWIZ (0x0000007B)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_C64_MS4 (0x0000007C)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_C64_MS8 (0x0000007D)
+#define NV507C_SURFACE_SET_PARAMS_KIND_KIND_C128_MS4 (0x0000007E)
+#define NV507C_SURFACE_SET_PARAMS_KIND_FROM_PTE (0x0000007F)
+#define NV507C_SURFACE_SET_PARAMS_PART_STRIDE 24:24
+#define NV507C_SURFACE_SET_PARAMS_PART_STRIDE_PARTSTRIDE_256 (0x00000000)
+#define NV507C_SURFACE_SET_PARAMS_PART_STRIDE_PARTSTRIDE_1024 (0x00000001)
+#endif // _cl507c_h
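
The HI:LO pairs in these headers name bit ranges inside a 32-bit word; the DMA method-header fields above (OPCODE 31:29, METHOD_COUNT 27:18, METHOD_OFFSET 11:2) follow that convention. A minimal sketch of consuming the notation with the classic ternary trick, assuming hypothetical FLD_* helpers (nouveau's own DRF helpers may differ in name and form) and that the NV507C_* definitions above are reachable via the include shown:

#include <stdint.h>
#include <stdio.h>

#include "cl507c.h"	/* assumed include path for the definitions above */

/* (1 ? 31:29) evaluates to 31 and (0 ? 31:29) to 29, so a HI:LO token can be
 * split into its high and low bit positions at preprocessing time. */
#define FLD_HI(f)	(1 ? f)
#define FLD_LO(f)	(0 ? f)
#define FLD_MASK(f)	((~0u >> (31 - FLD_HI(f))) & (~0u << FLD_LO(f)))
#define FLD_VAL(f, v)	(((uint32_t)(v) << FLD_LO(f)) & FLD_MASK(f))

int main(void)
{
	/* Pack one NV507C pushbuffer method header: incrementing METHOD
	 * opcode, one data word, targeting SET_PRESENT_CONTROL (the >> 2
	 * matches the METHOD_OFFSET field starting at bit 2). */
	uint32_t hdr = FLD_VAL(NV507C_DMA_OPCODE, NV507C_DMA_OPCODE_METHOD) |
		       FLD_VAL(NV507C_DMA_METHOD_COUNT, 1) |
		       FLD_VAL(NV507C_DMA_METHOD_OFFSET,
			       NV507C_SET_PRESENT_CONTROL >> 2);

	printf("pushbuf header: 0x%08x\n", (unsigned)hdr); /* 0x00040084 */
	return 0;
}
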
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h
new file mode 100644
index 000000000000..2e444bac701d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl507d_h_
+#define _cl507d_h_
+
+#define NV_DISP_CORE_NOTIFIER_1 0x00000000
+#define NV_DISP_CORE_NOTIFIER_1_SIZEOF 0x00000054
+#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0 0x00000000
+#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_DONE 0:0
+#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_DONE_FALSE 0x00000000
+#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_DONE_TRUE 0x00000001
+#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_R0 15:1
+#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_TIMESTAMP 29:16
+
+
+// class methods
+#define NV507D_UPDATE (0x00000080)
+#define NV507D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0
+#define NV507D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NV507D_UPDATE_INTERLOCK_WITH_CURSOR1 8:8
+#define NV507D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NV507D_UPDATE_INTERLOCK_WITH_BASE0 1:1
+#define NV507D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001)
+#define NV507D_UPDATE_INTERLOCK_WITH_BASE1 9:9
+#define NV507D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY1 10:10
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 11:11
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000)
+#define NV507D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001)
+#define NV507D_UPDATE_NOT_DRIVER_FRIENDLY 31:31
+#define NV507D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000)
+#define NV507D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001)
+#define NV507D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30
+#define NV507D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000)
+#define NV507D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001)
+#define NV507D_UPDATE_INHIBIT_INTERRUPTS 29:29
+#define NV507D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NV507D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NV507D_SET_NOTIFIER_CONTROL (0x00000084)
+#define NV507D_SET_NOTIFIER_CONTROL_MODE 30:30
+#define NV507D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NV507D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NV507D_SET_NOTIFIER_CONTROL_OFFSET 11:2
+#define NV507D_SET_NOTIFIER_CONTROL_NOTIFY 31:31
+#define NV507D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000)
+#define NV507D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001)
+#define NV507D_SET_CONTEXT_DMA_NOTIFIER (0x00000088)
+#define NV507D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NV507D_GET_CAPABILITIES (0x0000008C)
+#define NV507D_GET_CAPABILITIES_DUMMY 31:0
+
+#define NV507D_DAC_SET_CONTROL(a) (0x00000400 + (a)*0x00000080)
+#define NV507D_DAC_SET_CONTROL_OWNER 3:0
+#define NV507D_DAC_SET_CONTROL_OWNER_NONE (0x00000000)
+#define NV507D_DAC_SET_CONTROL_OWNER_HEAD0 (0x00000001)
+#define NV507D_DAC_SET_CONTROL_OWNER_HEAD1 (0x00000002)
+#define NV507D_DAC_SET_CONTROL_SUB_OWNER 5:4
+#define NV507D_DAC_SET_CONTROL_SUB_OWNER_NONE (0x00000000)
+#define NV507D_DAC_SET_CONTROL_SUB_OWNER_SUBHEAD0 (0x00000001)
+#define NV507D_DAC_SET_CONTROL_SUB_OWNER_SUBHEAD1 (0x00000002)
+#define NV507D_DAC_SET_CONTROL_SUB_OWNER_BOTH (0x00000003)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL 13:8
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_CPST_NTSC_M (0x00000001)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_CPST_NTSC_J (0x00000002)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_CPST_PAL_BDGHI (0x00000003)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_CPST_PAL_M (0x00000004)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_CPST_PAL_N (0x00000005)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_CPST_PAL_CN (0x00000006)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_NTSC_M (0x00000007)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_NTSC_J (0x00000008)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_PAL_BDGHI (0x00000009)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_PAL_M (0x0000000A)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_PAL_N (0x0000000B)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_PAL_CN (0x0000000C)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_480P_60 (0x0000000D)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_576P_50 (0x0000000E)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_720P_50 (0x0000000F)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_720P_60 (0x00000010)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_1080I_50 (0x00000011)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_COMP_1080I_60 (0x00000012)
+#define NV507D_DAC_SET_CONTROL_PROTOCOL_CUSTOM (0x0000003F)
+#define NV507D_DAC_SET_CONTROL_INVALIDATE_FIRST_FIELD 14:14
+#define NV507D_DAC_SET_CONTROL_INVALIDATE_FIRST_FIELD_FALSE (0x00000000)
+#define NV507D_DAC_SET_CONTROL_INVALIDATE_FIRST_FIELD_TRUE (0x00000001)
+#define NV507D_DAC_SET_POLARITY(a) (0x00000404 + (a)*0x00000080)
+#define NV507D_DAC_SET_POLARITY_HSYNC 0:0
+#define NV507D_DAC_SET_POLARITY_HSYNC_POSITIVE_TRUE (0x00000000)
+#define NV507D_DAC_SET_POLARITY_HSYNC_NEGATIVE_TRUE (0x00000001)
+#define NV507D_DAC_SET_POLARITY_VSYNC 1:1
+#define NV507D_DAC_SET_POLARITY_VSYNC_POSITIVE_TRUE (0x00000000)
+#define NV507D_DAC_SET_POLARITY_VSYNC_NEGATIVE_TRUE (0x00000001)
+#define NV507D_DAC_SET_POLARITY_RESERVED 31:2
+
+#define NV507D_SOR_SET_CONTROL(a) (0x00000600 + (a)*0x00000040)
+#define NV507D_SOR_SET_CONTROL_OWNER 3:0
+#define NV507D_SOR_SET_CONTROL_OWNER_NONE (0x00000000)
+#define NV507D_SOR_SET_CONTROL_OWNER_HEAD0 (0x00000001)
+#define NV507D_SOR_SET_CONTROL_OWNER_HEAD1 (0x00000002)
+#define NV507D_SOR_SET_CONTROL_SUB_OWNER 5:4
+#define NV507D_SOR_SET_CONTROL_SUB_OWNER_NONE (0x00000000)
+#define NV507D_SOR_SET_CONTROL_SUB_OWNER_SUBHEAD0 (0x00000001)
+#define NV507D_SOR_SET_CONTROL_SUB_OWNER_SUBHEAD1 (0x00000002)
+#define NV507D_SOR_SET_CONTROL_SUB_OWNER_BOTH (0x00000003)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_AB (0x00000003)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_DUAL_SINGLE_TMDS (0x00000004)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_DDI_OUT (0x00000007)
+#define NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NV507D_SOR_SET_CONTROL_HSYNC_POLARITY 12:12
+#define NV507D_SOR_SET_CONTROL_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV507D_SOR_SET_CONTROL_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV507D_SOR_SET_CONTROL_VSYNC_POLARITY 13:13
+#define NV507D_SOR_SET_CONTROL_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV507D_SOR_SET_CONTROL_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV507D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV507D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV507D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+
+#define NV507D_PIOR_SET_CONTROL(a) (0x00000700 + (a)*0x00000040)
+#define NV507D_PIOR_SET_CONTROL_OWNER 3:0
+#define NV507D_PIOR_SET_CONTROL_OWNER_NONE (0x00000000)
+#define NV507D_PIOR_SET_CONTROL_OWNER_HEAD0 (0x00000001)
+#define NV507D_PIOR_SET_CONTROL_OWNER_HEAD1 (0x00000002)
+#define NV507D_PIOR_SET_CONTROL_SUB_OWNER 5:4
+#define NV507D_PIOR_SET_CONTROL_SUB_OWNER_NONE (0x00000000)
+#define NV507D_PIOR_SET_CONTROL_SUB_OWNER_SUBHEAD0 (0x00000001)
+#define NV507D_PIOR_SET_CONTROL_SUB_OWNER_SUBHEAD1 (0x00000002)
+#define NV507D_PIOR_SET_CONTROL_SUB_OWNER_BOTH (0x00000003)
+#define NV507D_PIOR_SET_CONTROL_PROTOCOL 11:8
+#define NV507D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000)
+#define NV507D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001)
+#define NV507D_PIOR_SET_CONTROL_HSYNC_POLARITY 12:12
+#define NV507D_PIOR_SET_CONTROL_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV507D_PIOR_SET_CONTROL_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV507D_PIOR_SET_CONTROL_VSYNC_POLARITY 13:13
+#define NV507D_PIOR_SET_CONTROL_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV507D_PIOR_SET_CONTROL_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV507D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV507D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV507D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+
+#define NV507D_HEAD_SET_PIXEL_CLOCK(a) (0x00000804 + (a)*0x00000400)
+#define NV507D_HEAD_SET_PIXEL_CLOCK_FREQUENCY 21:0
+#define NV507D_HEAD_SET_PIXEL_CLOCK_MODE 23:22
+#define NV507D_HEAD_SET_PIXEL_CLOCK_MODE_CLK_25 (0x00000000)
+#define NV507D_HEAD_SET_PIXEL_CLOCK_MODE_CLK_28 (0x00000001)
+#define NV507D_HEAD_SET_PIXEL_CLOCK_MODE_CLK_CUSTOM (0x00000002)
+#define NV507D_HEAD_SET_PIXEL_CLOCK_ADJ1000DIV1001 24:24
+#define NV507D_HEAD_SET_PIXEL_CLOCK_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV507D_HEAD_SET_PIXEL_CLOCK_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV507D_HEAD_SET_PIXEL_CLOCK_NOT_DRIVER 25:25
+#define NV507D_HEAD_SET_PIXEL_CLOCK_NOT_DRIVER_FALSE (0x00000000)
+#define NV507D_HEAD_SET_PIXEL_CLOCK_NOT_DRIVER_TRUE (0x00000001)
+#define NV507D_HEAD_SET_CONTROL(a) (0x00000808 + (a)*0x00000400)
+#define NV507D_HEAD_SET_CONTROL_STRUCTURE 2:1
+#define NV507D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001)
+#define NV507D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000810 + (a)*0x00000400)
+#define NV507D_HEAD_SET_OVERSCAN_COLOR_RED 9:0
+#define NV507D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10
+#define NV507D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20
+#define NV507D_HEAD_SET_RASTER_SIZE(a) (0x00000814 + (a)*0x00000400)
+#define NV507D_HEAD_SET_RASTER_SIZE_WIDTH 14:0
+#define NV507D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16
+#define NV507D_HEAD_SET_RASTER_SYNC_END(a) (0x00000818 + (a)*0x00000400)
+#define NV507D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NV507D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NV507D_HEAD_SET_RASTER_BLANK_END(a) (0x0000081C + (a)*0x00000400)
+#define NV507D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NV507D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NV507D_HEAD_SET_RASTER_BLANK_START(a) (0x00000820 + (a)*0x00000400)
+#define NV507D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NV507D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NV507D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000824 + (a)*0x00000400)
+#define NV507D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0
+#define NV507D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16
+#define NV507D_HEAD_SET_RASTER_VERT_BLANK_DMI(a) (0x00000828 + (a)*0x00000400)
+#define NV507D_HEAD_SET_RASTER_VERT_BLANK_DMI_DURATION 11:0
+#define NV507D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000082C + (a)*0x00000400)
+#define NV507D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0
+#define NV507D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10
+#define NV507D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20
+#define NV507D_HEAD_SET_BASE_LUT_LO(a) (0x00000840 + (a)*0x00000400)
+#define NV507D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31
+#define NV507D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV507D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV507D_HEAD_SET_BASE_LUT_LO_MODE 30:30
+#define NV507D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000)
+#define NV507D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001)
+#define NV507D_HEAD_SET_BASE_LUT_LO_ORIGIN 7:2
+#define NV507D_HEAD_SET_BASE_LUT_HI(a) (0x00000844 + (a)*0x00000400)
+#define NV507D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0
+#define NV507D_HEAD_SET_OFFSET(a,b) (0x00000860 + (a)*0x00000400 + (b)*0x00000004)
+#define NV507D_HEAD_SET_OFFSET_ORIGIN 31:0
+#define NV507D_HEAD_SET_SIZE(a) (0x00000868 + (a)*0x00000400)
+#define NV507D_HEAD_SET_SIZE_WIDTH 14:0
+#define NV507D_HEAD_SET_SIZE_HEIGHT 30:16
+#define NV507D_HEAD_SET_STORAGE(a) (0x0000086C + (a)*0x00000400)
+#define NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV507D_HEAD_SET_STORAGE_PITCH 17:8
+#define NV507D_HEAD_SET_STORAGE_MEMORY_LAYOUT 20:20
+#define NV507D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV507D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV507D_HEAD_SET_PARAMS(a) (0x00000870 + (a)*0x00000400)
+#define NV507D_HEAD_SET_PARAMS_FORMAT 15:8
+#define NV507D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NV507D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV507D_HEAD_SET_PARAMS_KIND 22:16
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_PITCH (0x00000000)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_GENERIC_8BX2 (0x00000070)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_GENERIC_8BX2_BANKSWIZ (0x00000072)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_GENERIC_16BX1 (0x00000074)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_GENERIC_16BX1_BANKSWIZ (0x00000076)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_C32_MS4 (0x00000078)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_C32_MS8 (0x00000079)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_C32_MS4_BANKSWIZ (0x0000007A)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_C32_MS8_BANKSWIZ (0x0000007B)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_C64_MS4 (0x0000007C)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_C64_MS8 (0x0000007D)
+#define NV507D_HEAD_SET_PARAMS_KIND_KIND_C128_MS4 (0x0000007E)
+#define NV507D_HEAD_SET_PARAMS_KIND_FROM_PTE (0x0000007F)
+#define NV507D_HEAD_SET_PARAMS_PART_STRIDE 24:24
+#define NV507D_HEAD_SET_PARAMS_PART_STRIDE_PARTSTRIDE_256 (0x00000000)
+#define NV507D_HEAD_SET_PARAMS_PART_STRIDE_PARTSTRIDE_1024 (0x00000001)
+#define NV507D_HEAD_SET_CONTEXT_DMA_ISO(a) (0x00000874 + (a)*0x00000400)
+#define NV507D_HEAD_SET_CONTEXT_DMA_ISO_HANDLE 31:0
+#define NV507D_HEAD_SET_CONTROL_CURSOR(a) (0x00000880 + (a)*0x00000400)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NV507D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24
+#define NV507D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SIZE 26:26
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 13:8
+#define NV507D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 21:16
+#define NV507D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28
+#define NV507D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER 5:4
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_NONE (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_SUBHEAD0 (0x00000001)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_SUBHEAD1 (0x00000002)
+#define NV507D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_BOTH (0x00000003)
+#define NV507D_HEAD_SET_OFFSET_CURSOR(a) (0x00000884 + (a)*0x00000400)
+#define NV507D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
+#define NV507D_HEAD_SET_DITHER_CONTROL(a) (0x000008A0 + (a)*0x00000400)
+#define NV507D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NV507D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NV507D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NV507D_HEAD_SET_DITHER_CONTROL_BITS 2:1
+#define NV507D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000)
+#define NV507D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001)
+#define NV507D_HEAD_SET_DITHER_CONTROL_MODE 6:3
+#define NV507D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NV507D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NV507D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NV507D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NV507D_HEAD_SET_DITHER_CONTROL_PHASE 8:7
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x000008A4 + (a)*0x00000400)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002)
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16
+#define NV507D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24
+#define NV507D_HEAD_SET_PROCAMP(a) (0x000008A8 + (a)*0x00000400)
+#define NV507D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NV507D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NV507D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NV507D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NV507D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2
+#define NV507D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000)
+#define NV507D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001)
+#define NV507D_HEAD_SET_PROCAMP_SAT_COS 19:8
+#define NV507D_HEAD_SET_PROCAMP_SAT_SINE 31:20
+#define NV507D_HEAD_SET_PROCAMP_TRANSITION 4:3
+#define NV507D_HEAD_SET_PROCAMP_TRANSITION_HARD (0x00000000)
+#define NV507D_HEAD_SET_PROCAMP_TRANSITION_NTSC (0x00000001)
+#define NV507D_HEAD_SET_PROCAMP_TRANSITION_PAL (0x00000002)
+#define NV507D_HEAD_SET_VIEWPORT_POINT_IN(a,b) (0x000008C0 + (a)*0x00000400 + (b)*0x00000004)
+#define NV507D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NV507D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000008C8 + (a)*0x00000400)
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000008D8 + (a)*0x00000400)
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000008DC + (a)*0x00000400)
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0
+#define NV507D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x00000900 + (a)*0x00000400)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV507D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV507D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x00000904 + (a)*0x00000400)
+#define NV507D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0
+#define NV507D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV507D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV507D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV507D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define NV507D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003)
+#endif // _cl507d_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507e.h
new file mode 100644
index 000000000000..1f432b43cbb9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507e.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl507e_h_
+#define _cl507e_h_
+
+// class methods
+#define NV507E_SET_PRESENT_CONTROL (0x00000084)
+#define NV507E_SET_PRESENT_CONTROL_BEGIN_MODE 1:0
+#define NV507E_SET_PRESENT_CONTROL_BEGIN_MODE_ASAP (0x00000000)
+#define NV507E_SET_PRESENT_CONTROL_BEGIN_MODE_TIMESTAMP (0x00000003)
+#define NV507E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 7:4
+#define NV507E_SET_CONTEXT_DMA_ISO (0x000000C0)
+#define NV507E_SET_CONTEXT_DMA_ISO_HANDLE 31:0
+#define NV507E_SET_POINT_IN (0x000000E0)
+#define NV507E_SET_POINT_IN_X 14:0
+#define NV507E_SET_POINT_IN_Y 30:16
+#define NV507E_SET_SIZE_IN (0x000000E4)
+#define NV507E_SET_SIZE_IN_WIDTH 14:0
+#define NV507E_SET_SIZE_IN_HEIGHT 30:16
+#define NV507E_SET_SIZE_OUT (0x000000E8)
+#define NV507E_SET_SIZE_OUT_WIDTH 14:0
+#define NV507E_SET_COMPOSITION_CONTROL (0x00000100)
+#define NV507E_SET_COMPOSITION_CONTROL_MODE 3:0
+#define NV507E_SET_COMPOSITION_CONTROL_MODE_SOURCE_COLOR_VALUE_KEYING (0x00000000)
+#define NV507E_SET_COMPOSITION_CONTROL_MODE_DESTINATION_COLOR_VALUE_KEYING (0x00000001)
+#define NV507E_SET_COMPOSITION_CONTROL_MODE_OPAQUE_SUSPEND_BASE (0x00000002)
+
+#define NV507E_SURFACE_SET_OFFSET (0x00000800)
+#define NV507E_SURFACE_SET_OFFSET_ORIGIN 31:0
+#define NV507E_SURFACE_SET_SIZE (0x00000808)
+#define NV507E_SURFACE_SET_SIZE_WIDTH 14:0
+#define NV507E_SURFACE_SET_SIZE_HEIGHT 30:16
+#define NV507E_SURFACE_SET_STORAGE (0x0000080C)
+#define NV507E_SURFACE_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV507E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV507E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV507E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV507E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV507E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV507E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV507E_SURFACE_SET_STORAGE_PITCH 17:8
+#define NV507E_SURFACE_SET_STORAGE_MEMORY_LAYOUT 20:20
+#define NV507E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV507E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV507E_SURFACE_SET_PARAMS (0x00000810)
+#define NV507E_SURFACE_SET_PARAMS_FORMAT 15:8
+#define NV507E_SURFACE_SET_PARAMS_FORMAT_VE8YO8UE8YE8 (0x00000028)
+#define NV507E_SURFACE_SET_PARAMS_FORMAT_YO8VE8YE8UE8 (0x00000029)
+#define NV507E_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV507E_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV507E_SURFACE_SET_PARAMS_COLOR_SPACE 1:0
+#define NV507E_SURFACE_SET_PARAMS_COLOR_SPACE_RGB (0x00000000)
+#define NV507E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_601 (0x00000001)
+#define NV507E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_709 (0x00000002)
+#define NV507E_SURFACE_SET_PARAMS_KIND 22:16
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_PITCH (0x00000000)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_8BX2 (0x00000070)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_8BX2_BANKSWIZ (0x00000072)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_16BX1 (0x00000074)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_GENERIC_16BX1_BANKSWIZ (0x00000076)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_C32_MS4 (0x00000078)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_C32_MS8 (0x00000079)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_C32_MS4_BANKSWIZ (0x0000007A)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_C32_MS8_BANKSWIZ (0x0000007B)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_C64_MS4 (0x0000007C)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_C64_MS8 (0x0000007D)
+#define NV507E_SURFACE_SET_PARAMS_KIND_KIND_C128_MS4 (0x0000007E)
+#define NV507E_SURFACE_SET_PARAMS_KIND_FROM_PTE (0x0000007F)
+#define NV507E_SURFACE_SET_PARAMS_PART_STRIDE 24:24
+#define NV507E_SURFACE_SET_PARAMS_PART_STRIDE_PARTSTRIDE_256 (0x00000000)
+#define NV507E_SURFACE_SET_PARAMS_PART_STRIDE_PARTSTRIDE_1024 (0x00000001)
+#endif // _cl507e_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl826f.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl826f.h
new file mode 100644
index 000000000000..8e7c0fbbbdb8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl826f.h
@@ -0,0 +1,39 @@
+/*******************************************************************************
+ Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+#ifndef _cl826f_h_
+#define _cl826f_h_
+
+#define NV826F_SEMAPHOREA (0x00000010)
+#define NV826F_SEMAPHOREA_OFFSET_UPPER 7:0
+#define NV826F_SEMAPHOREB (0x00000014)
+#define NV826F_SEMAPHOREB_OFFSET_LOWER 31:00
+#define NV826F_SEMAPHOREC (0x00000018)
+#define NV826F_SEMAPHOREC_PAYLOAD 31:0
+#define NV826F_SEMAPHORED (0x0000001C)
+#define NV826F_SEMAPHORED_OPERATION 2:0
+#define NV826F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001
+#define NV826F_SEMAPHORED_OPERATION_RELEASE 0x00000002
+#define NV826F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004
+#define NV826F_NON_STALLED_INTERRUPT (0x00000020)
+#define NV826F_SET_CONTEXT_DMA_SEMAPHORE (0x00000060)
+#endif /* _cl826f_h_ */
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl827c.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl827c.h
new file mode 100644
index 000000000000..4b8938ee34a4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl827c.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl827c_h_
+#define _cl827c_h_
+
+// class methods
+#define NV827C_SET_PRESENT_CONTROL (0x00000084)
+#define NV827C_SET_PRESENT_CONTROL_BEGIN_MODE 9:8
+#define NV827C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000)
+#define NV827C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001)
+#define NV827C_SET_PRESENT_CONTROL_BEGIN_MODE_ON_LINE (0x00000002)
+#define NV827C_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 7:4
+#define NV827C_SET_PRESENT_CONTROL_BEGIN_LINE 30:16
+#define NV827C_SET_PRESENT_CONTROL_ON_LINE_MARGIN 15:10
+#define NV827C_SET_CONTEXT_DMAS_ISO(b) (0x000000C0 + (b)*0x00000004)
+#define NV827C_SET_CONTEXT_DMAS_ISO_HANDLE 31:0
+#define NV827C_SET_PROCESSING (0x00000110)
+#define NV827C_SET_PROCESSING_USE_GAIN_OFS 0:0
+#define NV827C_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000)
+#define NV827C_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001)
+#define NV827C_SET_CONVERSION (0x00000114)
+#define NV827C_SET_CONVERSION_GAIN 15:0
+#define NV827C_SET_CONVERSION_OFS 31:16
+
+#define NV827C_SURFACE_SET_OFFSET(a,b) (0x00000800 + (a)*0x00000020 + (b)*0x00000004)
+#define NV827C_SURFACE_SET_OFFSET_ORIGIN 31:0
+#define NV827C_SURFACE_SET_SIZE(a) (0x00000808 + (a)*0x00000020)
+#define NV827C_SURFACE_SET_SIZE_WIDTH 14:0
+#define NV827C_SURFACE_SET_SIZE_HEIGHT 30:16
+#define NV827C_SURFACE_SET_STORAGE(a) (0x0000080C + (a)*0x00000020)
+#define NV827C_SURFACE_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV827C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV827C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV827C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV827C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV827C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV827C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV827C_SURFACE_SET_STORAGE_PITCH 17:8
+#define NV827C_SURFACE_SET_STORAGE_MEMORY_LAYOUT 20:20
+#define NV827C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV827C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV827C_SURFACE_SET_PARAMS(a) (0x00000810 + (a)*0x00000020)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT 15:8
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_VOID16 (0x0000001F)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_VOID32 (0x0000002E)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NV827C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV827C_SURFACE_SET_PARAMS_SUPER_SAMPLE 1:0
+#define NV827C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV827C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV827C_SURFACE_SET_PARAMS_GAMMA 2:2
+#define NV827C_SURFACE_SET_PARAMS_GAMMA_LINEAR (0x00000000)
+#define NV827C_SURFACE_SET_PARAMS_GAMMA_SRGB (0x00000001)
+#define NV827C_SURFACE_SET_PARAMS_LAYOUT 5:4
+#define NV827C_SURFACE_SET_PARAMS_LAYOUT_FRM (0x00000000)
+#define NV827C_SURFACE_SET_PARAMS_LAYOUT_FLD1 (0x00000001)
+#define NV827C_SURFACE_SET_PARAMS_LAYOUT_FLD2 (0x00000002)
+#define NV827C_SURFACE_SET_PARAMS_RESERVED0 22:16
+#define NV827C_SURFACE_SET_PARAMS_RESERVED1 24:24
+#endif // _cl827c_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl827d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl827d.h
new file mode 100644
index 000000000000..5da5d5579dab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl827d.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl827d_h_
+#define _cl827d_h_
+
+// class methods
+#define NV827D_HEAD_SET_BASE_LUT_LO(a) (0x00000840 + (a)*0x00000400)
+#define NV827D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31
+#define NV827D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV827D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV827D_HEAD_SET_BASE_LUT_LO_MODE 30:30
+#define NV827D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000)
+#define NV827D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001)
+#define NV827D_HEAD_SET_BASE_LUT_LO_ORIGIN 7:2
+#define NV827D_HEAD_SET_BASE_LUT_HI(a) (0x00000844 + (a)*0x00000400)
+#define NV827D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0
+#define NV827D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000085C + (a)*0x00000400)
+#define NV827D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0
+#define NV827D_HEAD_SET_OFFSET(a,b) (0x00000860 + (a)*0x00000400 + (b)*0x00000004)
+#define NV827D_HEAD_SET_OFFSET_ORIGIN 31:0
+#define NV827D_HEAD_SET_SIZE(a) (0x00000868 + (a)*0x00000400)
+#define NV827D_HEAD_SET_SIZE_WIDTH 14:0
+#define NV827D_HEAD_SET_SIZE_HEIGHT 30:16
+#define NV827D_HEAD_SET_STORAGE(a) (0x0000086C + (a)*0x00000400)
+#define NV827D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV827D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV827D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV827D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV827D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV827D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV827D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV827D_HEAD_SET_STORAGE_PITCH 17:8
+#define NV827D_HEAD_SET_STORAGE_MEMORY_LAYOUT 20:20
+#define NV827D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV827D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV827D_HEAD_SET_PARAMS(a) (0x00000870 + (a)*0x00000400)
+#define NV827D_HEAD_SET_PARAMS_FORMAT 15:8
+#define NV827D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NV827D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV827D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0
+#define NV827D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV827D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV827D_HEAD_SET_PARAMS_GAMMA 2:2
+#define NV827D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000)
+#define NV827D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001)
+#define NV827D_HEAD_SET_PARAMS_RESERVED0 22:16
+#define NV827D_HEAD_SET_PARAMS_RESERVED1 24:24
+#define NV827D_HEAD_SET_CONTEXT_DMAS_ISO(a,b) (0x00000874 + (a)*0x00000400 + (b)*0x00000004)
+#define NV827D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0
+#define NV827D_HEAD_SET_CONTROL_CURSOR(a) (0x00000880 + (a)*0x00000400)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NV827D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24
+#define NV827D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SIZE 26:26
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 13:8
+#define NV827D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 21:16
+#define NV827D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28
+#define NV827D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER 5:4
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_NONE (0x00000000)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_SUBHEAD0 (0x00000001)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_SUBHEAD1 (0x00000002)
+#define NV827D_HEAD_SET_CONTROL_CURSOR_SUB_OWNER_BOTH (0x00000003)
+#define NV827D_HEAD_SET_OFFSET_CURSOR(a) (0x00000884 + (a)*0x00000400)
+#define NV827D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
+#define NV827D_HEAD_SET_CONTEXT_DMA_CURSOR(a) (0x0000089C + (a)*0x00000400)
+#define NV827D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0
+#define NV827D_HEAD_SET_VIEWPORT_POINT_IN(a,b) (0x000008C0 + (a)*0x00000400 + (b)*0x00000004)
+#define NV827D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NV827D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#endif // _cl827d_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl827e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl827e.h
new file mode 100644
index 000000000000..8cae7a53d14d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl827e.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl827e_h_
+#define _cl827e_h_
+
+#define NV_DISP_NOTIFICATION_1 0x00000000
+#define NV_DISP_NOTIFICATION_1_SIZEOF 0x00000010
+#define NV_DISP_NOTIFICATION_1_TIME_STAMP_0 0x00000000
+#define NV_DISP_NOTIFICATION_1_TIME_STAMP_0_NANOSECONDS0 31:0
+#define NV_DISP_NOTIFICATION_1_TIME_STAMP_1 0x00000001
+#define NV_DISP_NOTIFICATION_1_TIME_STAMP_1_NANOSECONDS1 31:0
+#define NV_DISP_NOTIFICATION_1__2 0x00000002
+#define NV_DISP_NOTIFICATION_1__2_AUDIT_TIMESTAMP 31:0
+#define NV_DISP_NOTIFICATION_1__3 0x00000003
+#define NV_DISP_NOTIFICATION_1__3_PRESENT_COUNT 7:0
+#define NV_DISP_NOTIFICATION_1__3_R0 15:8
+#define NV_DISP_NOTIFICATION_1__3_STATUS 31:16
+#define NV_DISP_NOTIFICATION_1__3_STATUS_NOT_BEGUN 0x00008000
+#define NV_DISP_NOTIFICATION_1__3_STATUS_BEGUN 0x0000FFFF
+#define NV_DISP_NOTIFICATION_1__3_STATUS_FINISHED 0x00000000
+
+
+// class methods
+#define NV827E_SET_PRESENT_CONTROL (0x00000084)
+#define NV827E_SET_PRESENT_CONTROL_BEGIN_MODE 1:0
+#define NV827E_SET_PRESENT_CONTROL_BEGIN_MODE_ASAP (0x00000000)
+#define NV827E_SET_PRESENT_CONTROL_BEGIN_MODE_TIMESTAMP (0x00000003)
+#define NV827E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 7:4
+#define NV827E_SET_CONTEXT_DMA_ISO (0x000000C0)
+#define NV827E_SET_CONTEXT_DMA_ISO_HANDLE 31:0
+#define NV827E_SET_COMPOSITION_CONTROL (0x00000100)
+#define NV827E_SET_COMPOSITION_CONTROL_MODE 3:0
+#define NV827E_SET_COMPOSITION_CONTROL_MODE_SOURCE_COLOR_VALUE_KEYING (0x00000000)
+#define NV827E_SET_COMPOSITION_CONTROL_MODE_DESTINATION_COLOR_VALUE_KEYING (0x00000001)
+#define NV827E_SET_COMPOSITION_CONTROL_MODE_OPAQUE_SUSPEND_BASE (0x00000002)
+
+#define NV827E_SURFACE_SET_OFFSET (0x00000800)
+#define NV827E_SURFACE_SET_OFFSET_ORIGIN 31:0
+#define NV827E_SURFACE_SET_SIZE (0x00000808)
+#define NV827E_SURFACE_SET_SIZE_WIDTH 14:0
+#define NV827E_SURFACE_SET_SIZE_HEIGHT 30:16
+#define NV827E_SURFACE_SET_STORAGE (0x0000080C)
+#define NV827E_SURFACE_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV827E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV827E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV827E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV827E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV827E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV827E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV827E_SURFACE_SET_STORAGE_PITCH 17:8
+#define NV827E_SURFACE_SET_STORAGE_MEMORY_LAYOUT 20:20
+#define NV827E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV827E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV827E_SURFACE_SET_PARAMS (0x00000810)
+#define NV827E_SURFACE_SET_PARAMS_FORMAT 15:8
+#define NV827E_SURFACE_SET_PARAMS_FORMAT_VE8YO8UE8YE8 (0x00000028)
+#define NV827E_SURFACE_SET_PARAMS_FORMAT_YO8VE8YE8UE8 (0x00000029)
+#define NV827E_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV827E_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV827E_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV827E_SURFACE_SET_PARAMS_COLOR_SPACE 1:0
+#define NV827E_SURFACE_SET_PARAMS_COLOR_SPACE_RGB (0x00000000)
+#define NV827E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_601 (0x00000001)
+#define NV827E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_709 (0x00000002)
+#define NV827E_SURFACE_SET_PARAMS_RESERVED0 22:16
+#define NV827E_SURFACE_SET_PARAMS_RESERVED1 24:24
+#endif // _cl827e_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl837d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl837d.h
new file mode 100644
index 000000000000..0db9d4e730ec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl837d.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl837d_h_
+#define _cl837d_h_
+
+// class methods
+#define NV837D_SOR_SET_CONTROL(a) (0x00000600 + (a)*0x00000040)
+#define NV837D_SOR_SET_CONTROL_OWNER 3:0
+#define NV837D_SOR_SET_CONTROL_OWNER_NONE (0x00000000)
+#define NV837D_SOR_SET_CONTROL_OWNER_HEAD0 (0x00000001)
+#define NV837D_SOR_SET_CONTROL_OWNER_HEAD1 (0x00000002)
+#define NV837D_SOR_SET_CONTROL_SUB_OWNER 5:4
+#define NV837D_SOR_SET_CONTROL_SUB_OWNER_NONE (0x00000000)
+#define NV837D_SOR_SET_CONTROL_SUB_OWNER_SUBHEAD0 (0x00000001)
+#define NV837D_SOR_SET_CONTROL_SUB_OWNER_SUBHEAD1 (0x00000002)
+#define NV837D_SOR_SET_CONTROL_SUB_OWNER_BOTH (0x00000003)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_AB (0x00000003)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_DUAL_SINGLE_TMDS (0x00000004)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_DDI_OUT (0x00000007)
+#define NV837D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NV837D_SOR_SET_CONTROL_HSYNC_POLARITY 12:12
+#define NV837D_SOR_SET_CONTROL_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV837D_SOR_SET_CONTROL_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV837D_SOR_SET_CONTROL_VSYNC_POLARITY 13:13
+#define NV837D_SOR_SET_CONTROL_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV837D_SOR_SET_CONTROL_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV837D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV837D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV837D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH 19:16
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT (0x00000000)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_16_422 (0x00000001)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444 (0x00000002)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_20_422 (0x00000003)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_422 (0x00000004)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444 (0x00000005)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444 (0x00000006)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_32_422 (0x00000007)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_36_444 (0x00000008)
+#define NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_48_444 (0x00000009)
+
+#define NV837D_PIOR_SET_CONTROL(a) (0x00000700 + (a)*0x00000040)
+#define NV837D_PIOR_SET_CONTROL_OWNER 3:0
+#define NV837D_PIOR_SET_CONTROL_OWNER_NONE (0x00000000)
+#define NV837D_PIOR_SET_CONTROL_OWNER_HEAD0 (0x00000001)
+#define NV837D_PIOR_SET_CONTROL_OWNER_HEAD1 (0x00000002)
+#define NV837D_PIOR_SET_CONTROL_SUB_OWNER 5:4
+#define NV837D_PIOR_SET_CONTROL_SUB_OWNER_NONE (0x00000000)
+#define NV837D_PIOR_SET_CONTROL_SUB_OWNER_SUBHEAD0 (0x00000001)
+#define NV837D_PIOR_SET_CONTROL_SUB_OWNER_SUBHEAD1 (0x00000002)
+#define NV837D_PIOR_SET_CONTROL_SUB_OWNER_BOTH (0x00000003)
+#define NV837D_PIOR_SET_CONTROL_PROTOCOL 11:8
+#define NV837D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000)
+#define NV837D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001)
+#define NV837D_PIOR_SET_CONTROL_HSYNC_POLARITY 12:12
+#define NV837D_PIOR_SET_CONTROL_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV837D_PIOR_SET_CONTROL_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV837D_PIOR_SET_CONTROL_VSYNC_POLARITY 13:13
+#define NV837D_PIOR_SET_CONTROL_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV837D_PIOR_SET_CONTROL_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV837D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV837D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV837D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH 19:16
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT (0x00000000)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_16_422 (0x00000001)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444 (0x00000002)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_20_422 (0x00000003)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_422 (0x00000004)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444 (0x00000005)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444 (0x00000006)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_32_422 (0x00000007)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_36_444 (0x00000008)
+#define NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_48_444 (0x00000009)
+#endif // _cl837d_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl887d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl887d.h
new file mode 100644
index 000000000000..c93efc642a92
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl887d.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl887d_h_
+#define _cl887d_h_
+
+#define NV887D_SOR_SET_CONTROL(a) (0x00000600 + (a)*0x00000040)
+#define NV887D_SOR_SET_CONTROL_OWNER 3:0
+#define NV887D_SOR_SET_CONTROL_OWNER_NONE (0x00000000)
+#define NV887D_SOR_SET_CONTROL_OWNER_HEAD0 (0x00000001)
+#define NV887D_SOR_SET_CONTROL_OWNER_HEAD1 (0x00000002)
+#define NV887D_SOR_SET_CONTROL_SUB_OWNER 5:4
+#define NV887D_SOR_SET_CONTROL_SUB_OWNER_NONE (0x00000000)
+#define NV887D_SOR_SET_CONTROL_SUB_OWNER_SUBHEAD0 (0x00000001)
+#define NV887D_SOR_SET_CONTROL_SUB_OWNER_SUBHEAD1 (0x00000002)
+#define NV887D_SOR_SET_CONTROL_SUB_OWNER_BOTH (0x00000003)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_AB (0x00000003)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_DUAL_SINGLE_TMDS (0x00000004)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_DDI_OUT (0x00000007)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NV887D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NV887D_SOR_SET_CONTROL_HSYNC_POLARITY 12:12
+#define NV887D_SOR_SET_CONTROL_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV887D_SOR_SET_CONTROL_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV887D_SOR_SET_CONTROL_VSYNC_POLARITY 13:13
+#define NV887D_SOR_SET_CONTROL_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV887D_SOR_SET_CONTROL_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV887D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV887D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV887D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH 19:16
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT (0x00000000)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_16_422 (0x00000001)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444 (0x00000002)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_20_422 (0x00000003)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_422 (0x00000004)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444 (0x00000005)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444 (0x00000006)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_32_422 (0x00000007)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_36_444 (0x00000008)
+#define NV887D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_48_444 (0x00000009)
+#endif // _cl887d_h
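
The SOR control method above takes a per-SOR index and packs the owning head and link protocol into one word. A hedged sketch, assuming DisplayPort on link A driven from HEAD0; the function name is invented for illustration.

static inline unsigned int
nv887d_sor_dp_a_head0_sketch(void)
{
        /* OWNER (3:0) = HEAD0, PROTOCOL (11:8) = DP_A; sync polarities and
         * pixel depth are left at their POSITIVE_TRUE/DEFAULT zero values. */
        return NV887D_SOR_SET_CONTROL_OWNER_HEAD0 |
               (NV887D_SOR_SET_CONTROL_PROTOCOL_DP_A << 8);
}

The resulting word would be written to the method offset NV887D_SOR_SET_CONTROL(i) for the SOR being programmed.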
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl902d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl902d.h
new file mode 100644
index 000000000000..8d0b42c04e3e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl902d.h
@@ -0,0 +1,357 @@
+/*
+ * Copyright (c) 2003 - 2004, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl_fermi_twod_a_h_
+#define _cl_fermi_twod_a_h_
+
+#define NV902D_SET_OBJECT 0x0000
+#define NV902D_SET_OBJECT_CLASS_ID 15:0
+#define NV902D_SET_OBJECT_ENGINE_ID 20:16
+
+#define NV902D_WAIT_FOR_IDLE 0x0110
+#define NV902D_WAIT_FOR_IDLE_V 31:0
+
+#define NV902D_SET_DST_FORMAT 0x0200
+#define NV902D_SET_DST_FORMAT_V 7:0
+#define NV902D_SET_DST_FORMAT_V_A8R8G8B8 0x000000CF
+#define NV902D_SET_DST_FORMAT_V_A8RL8GL8BL8 0x000000D0
+#define NV902D_SET_DST_FORMAT_V_A2R10G10B10 0x000000DF
+#define NV902D_SET_DST_FORMAT_V_A8B8G8R8 0x000000D5
+#define NV902D_SET_DST_FORMAT_V_A8BL8GL8RL8 0x000000D6
+#define NV902D_SET_DST_FORMAT_V_A2B10G10R10 0x000000D1
+#define NV902D_SET_DST_FORMAT_V_X8R8G8B8 0x000000E6
+#define NV902D_SET_DST_FORMAT_V_X8RL8GL8BL8 0x000000E7
+#define NV902D_SET_DST_FORMAT_V_X8B8G8R8 0x000000F9
+#define NV902D_SET_DST_FORMAT_V_X8BL8GL8RL8 0x000000FA
+#define NV902D_SET_DST_FORMAT_V_R5G6B5 0x000000E8
+#define NV902D_SET_DST_FORMAT_V_A1R5G5B5 0x000000E9
+#define NV902D_SET_DST_FORMAT_V_X1R5G5B5 0x000000F8
+#define NV902D_SET_DST_FORMAT_V_Y8 0x000000F3
+#define NV902D_SET_DST_FORMAT_V_Y16 0x000000EE
+#define NV902D_SET_DST_FORMAT_V_Y32 0x000000FF
+#define NV902D_SET_DST_FORMAT_V_Z1R5G5B5 0x000000FB
+#define NV902D_SET_DST_FORMAT_V_O1R5G5B5 0x000000FC
+#define NV902D_SET_DST_FORMAT_V_Z8R8G8B8 0x000000FD
+#define NV902D_SET_DST_FORMAT_V_O8R8G8B8 0x000000FE
+#define NV902D_SET_DST_FORMAT_V_Y1_8X8 0x0000001C
+#define NV902D_SET_DST_FORMAT_V_RF16 0x000000F2
+#define NV902D_SET_DST_FORMAT_V_RF32 0x000000E5
+#define NV902D_SET_DST_FORMAT_V_RF32_GF32 0x000000CB
+#define NV902D_SET_DST_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA
+#define NV902D_SET_DST_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE
+#define NV902D_SET_DST_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0
+#define NV902D_SET_DST_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3
+#define NV902D_SET_DST_FORMAT_V_R16_G16_B16_A16 0x000000C6
+#define NV902D_SET_DST_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7
+#define NV902D_SET_DST_FORMAT_V_BF10GF11RF11 0x000000E0
+#define NV902D_SET_DST_FORMAT_V_AN8BN8GN8RN8 0x000000D7
+#define NV902D_SET_DST_FORMAT_V_RF16_GF16 0x000000DE
+#define NV902D_SET_DST_FORMAT_V_R16_G16 0x000000DA
+#define NV902D_SET_DST_FORMAT_V_RN16_GN16 0x000000DB
+#define NV902D_SET_DST_FORMAT_V_G8R8 0x000000EA
+#define NV902D_SET_DST_FORMAT_V_GN8RN8 0x000000EB
+#define NV902D_SET_DST_FORMAT_V_RN16 0x000000EF
+#define NV902D_SET_DST_FORMAT_V_RN8 0x000000F4
+#define NV902D_SET_DST_FORMAT_V_A8 0x000000F7
+
+#define NV902D_SET_DST_MEMORY_LAYOUT 0x0204
+#define NV902D_SET_DST_MEMORY_LAYOUT_V 0:0
+#define NV902D_SET_DST_MEMORY_LAYOUT_V_BLOCKLINEAR 0x00000000
+#define NV902D_SET_DST_MEMORY_LAYOUT_V_PITCH 0x00000001
+
+#define NV902D_SET_DST_PITCH 0x0214
+#define NV902D_SET_DST_PITCH_V 31:0
+
+#define NV902D_SET_DST_WIDTH 0x0218
+#define NV902D_SET_DST_WIDTH_V 31:0
+
+#define NV902D_SET_DST_HEIGHT 0x021c
+#define NV902D_SET_DST_HEIGHT_V 31:0
+
+#define NV902D_SET_DST_OFFSET_UPPER 0x0220
+#define NV902D_SET_DST_OFFSET_UPPER_V 7:0
+
+#define NV902D_SET_DST_OFFSET_LOWER 0x0224
+#define NV902D_SET_DST_OFFSET_LOWER_V 31:0
+
+#define NV902D_SET_SRC_FORMAT 0x0230
+#define NV902D_SET_SRC_FORMAT_V 7:0
+#define NV902D_SET_SRC_FORMAT_V_A8R8G8B8 0x000000CF
+#define NV902D_SET_SRC_FORMAT_V_A8RL8GL8BL8 0x000000D0
+#define NV902D_SET_SRC_FORMAT_V_A2R10G10B10 0x000000DF
+#define NV902D_SET_SRC_FORMAT_V_A8B8G8R8 0x000000D5
+#define NV902D_SET_SRC_FORMAT_V_A8BL8GL8RL8 0x000000D6
+#define NV902D_SET_SRC_FORMAT_V_A2B10G10R10 0x000000D1
+#define NV902D_SET_SRC_FORMAT_V_X8R8G8B8 0x000000E6
+#define NV902D_SET_SRC_FORMAT_V_X8RL8GL8BL8 0x000000E7
+#define NV902D_SET_SRC_FORMAT_V_X8B8G8R8 0x000000F9
+#define NV902D_SET_SRC_FORMAT_V_X8BL8GL8RL8 0x000000FA
+#define NV902D_SET_SRC_FORMAT_V_R5G6B5 0x000000E8
+#define NV902D_SET_SRC_FORMAT_V_A1R5G5B5 0x000000E9
+#define NV902D_SET_SRC_FORMAT_V_X1R5G5B5 0x000000F8
+#define NV902D_SET_SRC_FORMAT_V_Y8 0x000000F3
+#define NV902D_SET_SRC_FORMAT_V_AY8 0x0000001D
+#define NV902D_SET_SRC_FORMAT_V_Y16 0x000000EE
+#define NV902D_SET_SRC_FORMAT_V_Y32 0x000000FF
+#define NV902D_SET_SRC_FORMAT_V_Z1R5G5B5 0x000000FB
+#define NV902D_SET_SRC_FORMAT_V_O1R5G5B5 0x000000FC
+#define NV902D_SET_SRC_FORMAT_V_Z8R8G8B8 0x000000FD
+#define NV902D_SET_SRC_FORMAT_V_O8R8G8B8 0x000000FE
+#define NV902D_SET_SRC_FORMAT_V_Y1_8X8 0x0000001C
+#define NV902D_SET_SRC_FORMAT_V_RF16 0x000000F2
+#define NV902D_SET_SRC_FORMAT_V_RF32 0x000000E5
+#define NV902D_SET_SRC_FORMAT_V_RF32_GF32 0x000000CB
+#define NV902D_SET_SRC_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA
+#define NV902D_SET_SRC_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE
+#define NV902D_SET_SRC_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0
+#define NV902D_SET_SRC_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3
+#define NV902D_SET_SRC_FORMAT_V_R16_G16_B16_A16 0x000000C6
+#define NV902D_SET_SRC_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7
+#define NV902D_SET_SRC_FORMAT_V_BF10GF11RF11 0x000000E0
+#define NV902D_SET_SRC_FORMAT_V_AN8BN8GN8RN8 0x000000D7
+#define NV902D_SET_SRC_FORMAT_V_RF16_GF16 0x000000DE
+#define NV902D_SET_SRC_FORMAT_V_R16_G16 0x000000DA
+#define NV902D_SET_SRC_FORMAT_V_RN16_GN16 0x000000DB
+#define NV902D_SET_SRC_FORMAT_V_G8R8 0x000000EA
+#define NV902D_SET_SRC_FORMAT_V_GN8RN8 0x000000EB
+#define NV902D_SET_SRC_FORMAT_V_RN16 0x000000EF
+#define NV902D_SET_SRC_FORMAT_V_RN8 0x000000F4
+#define NV902D_SET_SRC_FORMAT_V_A8 0x000000F7
+
+#define NV902D_SET_SRC_MEMORY_LAYOUT 0x0234
+#define NV902D_SET_SRC_MEMORY_LAYOUT_V 0:0
+#define NV902D_SET_SRC_MEMORY_LAYOUT_V_BLOCKLINEAR 0x00000000
+#define NV902D_SET_SRC_MEMORY_LAYOUT_V_PITCH 0x00000001
+
+#define NV902D_SET_SRC_PITCH 0x0244
+#define NV902D_SET_SRC_PITCH_V 31:0
+
+#define NV902D_SET_SRC_WIDTH 0x0248
+#define NV902D_SET_SRC_WIDTH_V 31:0
+
+#define NV902D_SET_SRC_HEIGHT 0x024c
+#define NV902D_SET_SRC_HEIGHT_V 31:0
+
+#define NV902D_SET_SRC_OFFSET_UPPER 0x0250
+#define NV902D_SET_SRC_OFFSET_UPPER_V 7:0
+
+#define NV902D_SET_SRC_OFFSET_LOWER 0x0254
+#define NV902D_SET_SRC_OFFSET_LOWER_V 31:0
+
+#define NV902D_SET_CLIP_ENABLE 0x0290
+#define NV902D_SET_CLIP_ENABLE_V 0:0
+#define NV902D_SET_CLIP_ENABLE_V_FALSE 0x00000000
+#define NV902D_SET_CLIP_ENABLE_V_TRUE 0x00000001
+
+#define NV902D_SET_ROP 0x02a0
+#define NV902D_SET_ROP_V 7:0
+
+#define NV902D_SET_OPERATION 0x02ac
+#define NV902D_SET_OPERATION_V 2:0
+#define NV902D_SET_OPERATION_V_SRCCOPY_AND 0x00000000
+#define NV902D_SET_OPERATION_V_ROP_AND 0x00000001
+#define NV902D_SET_OPERATION_V_BLEND_AND 0x00000002
+#define NV902D_SET_OPERATION_V_SRCCOPY 0x00000003
+#define NV902D_SET_OPERATION_V_ROP 0x00000004
+#define NV902D_SET_OPERATION_V_SRCCOPY_PREMULT 0x00000005
+#define NV902D_SET_OPERATION_V_BLEND_PREMULT 0x00000006
+
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT 0x02e8
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V 2:0
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8X8R5G6B5 0x00000000
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A1R5G5B5 0x00000001
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8R8G8B8 0x00000002
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8Y8 0x00000003
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8X8Y16 0x00000004
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_Y32 0x00000005
+#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_BYTE_EXPAND 0x00000006
+
+#define NV902D_SET_MONOCHROME_PATTERN_FORMAT 0x02ec
+#define NV902D_SET_MONOCHROME_PATTERN_FORMAT_V 0:0
+#define NV902D_SET_MONOCHROME_PATTERN_FORMAT_V_CGA6_M1 0x00000000
+#define NV902D_SET_MONOCHROME_PATTERN_FORMAT_V_LE_M1 0x00000001
+
+#define NV902D_RENDER_SOLID_PRIM_MODE 0x0580
+#define NV902D_RENDER_SOLID_PRIM_MODE_V 2:0
+#define NV902D_RENDER_SOLID_PRIM_MODE_V_POINTS 0x00000000
+#define NV902D_RENDER_SOLID_PRIM_MODE_V_LINES 0x00000001
+#define NV902D_RENDER_SOLID_PRIM_MODE_V_POLYLINE 0x00000002
+#define NV902D_RENDER_SOLID_PRIM_MODE_V_TRIANGLES 0x00000003
+#define NV902D_RENDER_SOLID_PRIM_MODE_V_RECTS 0x00000004
+
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT 0x0584
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V 7:0
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_RF32_GF32 0x000000CB
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A8R8G8B8 0x000000CF
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A2R10G10B10 0x000000DF
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A8B8G8R8 0x000000D5
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A2B10G10R10 0x000000D1
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X8R8G8B8 0x000000E6
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X8B8G8R8 0x000000F9
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_R5G6B5 0x000000E8
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A1R5G5B5 0x000000E9
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X1R5G5B5 0x000000F8
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y8 0x000000F3
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y16 0x000000EE
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y32 0x000000FF
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Z1R5G5B5 0x000000FB
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_O1R5G5B5 0x000000FC
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Z8R8G8B8 0x000000FD
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_O8R8G8B8 0x000000FE
+
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR 0x0588
+#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_V 31:0
+
+#define NV902D_RENDER_SOLID_PRIM_POINT_SET_X(j) (0x0600+(j)*8)
+#define NV902D_RENDER_SOLID_PRIM_POINT_SET_X_V 31:0
+
+#define NV902D_RENDER_SOLID_PRIM_POINT_Y(j) (0x0604+(j)*8)
+#define NV902D_RENDER_SOLID_PRIM_POINT_Y_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE 0x0800
+#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE_V 0:0
+#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE_V_COLOR 0x00000000
+#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE_V_INDEX 0x00000001
+
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT 0x0804
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V 7:0
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A8R8G8B8 0x000000CF
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A2R10G10B10 0x000000DF
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A8B8G8R8 0x000000D5
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A2B10G10R10 0x000000D1
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X8R8G8B8 0x000000E6
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X8B8G8R8 0x000000F9
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_R5G6B5 0x000000E8
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A1R5G5B5 0x000000E9
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X1R5G5B5 0x000000F8
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y8 0x000000F3
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y16 0x000000EE
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y32 0x000000FF
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Z1R5G5B5 0x000000FB
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_O1R5G5B5 0x000000FC
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Z8R8G8B8 0x000000FD
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_O8R8G8B8 0x000000FE
+
+#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT 0x0808
+#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V 1:0
+#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I1 0x00000000
+#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I4 0x00000001
+#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I8 0x00000002
+
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT 0x080c
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V 0:0
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V_CGA6_M1 0x00000000
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V_LE_M1 0x00000001
+
+#define NV902D_SET_PIXELS_FROM_CPU_WRAP 0x0810
+#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V 1:0
+#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_PIXEL 0x00000000
+#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_BYTE 0x00000001
+#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_DWORD 0x00000002
+
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR0 0x0814
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR0_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR1 0x0818
+#define NV902D_SET_PIXELS_FROM_CPU_COLOR1_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY 0x081c
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V 0:0
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V_TRANSPARENT 0x00000000
+#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V_OPAQUE 0x00000001
+
+#define NV902D_SET_PIXELS_FROM_CPU_SRC_WIDTH 0x0838
+#define NV902D_SET_PIXELS_FROM_CPU_SRC_WIDTH_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_SRC_HEIGHT 0x083c
+#define NV902D_SET_PIXELS_FROM_CPU_SRC_HEIGHT_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_FRAC 0x0840
+#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_FRAC_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_INT 0x0844
+#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_INT_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_FRAC 0x0848
+#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_FRAC_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_INT 0x084c
+#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_INT_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_FRAC 0x0850
+#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_FRAC_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_INT 0x0854
+#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_INT_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_FRAC 0x0858
+#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_FRAC_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_INT 0x085c
+#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_INT_V 31:0
+
+#define NV902D_PIXELS_FROM_CPU_DATA 0x0860
+#define NV902D_PIXELS_FROM_CPU_DATA_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP 0x0888
+#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V 0:0
+#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V_FALSE 0x00000000
+#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V_TRUE 0x00000001
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_X0 0x08b0
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_X0_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_Y0 0x08b4
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_Y0_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_WIDTH 0x08b8
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_WIDTH_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_HEIGHT 0x08bc
+#define NV902D_SET_PIXELS_FROM_MEMORY_DST_HEIGHT_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_FRAC 0x08c0
+#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_FRAC_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_INT 0x08c4
+#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_INT_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_FRAC 0x08c8
+#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_FRAC_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_INT 0x08cc
+#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_INT_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC 0x08d0
+#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_INT 0x08d4
+#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_INT_V 31:0
+
+#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC 0x08d8
+#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC_V 31:0
+
+#define NV902D_PIXELS_FROM_MEMORY_SRC_Y0_INT 0x08dc
+#define NV902D_PIXELS_FROM_MEMORY_SRC_Y0_INT_V 31:0
+#endif /* _cl_fermi_twod_a_h_ */
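
The *_V_* names above are values written to the method offset given by the matching define, so 2D setup reduces to a list of method/value pairs. A minimal, hypothetical pairing for a plain SRCCOPY blit between ARGB8888 surfaces; the struct and array names are illustrative only.

struct nv902d_mthd_sketch { unsigned int mthd, data; };

static const struct nv902d_mthd_sketch nv902d_srccopy_setup_sketch[] = {
        { NV902D_SET_DST_FORMAT,  NV902D_SET_DST_FORMAT_V_A8R8G8B8 },
        { NV902D_SET_SRC_FORMAT,  NV902D_SET_SRC_FORMAT_V_A8R8G8B8 },
        { NV902D_SET_CLIP_ENABLE, NV902D_SET_CLIP_ENABLE_V_FALSE   },
        { NV902D_SET_OPERATION,   NV902D_SET_OPERATION_V_SRCCOPY   },
};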
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl9039.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl9039.h
new file mode 100644
index 000000000000..b8282a615ec0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl9039.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2003-2004, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl_fermi_memory_to_memory_format_a_h_
+#define _cl_fermi_memory_to_memory_format_a_h_
+
+#define NV9039_SET_OBJECT 0x0000
+#define NV9039_SET_OBJECT_CLASS_ID 15:0
+#define NV9039_SET_OBJECT_ENGINE_ID 20:16
+
+#define NV9039_OFFSET_OUT_UPPER 0x0238
+#define NV9039_OFFSET_OUT_UPPER_VALUE 7:0
+
+#define NV9039_OFFSET_OUT 0x023c
+#define NV9039_OFFSET_OUT_VALUE 31:0
+
+#define NV9039_LAUNCH_DMA 0x0300
+#define NV9039_LAUNCH_DMA_SRC_INLINE 0:0
+#define NV9039_LAUNCH_DMA_SRC_INLINE_FALSE 0x00000000
+#define NV9039_LAUNCH_DMA_SRC_INLINE_TRUE 0x00000001
+#define NV9039_LAUNCH_DMA_SRC_MEMORY_LAYOUT 4:4
+#define NV9039_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NV9039_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH 0x00000001
+#define NV9039_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8
+#define NV9039_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000
+#define NV9039_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001
+#define NV9039_LAUNCH_DMA_COMPLETION_TYPE 13:12
+#define NV9039_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000
+#define NV9039_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001
+#define NV9039_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002
+#define NV9039_LAUNCH_DMA_INTERRUPT_TYPE 17:16
+#define NV9039_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000
+#define NV9039_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001
+#define NV9039_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 20:20
+#define NV9039_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000
+#define NV9039_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001
+
+#define NV9039_OFFSET_IN_UPPER 0x030c
+#define NV9039_OFFSET_IN_UPPER_VALUE 7:0
+
+#define NV9039_OFFSET_IN 0x0310
+#define NV9039_OFFSET_IN_VALUE 31:0
+
+#define NV9039_PITCH_IN 0x0314
+#define NV9039_PITCH_IN_VALUE 31:0
+
+#define NV9039_PITCH_OUT 0x0318
+#define NV9039_PITCH_OUT_VALUE 31:0
+
+#define NV9039_LINE_LENGTH_IN 0x031c
+#define NV9039_LINE_LENGTH_IN_VALUE 31:0
+
+#define NV9039_LINE_COUNT 0x0320
+#define NV9039_LINE_COUNT_VALUE 31:0
+#endif /* _cl_fermi_memory_to_memory_format_a_h_ */
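
NV9039 copies are driven by a single LAUNCH_DMA word whose fields select the source and destination layouts and the completion behaviour. A hedged sketch of that word for a plain pitch-to-pitch copy that only flushes on completion; the shift counts come straight from the field positions above and the function name is invented.

static inline unsigned int
nv9039_launch_dma_pitch_copy_sketch(void)
{
        /* SRC_MEMORY_LAYOUT (4:4) = PITCH, DST_MEMORY_LAYOUT (8:8) = PITCH,
         * COMPLETION_TYPE (13:12) = FLUSH_ONLY; the remaining fields stay
         * zero (SRC_INLINE_FALSE, INTERRUPT_TYPE_NONE, FOUR_WORDS). */
        return (NV9039_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH << 4) |
               (NV9039_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH << 8) |
               (NV9039_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY << 12);
}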
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl906f.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl906f.h
new file mode 100644
index 000000000000..673d668885bb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl906f.h
@@ -0,0 +1,74 @@
+/*******************************************************************************
+ Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+#ifndef _cl906f_h_
+#define _cl906f_h_
+
+/* fields and values */
+#define NV906F_SEMAPHOREA (0x00000010)
+#define NV906F_SEMAPHOREA_OFFSET_UPPER 7:0
+#define NV906F_SEMAPHOREB (0x00000014)
+#define NV906F_SEMAPHOREB_OFFSET_LOWER 31:2
+#define NV906F_SEMAPHOREC (0x00000018)
+#define NV906F_SEMAPHOREC_PAYLOAD 31:0
+#define NV906F_SEMAPHORED (0x0000001C)
+#define NV906F_SEMAPHORED_OPERATION 3:0
+#define NV906F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001
+#define NV906F_SEMAPHORED_OPERATION_RELEASE 0x00000002
+#define NV906F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004
+#define NV906F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008
+#define NV906F_SEMAPHORED_ACQUIRE_SWITCH 12:12
+#define NV906F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000
+#define NV906F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001
+#define NV906F_SEMAPHORED_RELEASE_WFI 20:20
+#define NV906F_SEMAPHORED_RELEASE_WFI_EN 0x00000000
+#define NV906F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001
+#define NV906F_SEMAPHORED_RELEASE_SIZE 24:24
+#define NV906F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000
+#define NV906F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001
+#define NV906F_NON_STALL_INTERRUPT (0x00000020)
+#define NV906F_NON_STALL_INTERRUPT_HANDLE 31:0
+#define NV906F_SET_REFERENCE (0x00000050)
+#define NV906F_SET_REFERENCE_COUNT 31:0
+
+/* dma method formats */
+#define NV906F_DMA_METHOD_ADDRESS 11:0
+#define NV906F_DMA_SUBDEVICE_MASK 15:4
+#define NV906F_DMA_METHOD_SUBCHANNEL 15:13
+#define NV906F_DMA_TERT_OP 17:16
+#define NV906F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000)
+#define NV906F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001)
+#define NV906F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002)
+#define NV906F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003)
+#define NV906F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000)
+#define NV906F_DMA_METHOD_COUNT 28:16
+#define NV906F_DMA_IMMD_DATA 28:16
+#define NV906F_DMA_SEC_OP 31:29
+#define NV906F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000)
+#define NV906F_DMA_SEC_OP_INC_METHOD (0x00000001)
+#define NV906F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002)
+#define NV906F_DMA_SEC_OP_NON_INC_METHOD (0x00000003)
+#define NV906F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004)
+#define NV906F_DMA_SEC_OP_ONE_INC (0x00000005)
+#define NV906F_DMA_SEC_OP_RESERVED6 (0x00000006)
+#define NV906F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007)
+#endif /* _cl906f_h_ */
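
The "dma method formats" block above describes how host (FIFO) command words are laid out in the pushbuffer. A hedged sketch of packing an incrementing-method header from those fields; the helper is hypothetical, and it assumes the ADDRESS field carries the method offset in dwords (offset >> 2), which is why 12 bits are enough.

static inline unsigned int
nv906f_incr_header_sketch(unsigned int subc, unsigned int mthd,
                          unsigned int count)
{
        return (NV906F_DMA_SEC_OP_INC_METHOD << 29) |  /* SEC_OP     31:29 */
               ((count & 0x1fff) << 16) |              /* COUNT      28:16 */
               ((subc & 0x7) << 13) |                  /* SUBCHANNEL 15:13 */
               ((mthd >> 2) & 0xfff);                  /* ADDRESS    11:0  */
}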
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907c.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907c.h
new file mode 100644
index 000000000000..77366a2c89ac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907c.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl907c_h_
+#define _cl907c_h_
+
+// class methods
+#define NV907C_SET_PRESENT_CONTROL (0x00000084)
+#define NV907C_SET_PRESENT_CONTROL_BEGIN_MODE 9:8
+#define NV907C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000)
+#define NV907C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001)
+#define NV907C_SET_PRESENT_CONTROL_BEGIN_MODE_ON_LINE (0x00000002)
+#define NV907C_SET_PRESENT_CONTROL_BEGIN_MODE_AT_FRAME (0x00000003)
+#define NV907C_SET_PRESENT_CONTROL_TIMESTAMP_MODE 2:2
+#define NV907C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000)
+#define NV907C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001)
+#define NV907C_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 7:4
+#define NV907C_SET_PRESENT_CONTROL_BEGIN_LINE 30:16
+#define NV907C_SET_PRESENT_CONTROL_ON_LINE_MARGIN 15:10
+#define NV907C_SET_CONTEXT_DMAS_ISO(b) (0x000000C0 + (b)*0x00000004)
+#define NV907C_SET_CONTEXT_DMAS_ISO_HANDLE 31:0
+#define NV907C_SET_BASE_LUT_LO (0x000000E0)
+#define NV907C_SET_BASE_LUT_LO_ENABLE 31:30
+#define NV907C_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV907C_SET_BASE_LUT_LO_ENABLE_USE_CORE_LUT (0x00000001)
+#define NV907C_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000002)
+#define NV907C_SET_BASE_LUT_LO_MODE 27:24
+#define NV907C_SET_BASE_LUT_LO_MODE_LORES (0x00000000)
+#define NV907C_SET_BASE_LUT_LO_MODE_HIRES (0x00000001)
+#define NV907C_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV907C_SET_BASE_LUT_HI (0x000000E4)
+#define NV907C_SET_BASE_LUT_HI_ORIGIN 31:0
+#define NV907C_SET_OUTPUT_LUT_LO (0x000000E8)
+#define NV907C_SET_OUTPUT_LUT_LO_ENABLE 31:30
+#define NV907C_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV907C_SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT (0x00000001)
+#define NV907C_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000002)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE 27:24
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV907C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV907C_SET_CONTEXT_DMA_LUT (0x000000FC)
+#define NV907C_SET_CONTEXT_DMA_LUT_HANDLE 31:0
+#define NV907C_SET_CSC_RED2RED (0x00000140)
+#define NV907C_SET_CSC_RED2RED_OWNER 31:31
+#define NV907C_SET_CSC_RED2RED_OWNER_CORE (0x00000000)
+#define NV907C_SET_CSC_RED2RED_OWNER_BASE (0x00000001)
+#define NV907C_SET_CSC_RED2RED_COEFF 18:0
+#define NV907C_SET_CSC_GRN2RED (0x00000144)
+#define NV907C_SET_CSC_GRN2RED_COEFF 18:0
+#define NV907C_SET_CSC_BLU2RED (0x00000148)
+#define NV907C_SET_CSC_BLU2RED_COEFF 18:0
+#define NV907C_SET_CSC_CONSTANT2RED (0x0000014C)
+#define NV907C_SET_CSC_CONSTANT2RED_COEFF 18:0
+#define NV907C_SET_CSC_RED2GRN (0x00000150)
+#define NV907C_SET_CSC_RED2GRN_COEFF 18:0
+#define NV907C_SET_CSC_GRN2GRN (0x00000154)
+#define NV907C_SET_CSC_GRN2GRN_COEFF 18:0
+#define NV907C_SET_CSC_BLU2GRN (0x00000158)
+#define NV907C_SET_CSC_BLU2GRN_COEFF 18:0
+#define NV907C_SET_CSC_CONSTANT2GRN (0x0000015C)
+#define NV907C_SET_CSC_CONSTANT2GRN_COEFF 18:0
+#define NV907C_SET_CSC_RED2BLU (0x00000160)
+#define NV907C_SET_CSC_RED2BLU_COEFF 18:0
+#define NV907C_SET_CSC_GRN2BLU (0x00000164)
+#define NV907C_SET_CSC_GRN2BLU_COEFF 18:0
+#define NV907C_SET_CSC_BLU2BLU (0x00000168)
+#define NV907C_SET_CSC_BLU2BLU_COEFF 18:0
+#define NV907C_SET_CSC_CONSTANT2BLU (0x0000016C)
+#define NV907C_SET_CSC_CONSTANT2BLU_COEFF 18:0
+
+#define NV907C_SURFACE_SET_OFFSET(a,b) (0x00000400 + (a)*0x00000020 + (b)*0x00000004)
+#define NV907C_SURFACE_SET_OFFSET_ORIGIN 31:0
+#define NV907C_SURFACE_SET_SIZE(a) (0x00000408 + (a)*0x00000020)
+#define NV907C_SURFACE_SET_SIZE_WIDTH 15:0
+#define NV907C_SURFACE_SET_SIZE_HEIGHT 31:16
+#define NV907C_SURFACE_SET_STORAGE(a) (0x0000040C + (a)*0x00000020)
+#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV907C_SURFACE_SET_STORAGE_PITCH 20:8
+#define NV907C_SURFACE_SET_STORAGE_MEMORY_LAYOUT 24:24
+#define NV907C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV907C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV907C_SURFACE_SET_PARAMS(a) (0x00000410 + (a)*0x00000020)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT 15:8
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_VOID16 (0x0000001F)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_VOID32 (0x0000002E)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NV907C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NV907C_SURFACE_SET_PARAMS_SUPER_SAMPLE 1:0
+#define NV907C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV907C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV907C_SURFACE_SET_PARAMS_GAMMA 2:2
+#define NV907C_SURFACE_SET_PARAMS_GAMMA_LINEAR (0x00000000)
+#define NV907C_SURFACE_SET_PARAMS_GAMMA_SRGB (0x00000001)
+#define NV907C_SURFACE_SET_PARAMS_LAYOUT 5:4
+#define NV907C_SURFACE_SET_PARAMS_LAYOUT_FRM (0x00000000)
+#define NV907C_SURFACE_SET_PARAMS_LAYOUT_FLD1 (0x00000001)
+#define NV907C_SURFACE_SET_PARAMS_LAYOUT_FLD2 (0x00000002)
+#endif // _cl907c_h
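
SET_PRESENT_CONTROL governs how a base-channel flip is presented: BEGIN_MODE selects tear-free versus immediate updates, and MIN_PRESENT_INTERVAL is the minimum number of vblanks between flips. A hedged sketch composing that word for the two common cases; the function name is invented for illustration.

static inline unsigned int
nv907c_present_control_sketch(int vsync)
{
        unsigned int mode = vsync ? NV907C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING
                                  : NV907C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE;

        return (mode << 8) |           /* BEGIN_MODE           9:8 */
               ((vsync ? 1 : 0) << 4); /* MIN_PRESENT_INTERVAL 7:4 */
}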
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h
new file mode 100644
index 000000000000..34bc3eafac7d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl907d_h_
+#define _cl907d_h_
+
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 12:12
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 15:14
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:27
+
+
+// class methods
+#define NV907D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020)
+#define NV907D_DAC_SET_CONTROL_OWNER_MASK 3:0
+#define NV907D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV907D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV907D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV907D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV907D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV907D_DAC_SET_CONTROL_PROTOCOL 12:8
+#define NV907D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000)
+#define NV907D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013)
+
+#define NV907D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020)
+#define NV907D_SOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV907D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV907D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV907D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV907D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV907D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV907D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NV907D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NV907D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NV907D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NV907D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NV907D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NV907D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NV907D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NV907D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV907D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV907D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV907D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NV907D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NV907D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NV907D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009)
+#define NV907D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTROL_STRUCTURE 0:0
+#define NV907D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001)
+#define NV907D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300)
+#define NV907D_HEAD_SET_OVERSCAN_COLOR_RED 9:0
+#define NV907D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10
+#define NV907D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20
+#define NV907D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300)
+#define NV907D_HEAD_SET_RASTER_SIZE_WIDTH 14:0
+#define NV907D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16
+#define NV907D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300)
+#define NV907D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NV907D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NV907D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300)
+#define NV907D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NV907D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NV907D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300)
+#define NV907D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NV907D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NV907D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300)
+#define NV907D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0
+#define NV907D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16
+#define NV907D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300)
+#define NV907D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0
+#define NV907D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10
+#define NV907D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20
+#define NV907D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300)
+#define NV907D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0
+#define NV907D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000)
+#define NV907D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001)
+#define NV907D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002)
+#define NV907D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2
+#define NV907D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NV907D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NV907D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3
+#define NV907D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000)
+#define NV907D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV907D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF)
+#define NV907D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5
+#define NV907D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NV907D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#define NV907D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV907D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV907D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300)
+#define NV907D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV907D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV907D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0
+#define NV907D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300)
+#define NV907D_HEAD_SET_OFFSET_ORIGIN 31:0
+#define NV907D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300)
+#define NV907D_HEAD_SET_SIZE_WIDTH 15:0
+#define NV907D_HEAD_SET_SIZE_HEIGHT 31:16
+#define NV907D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300)
+#define NV907D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV907D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV907D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV907D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV907D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV907D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV907D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV907D_HEAD_SET_STORAGE_PITCH 20:8
+#define NV907D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24
+#define NV907D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV907D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV907D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300)
+#define NV907D_HEAD_SET_PARAMS_FORMAT 15:8
+#define NV907D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NV907D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NV907D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0
+#define NV907D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV907D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV907D_HEAD_SET_PARAMS_GAMMA 2:2
+#define NV907D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000)
+#define NV907D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001)
+#define NV907D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0
+#define NV907D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NV907D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24
+#define NV907D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_SIZE 26:26
+#define NV907D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 13:8
+#define NV907D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 21:16
+#define NV907D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28
+#define NV907D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV907D_HEAD_SET_OFFSET_CURSOR(a) (0x00000484 + (a)*0x00000300)
+#define NV907D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
+#define NV907D_HEAD_SET_CONTEXT_DMA_CURSOR(a) (0x0000048C + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0
+#define NV907D_HEAD_SET_DITHER_CONTROL(a) (0x00000490 + (a)*0x00000300)
+#define NV907D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NV907D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NV907D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NV907D_HEAD_SET_DITHER_CONTROL_BITS 2:1
+#define NV907D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000)
+#define NV907D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001)
+#define NV907D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002)
+#define NV907D_HEAD_SET_DITHER_CONTROL_MODE 6:3
+#define NV907D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NV907D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NV907D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NV907D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NV907D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NV907D_HEAD_SET_DITHER_CONTROL_PHASE 8:7
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002)
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16
+#define NV907D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24
+#define NV907D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300)
+#define NV907D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NV907D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NV907D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NV907D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NV907D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2
+#define NV907D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000)
+#define NV907D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001)
+#define NV907D_HEAD_SET_PROCAMP_SAT_COS 19:8
+#define NV907D_HEAD_SET_PROCAMP_SAT_SINE 31:20
+#define NV907D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5
+#define NV907D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NV907D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NV907D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6
+#define NV907D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000)
+#define NV907D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001)
+#define NV907D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300)
+#define NV907D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NV907D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300)
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300)
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300)
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300)
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0
+#define NV907D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV907D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300)
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003)
+#define NV907D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005)
+#endif // _cl907d_h
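
The field definitions above follow the DRF-style "HIGH:LOW" notation: a name such as NV907D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24 gives the high and low bit positions of that field inside the 32-bit method data word, and the suffixed defines are the enumerant values it may take. A minimal, self-contained sketch of turning such HIGH:LOW pairs into mask/shift helpers is shown below; the FIELD_MASK/FIELD_SET/FIELD_GET macros and the main() harness are invented for illustration only and are not nouveau's actual helpers.

/* Illustrative sketch: generic helpers for the HIGH:LOW field notation above. */
#include <stdint.h>
#include <stdio.h>

/* Mask for a field occupying bits hi..lo (assumes the field is < 32 bits wide). */
#define FIELD_MASK(hi, lo)      ((((uint32_t)1 << ((hi) - (lo) + 1)) - 1) << (lo))
/* Place a value into that field of a 32-bit method data word. */
#define FIELD_SET(hi, lo, val)  (((uint32_t)(val) << (lo)) & FIELD_MASK(hi, lo))
/* Extract the field back out of a 32-bit method data word. */
#define FIELD_GET(hi, lo, word) (((uint32_t)(word) & FIELD_MASK(hi, lo)) >> (lo))

int main(void)
{
	/* NV907D_HEAD_SET_CONTROL_CURSOR: ENABLE is 31:31, SIZE 26:26, FORMAT 25:24. */
	uint32_t data = FIELD_SET(31, 31, 1)   /* ..._ENABLE_ENABLE   */
		      | FIELD_SET(26, 26, 1)   /* ..._SIZE_W64_H64    */
		      | FIELD_SET(25, 24, 1);  /* ..._FORMAT_A8R8G8B8 */

	printf("data = 0x%08x, format = %u\n", data, FIELD_GET(25, 24, data));
	return 0;
}
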
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907e.h
new file mode 100644
index 000000000000..64ef0c578153
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907e.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl907e_h_
+#define _cl907e_h_
+
+// class methods
+#define NV907E_SET_PRESENT_CONTROL (0x00000084)
+#define NV907E_SET_PRESENT_CONTROL_BEGIN_MODE 1:0
+#define NV907E_SET_PRESENT_CONTROL_BEGIN_MODE_ASAP (0x00000000)
+#define NV907E_SET_PRESENT_CONTROL_BEGIN_MODE_TIMESTAMP (0x00000003)
+#define NV907E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 7:4
+#define NV907E_SET_CONTEXT_DMA_ISO (0x000000C0)
+#define NV907E_SET_CONTEXT_DMA_ISO_HANDLE 31:0
+#define NV907E_SET_COMPOSITION_CONTROL (0x00000100)
+#define NV907E_SET_COMPOSITION_CONTROL_MODE 3:0
+#define NV907E_SET_COMPOSITION_CONTROL_MODE_SOURCE_COLOR_VALUE_KEYING (0x00000000)
+#define NV907E_SET_COMPOSITION_CONTROL_MODE_DESTINATION_COLOR_VALUE_KEYING (0x00000001)
+#define NV907E_SET_COMPOSITION_CONTROL_MODE_OPAQUE (0x00000002)
+
+#define NV907E_SURFACE_SET_OFFSET (0x00000400)
+#define NV907E_SURFACE_SET_OFFSET_ORIGIN 31:0
+#define NV907E_SURFACE_SET_SIZE (0x00000408)
+#define NV907E_SURFACE_SET_SIZE_WIDTH 15:0
+#define NV907E_SURFACE_SET_SIZE_HEIGHT 31:16
+#define NV907E_SURFACE_SET_STORAGE (0x0000040C)
+#define NV907E_SURFACE_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV907E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV907E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV907E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV907E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV907E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV907E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV907E_SURFACE_SET_STORAGE_PITCH 20:8
+#define NV907E_SURFACE_SET_STORAGE_MEMORY_LAYOUT 24:24
+#define NV907E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV907E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV907E_SURFACE_SET_PARAMS (0x00000410)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT 15:8
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_VE8YO8UE8YE8 (0x00000028)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_YO8VE8YE8UE8 (0x00000029)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NV907E_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NV907E_SURFACE_SET_PARAMS_COLOR_SPACE 1:0
+#define NV907E_SURFACE_SET_PARAMS_COLOR_SPACE_RGB (0x00000000)
+#define NV907E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_601 (0x00000001)
+#define NV907E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_709 (0x00000002)
+#endif // _cl907e_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
new file mode 100644
index 000000000000..2a2612d6e1e0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl917d_h_
+#define _cl917d_h_
+
+// class methods
+#define NV917D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NV917D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV917D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV917D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+
+#define NV917D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NV917D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24
+#define NV917D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8
+#define NV917D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV917D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
+#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS 2:1
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000)
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001)
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE 6:3
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NV917D_HEAD_SET_DITHER_CONTROL_PHASE 8:7
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002)
+#endif // _cl917d_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cla0b5.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cla0b5.h
new file mode 100644
index 000000000000..fe5d10f05468
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cla0b5.h
@@ -0,0 +1,162 @@
+/*******************************************************************************
+ Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+
+#ifndef _cla0b5_h_
+#define _cla0b5_h_
+
+#define NVA0B5_SET_SRC_PHYS_MODE (0x00000260)
+#define NVA0B5_SET_SRC_PHYS_MODE_TARGET 1:0
+#define NVA0B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000)
+#define NVA0B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001)
+#define NVA0B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002)
+#define NVA0B5_SET_DST_PHYS_MODE (0x00000264)
+#define NVA0B5_SET_DST_PHYS_MODE_TARGET 1:0
+#define NVA0B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000)
+#define NVA0B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001)
+#define NVA0B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002)
+#define NVA0B5_LAUNCH_DMA (0x00000300)
+#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0
+#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000)
+#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001)
+#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002)
+#define NVA0B5_LAUNCH_DMA_FLUSH_ENABLE 2:2
+#define NVA0B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000)
+#define NVA0B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002)
+#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5
+#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000)
+#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001)
+#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002)
+#define NVA0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7
+#define NVA0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NVA0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NVA0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8
+#define NVA0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NVA0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NVA0B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9
+#define NVA0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000)
+#define NVA0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001)
+#define NVA0B5_LAUNCH_DMA_REMAP_ENABLE 10:10
+#define NVA0B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000)
+#define NVA0B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001)
+#define NVA0B5_LAUNCH_DMA_BYPASS_L2 11:11
+#define NVA0B5_LAUNCH_DMA_BYPASS_L2_USE_PTE_SETTING (0x00000000)
+#define NVA0B5_LAUNCH_DMA_BYPASS_L2_FORCE_VOLATILE (0x00000001)
+#define NVA0B5_LAUNCH_DMA_SRC_TYPE 12:12
+#define NVA0B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000)
+#define NVA0B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001)
+#define NVA0B5_LAUNCH_DMA_DST_TYPE 13:13
+#define NVA0B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000)
+#define NVA0B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMIN (0x0000000B)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMAX (0x0000000C)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMUL (0x0000000D)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMUL (0x0000000E)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000)
+#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001)
+#define NVA0B5_OFFSET_IN_UPPER (0x00000400)
+#define NVA0B5_OFFSET_IN_UPPER_UPPER 7:0
+#define NVA0B5_OFFSET_IN_LOWER (0x00000404)
+#define NVA0B5_OFFSET_IN_LOWER_VALUE 31:0
+#define NVA0B5_OFFSET_OUT_UPPER (0x00000408)
+#define NVA0B5_OFFSET_OUT_UPPER_UPPER 7:0
+#define NVA0B5_OFFSET_OUT_LOWER (0x0000040C)
+#define NVA0B5_OFFSET_OUT_LOWER_VALUE 31:0
+#define NVA0B5_PITCH_IN (0x00000410)
+#define NVA0B5_PITCH_IN_VALUE 31:0
+#define NVA0B5_PITCH_OUT (0x00000414)
+#define NVA0B5_PITCH_OUT_VALUE 31:0
+#define NVA0B5_LINE_LENGTH_IN (0x00000418)
+#define NVA0B5_LINE_LENGTH_IN_VALUE 31:0
+#define NVA0B5_LINE_COUNT (0x0000041C)
+#define NVA0B5_LINE_COUNT_VALUE 31:0
+#define NVA0B5_SET_REMAP_CONST_A (0x00000700)
+#define NVA0B5_SET_REMAP_CONST_A_V 31:0
+#define NVA0B5_SET_REMAP_CONST_B (0x00000704)
+#define NVA0B5_SET_REMAP_CONST_B_V 31:0
+#define NVA0B5_SET_REMAP_COMPONENTS (0x00000708)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X 2:0
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y 6:4
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z 10:8
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W 14:12
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005)
+#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006)
+#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16
+#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000)
+#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001)
+#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002)
+#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002)
+#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003)
+#endif // _cla0b5_h
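
The NVA0B5 copy-class methods above are consumed the same way: each define names a 32-bit method, and its data word is assembled from the bit fields listed under it. A small self-contained sketch of composing a LAUNCH_DMA word from those fields follows; the FIELD_MASK/FIELD_SET helpers are again invented stand-ins for illustration, not the kernel's real macros.

/* Illustrative sketch: compose an NVA0B5 LAUNCH_DMA data word from its fields. */
#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK(hi, lo)      ((((uint32_t)1 << ((hi) - (lo) + 1)) - 1) << (lo))
#define FIELD_SET(hi, lo, val)  (((uint32_t)(val) << (lo)) & FIELD_MASK(hi, lo))

int main(void)
{
	/* DATA_TRANSFER_TYPE 1:0 = NON_PIPELINED (2), FLUSH_ENABLE 2:2 = TRUE (1),
	 * SRC_MEMORY_LAYOUT 7:7 = PITCH (1), DST_MEMORY_LAYOUT 8:8 = PITCH (1),
	 * MULTI_LINE_ENABLE 9:9 = TRUE (1). */
	uint32_t launch = FIELD_SET(1, 0, 2)
			| FIELD_SET(2, 2, 1)
			| FIELD_SET(7, 7, 1)
			| FIELD_SET(8, 8, 1)
			| FIELD_SET(9, 9, 1);

	printf("NVA0B5_LAUNCH_DMA data = 0x%08x\n", launch);
	return 0;
}
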
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc37a.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc37a.h
new file mode 100644
index 000000000000..ded616f93388
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc37a.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 1993-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _clc37a__h_
+#define _clc37a__h_
+
+#define NVC37A_UPDATE (0x00000200)
+#define NVC37A_SET_CURSOR_HOT_SPOT_POINT_OUT(b) (0x00000208 + (b)*0x00000004)
+#define NVC37A_SET_CURSOR_HOT_SPOT_POINT_OUT_X 15:0
+#define NVC37A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y 31:16
+#endif // _clc37a_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc37b.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc37b.h
new file mode 100644
index 000000000000..0f7323bfa09b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc37b.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 1993-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _clC37b_h_
+#define _clC37b_h_
+
+// dma opcode instructions
+#define NVC37B_DMA
+#define NVC37B_DMA_OPCODE 31:29
+#define NVC37B_DMA_OPCODE_METHOD 0x00000000
+#define NVC37B_DMA_OPCODE_JUMP 0x00000001
+#define NVC37B_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC37B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC37B_DMA_METHOD_COUNT 27:18
+#define NVC37B_DMA_METHOD_OFFSET 13:2
+#define NVC37B_DMA_DATA 31:0
+#define NVC37B_DMA_DATA_NOP 0x00000000
+#define NVC37B_DMA_JUMP_OFFSET 11:2
+#define NVC37B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NVC37B_UPDATE (0x00000200)
+#define NVC37B_UPDATE_INTERLOCK_WITH_WINDOW 1:1
+#define NVC37B_UPDATE_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVC37B_UPDATE_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVC37B_SET_POINT_OUT(b) (0x00000208 + (b)*0x00000004)
+#define NVC37B_SET_POINT_OUT_X 15:0
+#define NVC37B_SET_POINT_OUT_Y 31:16
+#endif // _clC37b_h
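
The "dma opcode instructions" block above describes the push-buffer word format for this class: bits 31:29 select the opcode and, for a METHOD opcode, METHOD_COUNT and METHOD_OFFSET give the number of data words that follow and the method they target. A hedged sketch of packing one such header word is below; the helper macros are illustrative, and the assumption that METHOD_OFFSET holds the method address divided by 4 is mine, not something stated in the header.

/* Illustrative sketch: build an NVC37B push-buffer METHOD header word. */
#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK(hi, lo)      ((((uint32_t)1 << ((hi) - (lo) + 1)) - 1) << (lo))
#define FIELD_SET(hi, lo, val)  (((uint32_t)(val) << (lo)) & FIELD_MASK(hi, lo))

int main(void)
{
	/* OPCODE 31:29 = METHOD (0), METHOD_COUNT 27:18, METHOD_OFFSET 13:2.
	 * Assuming the offset field holds the method address in dwords, this
	 * header would be followed by one data word for method 0x0208,
	 * i.e. NVC37B_SET_POINT_OUT(0). */
	uint32_t header = FIELD_SET(31, 29, 0)          /* OPCODE_METHOD  */
			| FIELD_SET(27, 18, 1)          /* one data word  */
			| FIELD_SET(13, 2, 0x0208 / 4); /* method offset  */

	printf("push header = 0x%08x\n", header);
	return 0;
}
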
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc37d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc37d.h
new file mode 100644
index 000000000000..2b8c314c9ed9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc37d.h
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 1993-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _clC37d_h_
+#define _clC37d_h_
+
+#define NV_DISP_NOTIFIER 0x00000000
+#define NV_DISP_NOTIFIER_SIZEOF 0x00000010
+#define NV_DISP_NOTIFIER__0 0x00000000
+#define NV_DISP_NOTIFIER__0_PRESENT_COUNT 7:0
+#define NV_DISP_NOTIFIER__0_FIELD 8:8
+#define NV_DISP_NOTIFIER__0_FLIP_TYPE 9:9
+#define NV_DISP_NOTIFIER__0_FLIP_TYPE_NON_TEARING 0x00000000
+#define NV_DISP_NOTIFIER__0_FLIP_TYPE_IMMEDIATE 0x00000001
+#define NV_DISP_NOTIFIER__0_R1 15:10
+#define NV_DISP_NOTIFIER__0_R2 23:16
+#define NV_DISP_NOTIFIER__0_R3 29:24
+#define NV_DISP_NOTIFIER__0_STATUS 31:30
+#define NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN 0x00000000
+#define NV_DISP_NOTIFIER__0_STATUS_BEGUN 0x00000001
+#define NV_DISP_NOTIFIER__0_STATUS_FINISHED 0x00000002
+#define NV_DISP_NOTIFIER__1 0x00000001
+#define NV_DISP_NOTIFIER__1_R4 31:0
+#define NV_DISP_NOTIFIER__2 0x00000002
+#define NV_DISP_NOTIFIER__2_TIMESTAMP_LO 31:0
+#define NV_DISP_NOTIFIER__3 0x00000003
+#define NV_DISP_NOTIFIER__3_TIMESTAMP_HI 31:0
+
+
+// class methods
+#define NVC37D_UPDATE (0x00000200)
+#define NVC37D_UPDATE_SPECIAL_HANDLING 21:20
+#define NVC37D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NVC37D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NVC37D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NVC37D_UPDATE_SPECIAL_HANDLING_REASON 19:12
+#define NVC37D_UPDATE_INHIBIT_INTERRUPTS 24:24
+#define NVC37D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NVC37D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NVC37D_SET_CONTEXT_DMA_NOTIFIER (0x00000208)
+#define NVC37D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NVC37D_SET_NOTIFIER_CONTROL (0x0000020C)
+#define NVC37D_SET_NOTIFIER_CONTROL_MODE 0:0
+#define NVC37D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NVC37D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVC37D_SET_NOTIFIER_CONTROL_OFFSET 11:4
+#define NVC37D_SET_NOTIFIER_CONTROL_NOTIFY 12:12
+#define NVC37D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000)
+#define NVC37D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS (0x00000218)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+
+#define NVC37D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK 7:0
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040)
+#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DSI (0x0000000A)
+#define NVC37D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NVC37D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16
+#define NVC37D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC37D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+
+#define NVC37D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER 3:0
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i))
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007)
+#define NVC37D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT 17:16
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT_USAGE_NONE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT_USAGE_257 (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT_USAGE_1025 (0x00000002)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+
+#define NVC37D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003)
+#define NVC37D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3
+#define NVC37D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000)
+#define NVC37D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001)
+#define NVC37D_HEAD_SET_PROCAMP_SAT_COS 15:4
+#define NVC37D_HEAD_SET_PROCAMP_SAT_SINE 27:16
+#define NVC37D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28
+#define NVC37D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NVC37D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 29:29
+#define NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000)
+#define NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001)
+#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL 31:30
+#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL_AUTO (0x00000000)
+#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL_VIDEO (0x00000001)
+#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL_GRAPHICS (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000400)
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVC37D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS 5:4
+#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2
+#define NVC37D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE 10:8
+#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NVC37D_HEAD_SET_DITHER_CONTROL_PHASE 13:12
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT 5:4
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVC37D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000400)
+#define NVC37D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NVC37D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NVC37D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_RASTER_SIZE_WIDTH 14:0
+#define NVC37D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16
+#define NVC37D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NVC37D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NVC37D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000400)
+#define NVC37D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NVC37D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NVC37D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NVC37D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NVC37D_HEAD_SET_CONTEXT_DMA_CURSOR(a,b) (0x00002088 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC37D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0
+#define NVC37D_HEAD_SET_OFFSET_CURSOR(a,b) (0x00002090 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC37D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
+#define NVC37D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000400)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA 29:28
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_NONE (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_SRGB (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_YUV8_10 (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_YUV12 (0x00000003)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT(a) (0x000020A4 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE 1:0
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_257 (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_1025 (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE 5:4
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_UNITY (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_XRBIAS (0x00000001)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_XVYCC (0x00000002)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE 9:8
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE_INDEX (0x00000000)
+#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE_INTERPOLATE (0x00000001)
+#define NVC37D_HEAD_SET_OFFSET_OUTPUT_LUT(a) (0x000020A8 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_OFFSET_OUTPUT_LUT_ORIGIN 31:0
+#define NVC37D_HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(a) (0x000020AC + (a)*0x00000400)
+#define NVC37D_HEAD_SET_CONTEXT_DMA_OUTPUT_LUT_HANDLE 31:0
+#define NVC37D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00002180 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
+#define NVC37D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 4:0
+#define NVC37D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8
+#define NVC37D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NVC37D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR(i) (0x00000060 +(i))
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR__SIZE_1 4
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR0 (0x00000060)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR1 (0x00000061)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR2 (0x00000062)
+#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR3 (0x00000063)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR(i) (0x00000060 +(i))
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR__SIZE_1 4
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR0 (0x00000060)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR1 (0x00000061)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR2 (0x00000062)
+#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR3 (0x00000063)
+#define NVC37D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9
+#define NVC37D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NVC37D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#endif // _clC37d_h
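
The NVC37D method macros above encode their per-instance stride directly: window methods step by 0x80 per window index and head methods by 0x400 per head, as the (a)*0x00000080 and (a)*0x00000400 factors show. The short, self-contained sketch below only illustrates that offset arithmetic; it copies two macros from the header above and is not driver code.

#include <stdio.h>

/* Copied from clc37d.h above: per-window 0x80 stride, per-head 0x400 stride. */
#define NVC37D_WINDOW_SET_CONTROL(a)    (0x00001000 + (a)*0x00000080)
#define NVC37D_HEAD_SET_RASTER_SIZE(a)  (0x00002064 + (a)*0x00000400)

int main(void)
{
	unsigned int i;

	/* Print the method offsets for the first four windows and heads. */
	for (i = 0; i < 4; i++)
		printf("WINDOW_SET_CONTROL(%u) = 0x%08x  HEAD_SET_RASTER_SIZE(%u) = 0x%08x\n",
		       i, NVC37D_WINDOW_SET_CONTROL(i),
		       i, NVC37D_HEAD_SET_RASTER_SIZE(i));
	return 0;
}
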
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc37e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc37e.h
new file mode 100644
index 000000000000..99e5a737b0d1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc37e.h
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 1993-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _clC37e_h_
+#define _clC37e_h_
+
+// class methods
+#define NVC37E_UPDATE (0x00000200)
+#define NVC37E_UPDATE_INTERLOCK_WITH_WIN_IMM 12:12
+#define NVC37E_UPDATE_INTERLOCK_WITH_WIN_IMM_DISABLE (0x00000000)
+#define NVC37E_UPDATE_INTERLOCK_WITH_WIN_IMM_ENABLE (0x00000001)
+#define NVC37E_SET_SEMAPHORE_CONTROL (0x0000020C)
+#define NVC37E_SET_SEMAPHORE_CONTROL_OFFSET 7:0
+#define NVC37E_SET_SEMAPHORE_ACQUIRE (0x00000210)
+#define NVC37E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0
+#define NVC37E_SET_SEMAPHORE_RELEASE (0x00000214)
+#define NVC37E_SET_SEMAPHORE_RELEASE_VALUE 31:0
+#define NVC37E_SET_CONTEXT_DMA_SEMAPHORE (0x00000218)
+#define NVC37E_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0
+#define NVC37E_SET_CONTEXT_DMA_NOTIFIER (0x0000021C)
+#define NVC37E_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NVC37E_SET_NOTIFIER_CONTROL (0x00000220)
+#define NVC37E_SET_NOTIFIER_CONTROL_MODE 0:0
+#define NVC37E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NVC37E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVC37E_SET_NOTIFIER_CONTROL_OFFSET 11:4
+#define NVC37E_SET_SIZE (0x00000224)
+#define NVC37E_SET_SIZE_WIDTH 15:0
+#define NVC37E_SET_SIZE_HEIGHT 31:16
+#define NVC37E_SET_STORAGE (0x00000228)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NVC37E_SET_STORAGE_MEMORY_LAYOUT 4:4
+#define NVC37E_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NVC37E_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NVC37E_SET_PARAMS (0x0000022C)
+#define NVC37E_SET_PARAMS_FORMAT 7:0
+#define NVC37E_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NVC37E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F)
+#define NVC37E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NVC37E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVC37E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E)
+#define NVC37E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVC37E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6)
+#define NVC37E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NVC37E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9)
+#define NVC37E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NVC37E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NVC37E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NVC37E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024)
+#define NVC37E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NVC37E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NVC37E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NVC37E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028)
+#define NVC37E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___U8V8_N422R (0x00000037)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___U8___V8_N444 (0x0000003A)
+#define NVC37E_SET_PARAMS_FORMAT_Y8___U8___V8_N420 (0x0000003B)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___U10V10_N422R (0x00000057)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___U10___V10_N444 (0x0000005A)
+#define NVC37E_SET_PARAMS_FORMAT_Y10___U10___V10_N420 (0x0000005B)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___U12V12_N422R (0x00000077)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___U12___V12_N444 (0x0000007A)
+#define NVC37E_SET_PARAMS_FORMAT_Y12___U12___V12_N420 (0x0000007B)
+#define NVC37E_SET_PARAMS_COLOR_SPACE 9:8
+#define NVC37E_SET_PARAMS_COLOR_SPACE_RGB (0x00000000)
+#define NVC37E_SET_PARAMS_COLOR_SPACE_YUV_601 (0x00000001)
+#define NVC37E_SET_PARAMS_COLOR_SPACE_YUV_709 (0x00000002)
+#define NVC37E_SET_PARAMS_COLOR_SPACE_YUV_2020 (0x00000003)
+#define NVC37E_SET_PARAMS_INPUT_RANGE 13:12
+#define NVC37E_SET_PARAMS_INPUT_RANGE_BYPASS (0x00000000)
+#define NVC37E_SET_PARAMS_INPUT_RANGE_LIMITED (0x00000001)
+#define NVC37E_SET_PARAMS_INPUT_RANGE_FULL (0x00000002)
+#define NVC37E_SET_PARAMS_UNDERREPLICATE 16:16
+#define NVC37E_SET_PARAMS_UNDERREPLICATE_DISABLE (0x00000000)
+#define NVC37E_SET_PARAMS_UNDERREPLICATE_ENABLE (0x00000001)
+#define NVC37E_SET_PARAMS_DE_GAMMA 21:20
+#define NVC37E_SET_PARAMS_DE_GAMMA_NONE (0x00000000)
+#define NVC37E_SET_PARAMS_DE_GAMMA_SRGB (0x00000001)
+#define NVC37E_SET_PARAMS_DE_GAMMA_YUV8_10 (0x00000002)
+#define NVC37E_SET_PARAMS_DE_GAMMA_YUV12 (0x00000003)
+#define NVC37E_SET_PARAMS_CSC 17:17
+#define NVC37E_SET_PARAMS_CSC_DISABLE (0x00000000)
+#define NVC37E_SET_PARAMS_CSC_ENABLE (0x00000001)
+#define NVC37E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18
+#define NVC37E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000)
+#define NVC37E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001)
+#define NVC37E_SET_PARAMS_SWAP_UV 19:19
+#define NVC37E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000)
+#define NVC37E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001)
+#define NVC37E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004)
+#define NVC37E_SET_PLANAR_STORAGE_PITCH 12:0
+#define NVC37E_SET_CONTEXT_DMA_ISO(b) (0x00000240 + (b)*0x00000004)
+#define NVC37E_SET_CONTEXT_DMA_ISO_HANDLE 31:0
+#define NVC37E_SET_OFFSET(b) (0x00000260 + (b)*0x00000004)
+#define NVC37E_SET_OFFSET_ORIGIN 31:0
+#define NVC37E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004)
+#define NVC37E_SET_POINT_IN_X 15:0
+#define NVC37E_SET_POINT_IN_Y 31:16
+#define NVC37E_SET_SIZE_IN (0x00000298)
+#define NVC37E_SET_SIZE_IN_WIDTH 14:0
+#define NVC37E_SET_SIZE_IN_HEIGHT 30:16
+#define NVC37E_SET_SIZE_OUT (0x000002A4)
+#define NVC37E_SET_SIZE_OUT_WIDTH 14:0
+#define NVC37E_SET_SIZE_OUT_HEIGHT 30:16
+#define NVC37E_SET_CONTROL_INPUT_LUT (0x000002B0)
+#define NVC37E_SET_CONTROL_INPUT_LUT_SIZE 1:0
+#define NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_257 (0x00000000)
+#define NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_1025 (0x00000002)
+#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE 5:4
+#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE_UNITY (0x00000000)
+#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE_XRBIAS (0x00000001)
+#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE_XVYCC (0x00000002)
+#define NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE 9:8
+#define NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INDEX (0x00000000)
+#define NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INTERPOLATE (0x00000001)
+#define NVC37E_SET_OFFSET_INPUT_LUT (0x000002B4)
+#define NVC37E_SET_OFFSET_INPUT_LUT_ORIGIN 31:0
+#define NVC37E_SET_CONTEXT_DMA_INPUT_LUT (0x000002B8)
+#define NVC37E_SET_CONTEXT_DMA_INPUT_LUT_HANDLE 31:0
+#define NVC37E_SET_CSC_RED2RED (0x000002BC)
+#define NVC37E_SET_CSC_RED2RED_COEFF 18:0
+#define NVC37E_SET_CSC_GREEN2RED (0x000002C0)
+#define NVC37E_SET_CSC_GREEN2RED_COEFF 18:0
+#define NVC37E_SET_CSC_BLUE2RED (0x000002C4)
+#define NVC37E_SET_CSC_BLUE2RED_COEFF 18:0
+#define NVC37E_SET_CSC_CONSTANT2RED (0x000002C8)
+#define NVC37E_SET_CSC_CONSTANT2RED_COEFF 18:0
+#define NVC37E_SET_CSC_RED2GREEN (0x000002CC)
+#define NVC37E_SET_CSC_RED2GREEN_COEFF 18:0
+#define NVC37E_SET_CSC_GREEN2GREEN (0x000002D0)
+#define NVC37E_SET_CSC_GREEN2GREEN_COEFF 18:0
+#define NVC37E_SET_CSC_BLUE2GREEN (0x000002D4)
+#define NVC37E_SET_CSC_BLUE2GREEN_COEFF 18:0
+#define NVC37E_SET_CSC_CONSTANT2GREEN (0x000002D8)
+#define NVC37E_SET_CSC_CONSTANT2GREEN_COEFF 18:0
+#define NVC37E_SET_CSC_RED2BLUE (0x000002DC)
+#define NVC37E_SET_CSC_RED2BLUE_COEFF 18:0
+#define NVC37E_SET_CSC_GREEN2BLUE (0x000002E0)
+#define NVC37E_SET_CSC_GREEN2BLUE_COEFF 18:0
+#define NVC37E_SET_CSC_BLUE2BLUE (0x000002E4)
+#define NVC37E_SET_CSC_BLUE2BLUE_COEFF 18:0
+#define NVC37E_SET_CSC_CONSTANT2BLUE (0x000002E8)
+#define NVC37E_SET_CSC_CONSTANT2BLUE_COEFF 18:0
+#define NVC37E_SET_COMPOSITION_CONTROL (0x000002EC)
+#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT 1:0
+#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE (0x00000000)
+#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC (0x00000001)
+#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST (0x00000002)
+#define NVC37E_SET_COMPOSITION_CONTROL_DEPTH 11:4
+#define NVC37E_SET_COMPOSITION_CONSTANT_ALPHA (0x000002F0)
+#define NVC37E_SET_COMPOSITION_CONSTANT_ALPHA_K1 7:0
+#define NVC37E_SET_COMPOSITION_CONSTANT_ALPHA_K2 15:8
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT (0x000002F4)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT 3:0
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT 7:4
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT 11:8
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K2 (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1 (0x00000004)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT 15:12
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K2 (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1 (0x00000004)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT 19:16
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K1 (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT 23:20
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K1 (0x00000002)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT 27:24
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ONE (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT 31:28
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ONE (0x00000001)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003)
+#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC37E_SET_KEY_ALPHA (0x000002F8)
+#define NVC37E_SET_KEY_ALPHA_MIN 15:0
+#define NVC37E_SET_KEY_ALPHA_MAX 31:16
+#define NVC37E_SET_KEY_RED_CR (0x000002FC)
+#define NVC37E_SET_KEY_RED_CR_MIN 15:0
+#define NVC37E_SET_KEY_RED_CR_MAX 31:16
+#define NVC37E_SET_KEY_GREEN_Y (0x00000300)
+#define NVC37E_SET_KEY_GREEN_Y_MIN 15:0
+#define NVC37E_SET_KEY_GREEN_Y_MAX 31:16
+#define NVC37E_SET_KEY_BLUE_CB (0x00000304)
+#define NVC37E_SET_KEY_BLUE_CB_MIN 15:0
+#define NVC37E_SET_KEY_BLUE_CB_MAX 31:16
+#define NVC37E_SET_PRESENT_CONTROL (0x00000308)
+#define NVC37E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0
+#define NVC37E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4
+#define NVC37E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000)
+#define NVC37E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001)
+#define NVC37E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8
+#define NVC37E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000)
+#define NVC37E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS (0x00000370)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 0:0
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+1):((i)+1)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 1:1
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 2:2
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 3:3
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 4:4
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 5:5
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 6:6
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 7:7
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 8:8
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000)
+#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS (0x00000374)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+#endif // _clC37e_h
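
The field definitions in these headers use NVIDIA's HI:LO bit-range notation (for example, NVC37E_SET_SIZE_WIDTH is 15:0 and NVC37E_SET_SIZE_HEIGHT is 31:16). The driver pairs the headers with its own helper macros elsewhere in the series; the standalone sketch below only shows how such a token can be decoded with the conditional-expression trick, and the FLD_* names are invented for this example rather than taken from nouveau.

/*
 * Illustrative only: the FLD_* helpers are hypothetical, not nouveau's API.
 * They show how a "HI:LO" token such as 31:16 can be split with a C
 * conditional expression: (0 ? 31:16) evaluates to 16, (1 ? 31:16) to 31.
 */
#include <stdio.h>

#define FLD_LO(f)     (0 ? f)                                   /* low bit  */
#define FLD_HI(f)     (1 ? f)                                   /* high bit */
#define FLD_MASK(f)   ((~0u >> (31 - FLD_HI(f))) & (~0u << FLD_LO(f)))
#define FLD_VAL(f, v) (((unsigned int)(v) << FLD_LO(f)) & FLD_MASK(f))
#define FLD_GET(f, r) (((r) & FLD_MASK(f)) >> FLD_LO(f))

/* Field layout copied from NVC37E_SET_SIZE above. */
#define SET_SIZE_WIDTH   15:0
#define SET_SIZE_HEIGHT  31:16

int main(void)
{
	/* Pack a 1920x1080 size the way a NVC37E_SET_SIZE data word carries it. */
	unsigned int size = FLD_VAL(SET_SIZE_WIDTH, 1920) |
			    FLD_VAL(SET_SIZE_HEIGHT, 1080);

	printf("data word: 0x%08x (width=%u, height=%u)\n",
	       size, FLD_GET(SET_SIZE_WIDTH, size),
	       FLD_GET(SET_SIZE_HEIGHT, size));
	return 0;
}
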
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h
new file mode 100644
index 000000000000..d83ac815e06c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 1993-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clC57d_h_
+#define _clC57d_h_
+
+// class methods
+#define NVC57D_SET_CONTEXT_DMA_NOTIFIER (0x00000208)
+#define NVC57D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+
+#define NVC57D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003)
+#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3
+#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28
+#define NVC57D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NVC57D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 (0x00000017)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F)
+#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x0000201C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 0:0
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING 4:4
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 9:8
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVC57D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_RASTER_SIZE_WIDTH 14:0
+#define NVC57D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16
+#define NVC57D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NVC57D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NVC57D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NVC57D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NVC57D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NVC57D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NVC57D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0
+#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE 3:2
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002)
+#define NVC57D_HEAD_SET_OLUT_CONTROL_SIZE 18:8
+#define NVC57D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0
+#define NVC57D_HEAD_SET_CONTEXT_DMA_OLUT(a) (0x00002288 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_CONTEXT_DMA_OLUT_HANDLE 31:0
+#define NVC57D_HEAD_SET_OFFSET_OLUT(a) (0x0000228C + (a)*0x00000400)
+#define NVC57D_HEAD_SET_OFFSET_OLUT_ORIGIN 31:0
+#endif // _clC57d_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc57e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc57e.h
new file mode 100644
index 000000000000..850d16fe438d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc57e.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 1993-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clC57e_h_
+#define _clC57e_h_
+
+// class methods
+#define NVC57E_SET_SIZE (0x00000224)
+#define NVC57E_SET_SIZE_WIDTH 15:0
+#define NVC57E_SET_SIZE_HEIGHT 31:16
+#define NVC57E_SET_STORAGE (0x00000228)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NVC57E_SET_STORAGE_MEMORY_LAYOUT 4:4
+#define NVC57E_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NVC57E_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NVC57E_SET_PARAMS (0x0000022C)
+#define NVC57E_SET_PARAMS_FORMAT 7:0
+#define NVC57E_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NVC57E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F)
+#define NVC57E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NVC57E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVC57E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E)
+#define NVC57E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVC57E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6)
+#define NVC57E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NVC57E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9)
+#define NVC57E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NVC57E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NVC57E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NVC57E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024)
+#define NVC57E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NVC57E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NVC57E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NVC57E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028)
+#define NVC57E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029)
+#define NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035)
+#define NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036)
+#define NVC57E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038)
+#define NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055)
+#define NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056)
+#define NVC57E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058)
+#define NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075)
+#define NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076)
+#define NVC57E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078)
+#define NVC57E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18
+#define NVC57E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000)
+#define NVC57E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001)
+#define NVC57E_SET_PARAMS_SWAP_UV 19:19
+#define NVC57E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000)
+#define NVC57E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001)
+#define NVC57E_SET_PARAMS_FMT_ROUNDING_MODE 22:22
+#define NVC57E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST (0x00000000)
+#define NVC57E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN (0x00000001)
+#define NVC57E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004)
+#define NVC57E_SET_PLANAR_STORAGE_PITCH 12:0
+#define NVC57E_SET_CONTEXT_DMA_ISO(b) (0x00000240 + (b)*0x00000004)
+#define NVC57E_SET_CONTEXT_DMA_ISO_HANDLE 31:0
+#define NVC57E_SET_OFFSET(b) (0x00000260 + (b)*0x00000004)
+#define NVC57E_SET_OFFSET_ORIGIN 31:0
+#define NVC57E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004)
+#define NVC57E_SET_POINT_IN_X 15:0
+#define NVC57E_SET_POINT_IN_Y 31:16
+#define NVC57E_SET_SIZE_IN (0x00000298)
+#define NVC57E_SET_SIZE_IN_WIDTH 15:0
+#define NVC57E_SET_SIZE_IN_HEIGHT 31:16
+#define NVC57E_SET_SIZE_OUT (0x000002A4)
+#define NVC57E_SET_SIZE_OUT_WIDTH 15:0
+#define NVC57E_SET_SIZE_OUT_HEIGHT 31:16
+#define NVC57E_SET_PRESENT_CONTROL (0x00000308)
+#define NVC57E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0
+#define NVC57E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4
+#define NVC57E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000)
+#define NVC57E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001)
+#define NVC57E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8
+#define NVC57E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000)
+#define NVC57E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001)
+#define NVC57E_SET_FMT_COEFFICIENT_C00 (0x00000400)
+#define NVC57E_SET_FMT_COEFFICIENT_C00_VALUE 20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C01 (0x00000404)
+#define NVC57E_SET_FMT_COEFFICIENT_C01_VALUE 20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C02 (0x00000408)
+#define NVC57E_SET_FMT_COEFFICIENT_C02_VALUE 20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C03 (0x0000040C)
+#define NVC57E_SET_FMT_COEFFICIENT_C03_VALUE 20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C10 (0x00000410)
+#define NVC57E_SET_FMT_COEFFICIENT_C10_VALUE 20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C11 (0x00000414)
+#define NVC57E_SET_FMT_COEFFICIENT_C11_VALUE 20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C12 (0x00000418)
+#define NVC57E_SET_FMT_COEFFICIENT_C12_VALUE 20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C13 (0x0000041C)
+#define NVC57E_SET_FMT_COEFFICIENT_C13_VALUE 20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C20 (0x00000420)
+#define NVC57E_SET_FMT_COEFFICIENT_C20_VALUE 20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C21 (0x00000424)
+#define NVC57E_SET_FMT_COEFFICIENT_C21_VALUE 20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C22 (0x00000428)
+#define NVC57E_SET_FMT_COEFFICIENT_C22_VALUE 20:0
+#define NVC57E_SET_FMT_COEFFICIENT_C23 (0x0000042C)
+#define NVC57E_SET_FMT_COEFFICIENT_C23_VALUE 20:0
+#define NVC57E_SET_ILUT_CONTROL (0x00000440)
+#define NVC57E_SET_ILUT_CONTROL_INTERPOLATE 0:0
+#define NVC57E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVC57E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVC57E_SET_ILUT_CONTROL_MIRROR 1:1
+#define NVC57E_SET_ILUT_CONTROL_MIRROR_DISABLE (0x00000000)
+#define NVC57E_SET_ILUT_CONTROL_MIRROR_ENABLE (0x00000001)
+#define NVC57E_SET_ILUT_CONTROL_MODE 3:2
+#define NVC57E_SET_ILUT_CONTROL_MODE_SEGMENTED (0x00000000)
+#define NVC57E_SET_ILUT_CONTROL_MODE_DIRECT8 (0x00000001)
+#define NVC57E_SET_ILUT_CONTROL_MODE_DIRECT10 (0x00000002)
+#define NVC57E_SET_ILUT_CONTROL_SIZE 18:8
+#define NVC57E_SET_CONTEXT_DMA_ILUT (0x00000444)
+#define NVC57E_SET_CONTEXT_DMA_ILUT_HANDLE 31:0
+#define NVC57E_SET_OFFSET_ILUT (0x00000448)
+#define NVC57E_SET_OFFSET_ILUT_ORIGIN 31:0
+#endif // _clC57e_h
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/drf.h b/drivers/gpu/drm/nouveau/include/nvhw/drf.h
new file mode 100644
index 000000000000..bd0fc41446e2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/drf.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __NVHW_DRF_H__
+#define __NVHW_DRF_H__
+
+/* Helpers common to all DRF accessors. */
+#define DRF_LO(drf) (0 ? drf)
+#define DRF_HI(drf) (1 ? drf)
+#define DRF_BITS(drf) (DRF_HI(drf) - DRF_LO(drf) + 1)
+#define DRF_MASK(drf) (~0ULL >> (64 - DRF_BITS(drf)))
+#define DRF_SMASK(drf) (DRF_MASK(drf) << DRF_LO(drf))
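+/*
+ * Fields are written as "high:low" bit positions, so e.g. for a 7:4
+ * field DRF_LO() expands to (0 ? 7:4) == 4 and DRF_HI() to (1 ? 7:4) == 7,
+ * giving DRF_BITS() == 4 and DRF_MASK() == 0xf.
+ */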
+
+/* Helpers for DRF-MW accessors. */
+#define DRF_MX_MW(drf) drf
+#define DRF_MX(drf) DRF_MX_##drf
+#define DRF_MW(drf) DRF_MX(drf)
+#define DRF_MW_SPANS(o,drf) (DRF_LW_IDX((o),drf) != DRF_HW_IDX((o),drf))
+#define DRF_MW_SIZE(o) (sizeof((o)[0]) * 8)
+
+#define DRF_LW_IDX(o,drf) (DRF_LO(DRF_MW(drf)) / DRF_MW_SIZE(o))
+#define DRF_LW_LO(o,drf) (DRF_LO(DRF_MW(drf)) % DRF_MW_SIZE(o))
+#define DRF_LW_HI(o,drf) (DRF_MW_SPANS((o),drf) ? (DRF_MW_SIZE(o) - 1) : DRF_HW_HI((o),drf))
+#define DRF_LW_BITS(o,drf) (DRF_LW_HI((o),drf) - DRF_LW_LO((o),drf) + 1)
+#define DRF_LW_MASK(o,drf) (~0ULL >> (64 - DRF_LW_BITS((o),drf)))
+#define DRF_LW_SMASK(o,drf) (DRF_LW_MASK((o),drf) << DRF_LW_LO((o),drf))
+#define DRF_LW_GET(o,drf) (((o)[DRF_LW_IDX((o),drf)] >> DRF_LW_LO((o),drf)) & DRF_LW_MASK((o),drf))
+#define DRF_LW_VAL(o,drf,v) (((v) & DRF_LW_MASK((o),drf)) << DRF_LW_LO((o),drf))
+#define DRF_LW_CLR(o,drf) ((o)[DRF_LW_IDX((o),drf)] & ~DRF_LW_SMASK((o),drf))
+#define DRF_LW_SET(o,drf,v) (DRF_LW_CLR((o),drf) | DRF_LW_VAL((o),drf,(v)))
+
+#define DRF_HW_IDX(o,drf) (DRF_HI(DRF_MW(drf)) / DRF_MW_SIZE(o))
+#define DRF_HW_LO(o,drf) 0
+#define DRF_HW_HI(o,drf) (DRF_HI(DRF_MW(drf)) % DRF_MW_SIZE(o))
+#define DRF_HW_BITS(o,drf) (DRF_HW_HI((o),drf) - DRF_HW_LO((o),drf) + 1)
+#define DRF_HW_MASK(o,drf) (~0ULL >> (64 - DRF_HW_BITS((o),drf)))
+#define DRF_HW_SMASK(o,drf) (DRF_HW_MASK((o),drf) << DRF_HW_LO((o),drf))
+#define DRF_HW_GET(o,drf) ((o)[DRF_HW_IDX(o,drf)] & DRF_HW_SMASK((o),drf))
+#define DRF_HW_VAL(o,drf,v) (((long long)(v) >> DRF_LW_BITS((o),drf)) & DRF_HW_SMASK((o),drf))
+#define DRF_HW_CLR(o,drf) ((o)[DRF_HW_IDX((o),drf)] & ~DRF_HW_SMASK((o),drf))
+#define DRF_HW_SET(o,drf,v) (DRF_HW_CLR((o),drf) | DRF_HW_VAL((o),drf,(v)))
+
+/* DRF accessors. */
+#define NVVAL_X(drf,v) (((v) & DRF_MASK(drf)) << DRF_LO(drf))
+#define NVVAL_N(X,d,r,f, v) NVVAL_X(d##_##r##_##f, (v))
+#define NVVAL_I(X,d,r,f,i,v) NVVAL_X(d##_##r##_##f(i), (v))
+#define NVVAL_(X,_1,_2,_3,_4,_5,IMPL,...) IMPL
+#define NVVAL(A...) NVVAL_(X, ##A, NVVAL_I, NVVAL_N)(X, ##A)
+
+#define NVDEF_N(X,d,r,f, v) NVVAL_X(d##_##r##_##f, d##_##r##_##f##_##v)
+#define NVDEF_I(X,d,r,f,i,v) NVVAL_X(d##_##r##_##f(i), d##_##r##_##f##_##v)
+#define NVDEF_(X,_1,_2,_3,_4,_5,IMPL,...) IMPL
+#define NVDEF(A...) NVDEF_(X, ##A, NVDEF_I, NVDEF_N)(X, ##A)
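+/*
+ * NVVAL(d,r,f,v) shifts a raw value into the d##_##r##_##f field, while
+ * NVDEF(d,r,f,v) does the same with the named value d##_##r##_##f##_##v,
+ * e.g. NVDEF(NV507C, DMA, OPCODE, METHOD) can be OR'd with
+ * NVVAL(NV507C, DMA, METHOD_COUNT, count) to build a single header word.
+ */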
+
+#define NVVAL_GET_X(o,drf) (((o) >> DRF_LO(drf)) & DRF_MASK(drf))
+#define NVVAL_GET_N(X,o,d,r,f ) NVVAL_GET_X(o, d##_##r##_##f)
+#define NVVAL_GET_I(X,o,d,r,f,i) NVVAL_GET_X(o, d##_##r##_##f(i))
+#define NVVAL_GET_(X,_1,_2,_3,_4,_5,IMPL,...) IMPL
+#define NVVAL_GET(A...) NVVAL_GET_(X, ##A, NVVAL_GET_I, NVVAL_GET_N)(X, ##A)
+
+#define NVVAL_TEST_X(o,drf,cmp,drfv) (NVVAL_GET_X((o), drf) cmp drfv)
+#define NVVAL_TEST_N(X,o,d,r,f, cmp,v) NVVAL_TEST_X(o, d##_##r##_##f , cmp, (v))
+#define NVVAL_TEST_I(X,o,d,r,f,i,cmp,v) NVVAL_TEST_X(o, d##_##r##_##f(i), cmp, (v))
+#define NVVAL_TEST_(X,_1,_2,_3,_4,_5,_6,_7,IMPL,...) IMPL
+#define NVVAL_TEST(A...) NVVAL_TEST_(X, ##A, NVVAL_TEST_I, NVVAL_TEST_N)(X, ##A)
+
+#define NVDEF_TEST_N(X,o,d,r,f, cmp,v) NVVAL_TEST_X(o, d##_##r##_##f , cmp, d##_##r##_##f##_##v)
+#define NVDEF_TEST_I(X,o,d,r,f,i,cmp,v) NVVAL_TEST_X(o, d##_##r##_##f(i), cmp, d##_##r##_##f##_##v)
+#define NVDEF_TEST_(X,_1,_2,_3,_4,_5,_6,_7,IMPL,...) IMPL
+#define NVDEF_TEST(A...) NVDEF_TEST_(X, ##A, NVDEF_TEST_I, NVDEF_TEST_N)(X, ##A)
+
+#define NVVAL_SET_X(o,drf,v) (((o) & ~DRF_SMASK(drf)) | NVVAL_X(drf, (v)))
+#define NVVAL_SET_N(X,o,d,r,f, v) NVVAL_SET_X(o, d##_##r##_##f, (v))
+#define NVVAL_SET_I(X,o,d,r,f,i,v) NVVAL_SET_X(o, d##_##r##_##f(i), (v))
+#define NVVAL_SET_(X,_1,_2,_3,_4,_5,_6,IMPL,...) IMPL
+#define NVVAL_SET(A...) NVVAL_SET_(X, ##A, NVVAL_SET_I, NVVAL_SET_N)(X, ##A)
+
+#define NVDEF_SET_N(X,o,d,r,f, v) NVVAL_SET_X(o, d##_##r##_##f, d##_##r##_##f##_##v)
+#define NVDEF_SET_I(X,o,d,r,f,i,v) NVVAL_SET_X(o, d##_##r##_##f(i), d##_##r##_##f##_##v)
+#define NVDEF_SET_(X,_1,_2,_3,_4,_5,_6,IMPL,...) IMPL
+#define NVDEF_SET(A...) NVDEF_SET_(X, ##A, NVDEF_SET_I, NVDEF_SET_N)(X, ##A)
+
+/* DRF-MW accessors. */
+#define NVVAL_MW_GET_X(o,drf) \
+ ((DRF_MW_SPANS((o),drf) ? \
+ (DRF_HW_GET((o),drf) << DRF_LW_BITS((o),drf)) : 0) | DRF_LW_GET((o),drf))
+#define NVVAL_MW_GET_N(X,o,d,r,f ) NVVAL_MW_GET_X((o), d##_##r##_##f)
+#define NVVAL_MW_GET_I(X,o,d,r,f,i) NVVAL_MW_GET_X((o), d##_##r##_##f(i))
+#define NVVAL_MW_GET_(X,_1,_2,_3,_4,_5,IMPL,...) IMPL
+#define NVVAL_MW_GET(A...) NVVAL_MW_GET_(X, ##A, NVVAL_MW_GET_I, NVVAL_MW_GET_N)(X, ##A)
+
+#define NVVAL_MW_SET_X(o,drf,v) do { \
+ (o)[DRF_LW_IDX((o),drf)] = DRF_LW_SET((o),drf,(v)); \
+ if (DRF_MW_SPANS((o),drf)) \
+ (o)[DRF_HW_IDX((o),drf)] = DRF_HW_SET((o),drf,(v)); \
+} while(0)
+#define NVVAL_MW_SET_N(X,o,d,r,f, v) NVVAL_MW_SET_X((o), d##_##r##_##f, (v))
+#define NVVAL_MW_SET_I(X,o,d,r,f,i,v) NVVAL_MW_SET_X((o), d##_##r##_##f(i), (v))
+#define NVVAL_MW_SET_(X,_1,_2,_3,_4,_5,_6,IMPL,...) IMPL
+#define NVVAL_MW_SET(A...) NVVAL_MW_SET_(X, ##A, NVVAL_MW_SET_I, NVVAL_MW_SET_N)(X, ##A)
+
+#define NVDEF_MW_SET_N(X,o,d,r,f, v) NVVAL_MW_SET_X(o, d##_##r##_##f, d##_##r##_##f##_##v)
+#define NVDEF_MW_SET_I(X,o,d,r,f,i,v) NVVAL_MW_SET_X(o, d##_##r##_##f(i), d##_##r##_##f##_##v)
+#define NVDEF_MW_SET_(X,_1,_2,_3,_4,_5,_6,IMPL,...) IMPL
+#define NVDEF_MW_SET(A...) NVDEF_MW_SET_(X, ##A, NVDEF_MW_SET_I, NVDEF_MW_SET_N)(X, ##A)
+
+/* Helper for reading an arbitrary object. */
+#define DRF_RD_X(e,p,o,dr) e((p), (o), dr)
+#define DRF_RD_N(X,e,p,o,d,r ) DRF_RD_X(e, (p), (o), d##_##r)
+#define DRF_RD_I(X,e,p,o,d,r,i) DRF_RD_X(e, (p), (o), d##_##r(i))
+#define DRF_RD_(X,_1,_2,_3,_4,_5,_6,IMPL,...) IMPL
+#define DRF_RD(A...) DRF_RD_(X, ##A, DRF_RD_I, DRF_RD_N)(X, ##A)
+
+/* Helper for writing an arbitrary object. */
+#define DRF_WR_X(e,p,o,dr,v) e((p), (o), dr, (v))
+#define DRF_WR_N(X,e,p,o,d,r, v) DRF_WR_X(e, (p), (o), d##_##r , (v))
+#define DRF_WR_I(X,e,p,o,d,r,i,v) DRF_WR_X(e, (p), (o), d##_##r(i), (v))
+#define DRF_WR_(X,_1,_2,_3,_4,_5,_6,_7,IMPL,...) IMPL
+#define DRF_WR(A...) DRF_WR_(X, ##A, DRF_WR_I, DRF_WR_N)(X, ##A)
+
+/* Helper for modifying an arbitrary object. */
+#define DRF_MR_X(er,ew,ty,p,o,dr,m,v) ({ \
+ ty _t = DRF_RD_X(er, (p), (o), dr); \
+ DRF_WR_X(ew, (p), (o), dr, (_t & ~(m)) | (v)); \
+ _t; \
+})
+#define DRF_MR_N(X,er,ew,ty,p,o,d,r ,m,v) DRF_MR_X(er, ew, ty, (p), (o), d##_##r , (m), (v))
+#define DRF_MR_I(X,er,ew,ty,p,o,d,r,i,m,v) DRF_MR_X(er, ew, ty, (p), (o), d##_##r(i), (m), (v))
+#define DRF_MR_(X,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,IMPL,...) IMPL
+#define DRF_MR(A...) DRF_MR_(X, ##A, DRF_MR_I, DRF_MR_N)(X, ##A)
+
+/* Helper for extracting a field value from arbitrary object. */
+#define DRF_RV_X(e,p,o,dr,drf) NVVAL_GET_X(DRF_RD_X(e, (p), (o), dr), drf)
+#define DRF_RV_N(X,e,p,o,d,r, f) DRF_RV_X(e, (p), (o), d##_##r , d##_##r##_##f)
+#define DRF_RV_I(X,e,p,o,d,r,i,f) DRF_RV_X(e, (p), (o), d##_##r(i), d##_##r##_##f)
+#define DRF_RV_(X,_1,_2,_3,_4,_5,_6,_7,IMPL,...) IMPL
+#define DRF_RV(A...) DRF_RV_(X, ##A, DRF_RV_I, DRF_RV_N)(X, ##A)
+
+/* Helper for writing field value to arbitrary object (all other bits cleared). */
+#define DRF_WV_N(X,e,p,o,d,r, f,v) \
+ DRF_WR_X(e, (p), (o), d##_##r , NVVAL_X(d##_##r##_##f, (v)))
+#define DRF_WV_I(X,e,p,o,d,r,i,f,v) \
+ DRF_WR_X(e, (p), (o), d##_##r(i), NVVAL_X(d##_##r##_##f, (v)))
+#define DRF_WV_(X,_1,_2,_3,_4,_5,_6,_7,_8,IMPL,...) IMPL
+#define DRF_WV(A...) DRF_WV_(X, ##A, DRF_WV_I, DRF_WV_N)(X, ##A)
+
+/* Helper for writing field definition to arbitrary object (all other bits cleared). */
+#define DRF_WD_N(X,e,p,o,d,r, f,v) \
+ DRF_WR_X(e, (p), (o), d##_##r , NVVAL_X(d##_##r##_##f, d##_##r##_##f##_##v))
+#define DRF_WD_I(X,e,p,o,d,r,i,f,v) \
+ DRF_WR_X(e, (p), (o), d##_##r(i), NVVAL_X(d##_##r##_##f, d##_##r##_##f##_##v))
+#define DRF_WD_(X,_1,_2,_3,_4,_5,_6,_7,_8,IMPL,...) IMPL
+#define DRF_WD(A...) DRF_WD_(X, ##A, DRF_WD_I, DRF_WD_N)(X, ##A)
+
+/* Helper for modifying field value in arbitrary object. */
+#define DRF_MV_N(X,er,ew,ty,p,o,d,r, f,v) \
+ NVVAL_GET_X(DRF_MR_X(er, ew, ty, (p), (o), d##_##r , DRF_SMASK(d##_##r##_##f), \
+ NVVAL_X(d##_##r##_##f, (v))), d##_##r##_##f)
+#define DRF_MV_I(X,er,ew,ty,p,o,d,r,i,f,v) \
+ NVVAL_GET_X(DRF_MR_X(er, ew, ty, (p), (o), d##_##r(i), DRF_SMASK(d##_##r##_##f), \
+ NVVAL_X(d##_##r##_##f, (v))), d##_##r##_##f)
+#define DRF_MV_(X,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,IMPL,...) IMPL
+#define DRF_MV(A...) DRF_MV_(X, ##A, DRF_MV_I, DRF_MV_N)(X, ##A)
+
+/* Helper for modifying field definition in arbitrary object. */
+#define DRF_MD_N(X,er,ew,ty,p,o,d,r, f,v) \
+ NVVAL_GET_X(DRF_MR_X(er, ew, ty, (p), (o), d##_##r , DRF_SMASK(d##_##r##_##f), \
+ NVVAL_X(d##_##r##_##f, d##_##r##_##f##_##v)), d##_##r##_##f)
+#define DRF_MD_I(X,er,ew,ty,p,o,d,r,i,f,v) \
+ NVVAL_GET_X(DRF_MR_X(er, ew, ty, (p), (o), d##_##r(i), DRF_SMASK(d##_##r##_##f), \
+ NVVAL_X(d##_##r##_##f, d##_##r##_##f##_##v)), d##_##r##_##f)
+#define DRF_MD_(X,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,IMPL,...) IMPL
+#define DRF_MD(A...) DRF_MD_(X, ##A, DRF_MD_I, DRF_MD_N)(X, ##A)
+
+/* Helper for testing against field value in arbitrary object. */
+#define DRF_TV_N(X,e,p,o,d,r, f,cmp,v) \
+ NVVAL_TEST_X(DRF_RD_X(e, (p), (o), d##_##r ), d##_##r##_##f, cmp, (v))
+#define DRF_TV_I(X,e,p,o,d,r,i,f,cmp,v) \
+ NVVAL_TEST_X(DRF_RD_X(e, (p), (o), d##_##r(i)), d##_##r##_##f, cmp, (v))
+#define DRF_TV_(X,_1,_2,_3,_4,_5,_6,_7,_8,_9,IMPL,...) IMPL
+#define DRF_TV(A...) DRF_TV_(X, ##A, DRF_TV_I, DRF_TV_N)(X, ##A)
+
+/* Helper for testing against field definition in arbitrary object. */
+#define DRF_TD_N(X,e,p,o,d,r, f,cmp,v) \
+ NVVAL_TEST_X(DRF_RD_X(e, (p), (o), d##_##r ), d##_##r##_##f, cmp, d##_##r##_##f##_##v)
+#define DRF_TD_I(X,e,p,o,d,r,i,f,cmp,v) \
+ NVVAL_TEST_X(DRF_RD_X(e, (p), (o), d##_##r(i)), d##_##r##_##f, cmp, d##_##r##_##f##_##v)
+#define DRF_TD_(X,_1,_2,_3,_4,_5,_6,_7,_8,_9,IMPL,...) IMPL
+#define DRF_TD(A...) DRF_TD_(X, ##A, DRF_TD_I, DRF_TD_N)(X, ##A)
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index e63c6c965b54..347d2c020bd1 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -12,9 +12,9 @@ struct nvif_client {
bool super;
};
-int nvif_client_init(struct nvif_client *parent, const char *name, u64 device,
+int nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
struct nvif_client *);
-void nvif_client_fini(struct nvif_client *);
+void nvif_client_dtor(struct nvif_client *);
int nvif_client_ioctl(struct nvif_client *, void *, u32);
int nvif_client_suspend(struct nvif_client *);
int nvif_client_resume(struct nvif_client *);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index c2a572c67a76..b0e59800a320 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -18,9 +18,9 @@ struct nvif_device {
struct nvif_user user;
};
-int nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
- struct nvif_device *);
-void nvif_device_fini(struct nvif_device *);
+int nvif_device_ctor(struct nvif_object *, const char *name, u32 handle,
+ s32 oclass, void *, u32, struct nvif_device *);
+void nvif_device_dtor(struct nvif_device *);
u64 nvif_device_time(struct nvif_device *);
/*XXX*/
diff --git a/drivers/gpu/drm/nouveau/include/nvif/disp.h b/drivers/gpu/drm/nouveau/include/nvif/disp.h
index 7c0eda375c01..07ac544f282f 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/disp.h
@@ -7,6 +7,7 @@ struct nvif_disp {
struct nvif_object object;
};
-int nvif_disp_ctor(struct nvif_device *, s32 oclass, struct nvif_disp *);
+int nvif_disp_ctor(struct nvif_device *, const char *name, s32 oclass,
+ struct nvif_disp *);
void nvif_disp_dtor(struct nvif_disp *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/mem.h b/drivers/gpu/drm/nouveau/include/nvif/mem.h
index 80ee4ab0f016..9e1071dd56a0 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/mem.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/mem.h
@@ -10,11 +10,13 @@ struct nvif_mem {
u64 size;
};
-int nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
- u64 size, void *argv, u32 argc, struct nvif_mem *);
-int nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
- u64 size, void *argv, u32 argc, struct nvif_mem *);
-void nvif_mem_fini(struct nvif_mem *);
+int nvif_mem_ctor_type(struct nvif_mmu *mmu, const char *name, s32 oclass,
+ int type, u8 page, u64 size, void *argv, u32 argc,
+ struct nvif_mem *);
+int nvif_mem_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, u8 type,
+ u8 page, u64 size, void *argv, u32 argc, struct nvif_mem *);
+void nvif_mem_dtor(struct nvif_mem *);
-int nvif_mem_init_map(struct nvif_mmu *, u8 type, u64 size, struct nvif_mem *);
+int nvif_mem_ctor_map(struct nvif_mmu *, const char *name, u8 type, u64 size,
+ struct nvif_mem *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/mmu.h b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
index cec1e88a0a05..2035ef1d35f5 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
@@ -31,8 +31,9 @@ struct nvif_mmu {
u8 *kind;
};
-int nvif_mmu_init(struct nvif_object *, s32 oclass, struct nvif_mmu *);
-void nvif_mmu_fini(struct nvif_mmu *);
+int nvif_mmu_ctor(struct nvif_object *, const char *name, s32 oclass,
+ struct nvif_mmu *);
+void nvif_mmu_dtor(struct nvif_mmu *);
static inline bool
nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
diff --git a/drivers/gpu/drm/nouveau/include/nvif/notify.h b/drivers/gpu/drm/nouveau/include/nvif/notify.h
index 6863732eb286..39f6b7ee1719 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/notify.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/notify.h
@@ -4,6 +4,7 @@
struct nvif_notify {
struct nvif_object *object;
+ const char *name;
int index;
#define NVIF_NOTIFY_USER 0
@@ -24,10 +25,10 @@ struct nvif_notify {
struct work_struct work;
};
-int nvif_notify_init(struct nvif_object *, int (*func)(struct nvif_notify *),
- bool work, u8 type, void *data, u32 size, u32 reply,
- struct nvif_notify *);
-int nvif_notify_fini(struct nvif_notify *);
+int nvif_notify_ctor(struct nvif_object *, const char *name,
+ int (*func)(struct nvif_notify *), bool work, u8 type,
+ void *data, u32 size, u32 reply, struct nvif_notify *);
+int nvif_notify_dtor(struct nvif_notify *);
int nvif_notify_get(struct nvif_notify *);
int nvif_notify_put(struct nvif_notify *);
int nvif_notify(const void *, u32, const void *, u32);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 604fabc0e689..1e4c158d20fa 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVIF_OBJECT_H__
#define __NVIF_OBJECT_H__
-
#include <nvif/os.h>
struct nvif_sclass {
@@ -11,7 +10,9 @@ struct nvif_sclass {
};
struct nvif_object {
+ struct nvif_parent *parent;
struct nvif_client *client;
+ const char *name;
u32 handle;
s32 oclass;
void *priv; /*XXX: hack */
@@ -21,9 +22,9 @@ struct nvif_object {
} map;
};
-int nvif_object_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
- struct nvif_object *);
-void nvif_object_fini(struct nvif_object *);
+int nvif_object_ctor(struct nvif_object *, const char *name, u32 handle,
+ s32 oclass, void *, u32, struct nvif_object *);
+void nvif_object_dtor(struct nvif_object *);
int nvif_object_ioctl(struct nvif_object *, void *, u32, void **);
int nvif_object_sclass_get(struct nvif_object *, struct nvif_sclass **);
void nvif_object_sclass_put(struct nvif_sclass **);
@@ -115,6 +116,19 @@ struct nvif_mclass {
_cid; \
})
+#define NVIF_RD32_(p,o,dr) nvif_rd32((p), (o) + (dr))
+#define NVIF_WR32_(p,o,dr,f) nvif_wr32((p), (o) + (dr), (f))
+#define NVIF_RD32(p,A...) DRF_RD(NVIF_RD32_, (p), 0, ##A)
+#define NVIF_RV32(p,A...) DRF_RV(NVIF_RD32_, (p), 0, ##A)
+#define NVIF_TV32(p,A...) DRF_TV(NVIF_RD32_, (p), 0, ##A)
+#define NVIF_TD32(p,A...) DRF_TD(NVIF_RD32_, (p), 0, ##A)
+#define NVIF_WR32(p,A...) DRF_WR( NVIF_WR32_, (p), 0, ##A)
+#define NVIF_WV32(p,A...) DRF_WV( NVIF_WR32_, (p), 0, ##A)
+#define NVIF_WD32(p,A...) DRF_WD( NVIF_WR32_, (p), 0, ##A)
+#define NVIF_MR32(p,A...) DRF_MR(NVIF_RD32_, NVIF_WR32_, u32, (p), 0, ##A)
+#define NVIF_MV32(p,A...) DRF_MV(NVIF_RD32_, NVIF_WR32_, u32, (p), 0, ##A)
+#define NVIF_MD32(p,A...) DRF_MD(NVIF_RD32_, NVIF_WR32_, u32, (p), 0, ##A)
+
/*XXX*/
#include <core/object.h>
#define nvxx_object(a) ({ \
diff --git a/drivers/gpu/drm/nouveau/include/nvif/parent.h b/drivers/gpu/drm/nouveau/include/nvif/parent.h
new file mode 100644
index 000000000000..41cb1b0d90d3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/parent.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVIF_PARENT_H__
+#define __NVIF_PARENT_H__
+#include <nvif/os.h>
+struct nvif_object;
+
+struct nvif_parent {
+ const struct nvif_parent_func {
+ void (*debugf)(struct nvif_object *, const char *fmt, ...) __printf(2, 3);
+ void (*errorf)(struct nvif_object *, const char *fmt, ...) __printf(2, 3);
+ } *func;
+};
+
+static inline void
+nvif_parent_dtor(struct nvif_parent *parent)
+{
+ parent->func = NULL;
+}
+
+static inline void
+nvif_parent_ctor(const struct nvif_parent_func *func, struct nvif_parent *parent)
+{
+ parent->func = func;
+}
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/printf.h b/drivers/gpu/drm/nouveau/include/nvif/printf.h
new file mode 100644
index 000000000000..6c299ec6be21
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/printf.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVIF_PRINTF_H__
+#define __NVIF_PRINTF_H__
+#include <nvif/client.h>
+#include <nvif/parent.h>
+
+#define NVIF_PRINT(l,o,f,a...) do { \
+ struct nvif_object *_o = (o); \
+ struct nvif_parent *_p = _o->parent; \
+ _p->func->l(_o, "[%s/%08x:%s] "f"\n", _o->client->object.name, _o->handle, _o->name, ##a); \
+} while(0)
+
+#ifndef NVIF_DEBUG_PRINT_DISABLE
+#define NVIF_DEBUG(o,f,a...) NVIF_PRINT(debugf, (o), f, ##a)
+#else
+#define NVIF_DEBUG(o,f,a...)
+#endif
+
+#define NVIF_ERROR(o,f,a...) NVIF_PRINT(errorf, (o), f, ##a)
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push.h b/drivers/gpu/drm/nouveau/include/nvif/push.h
new file mode 100644
index 000000000000..168d7694ede5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/push.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __NVIF_PUSH_H__
+#define __NVIF_PUSH_H__
+#include <nvif/mem.h>
+#include <nvif/printf.h>
+
+#include <nvhw/drf.h>
+
+struct nvif_push {
+ int (*wait)(struct nvif_push *push, u32 size);
+ void (*kick)(struct nvif_push *push);
+
+ struct nvif_mem mem;
+
+ u32 *bgn;
+ u32 *cur;
+ u32 *seg;
+ u32 *end;
+};
+
+static inline __must_check int
+PUSH_WAIT(struct nvif_push *push, u32 size)
+{
+ if (push->cur + size >= push->end) {
+ int ret = push->wait(push, size);
+ if (ret)
+ return ret;
+ }
+#ifdef CONFIG_NOUVEAU_DEBUG_PUSH
+ push->seg = push->cur + size;
+#endif
+ return 0;
+}
+
+static inline int
+PUSH_KICK(struct nvif_push *push)
+{
+ push->kick(push);
+ return 0;
+}
+
+#ifdef CONFIG_NOUVEAU_DEBUG_PUSH
+#define PUSH_PRINTF(p,f,a...) do { \
+ struct nvif_push *_ppp = (p); \
+ u32 __o = _ppp->cur - (u32 *)_ppp->mem.object.map.ptr; \
+ NVIF_DEBUG(&_ppp->mem.object, "%08x: "f, __o * 4, ##a); \
+ (void)__o; \
+} while(0)
+#define PUSH_ASSERT_ON(a,b) WARN((a), b)
+#else
+#define PUSH_PRINTF(p,f,a...)
+#define PUSH_ASSERT_ON(a, b)
+#endif
+
+#define PUSH_ASSERT(a,b) do { \
+ static_assert( \
+ __builtin_choose_expr(__builtin_constant_p(a), (a), 1), b \
+ ); \
+ PUSH_ASSERT_ON(!(a), b); \
+} while(0)
+
+#define PUSH_DATA__(p,d,f,a...) do { \
+ struct nvif_push *_p = (p); \
+ u32 _d = (d); \
+ PUSH_ASSERT(_p->cur < _p->seg, "segment overrun"); \
+ PUSH_ASSERT(_p->cur < _p->end, "pushbuf overrun"); \
+ PUSH_PRINTF(_p, "%08x"f, _d, ##a); \
+ *_p->cur++ = _d; \
+} while(0)
+
+#define PUSH_DATA_(X,p,m,i0,i1,d,s,f,a...) PUSH_DATA__((p), (d), "-> "#m f, ##a)
+#define PUSH_DATA(p,d) PUSH_DATA__((p), (d), " data - %s", __func__)
+
+//XXX: error-check this against *real* pushbuffer end?
+#define PUSH_RSVD(p,d) do { \
+ struct nvif_push *__p = (p); \
+ __p->seg++; \
+ __p->end++; \
+ d; \
+} while(0)
+
+#ifdef CONFIG_NOUVEAU_DEBUG_PUSH
+#define PUSH_DATAp(X,p,m,i,o,d,s,f,a...) do { \
+ struct nvif_push *_pp = (p); \
+ const u32 *_dd = (d); \
+ u32 _s = (s), _i = (i?PUSH_##o##_INC); \
+ if (_s--) { \
+ PUSH_DATA_(X, _pp, X##m, i0, i1, *_dd++, 1, "+0x%x", 0); \
+ while (_s--) { \
+ PUSH_DATA_(X, _pp, X##m, i0, i1, *_dd++, 1, "+0x%x", _i); \
+ _i += (0?PUSH_##o##_INC); \
+ } \
+ } \
+} while(0)
+#else
+#define PUSH_DATAp(X,p,m,i,o,d,s,f,a...) do { \
+ struct nvif_push *_p = (p); \
+ u32 _s = (s); \
+ PUSH_ASSERT(_p->cur + _s <= _p->seg, "segment overrun"); \
+ PUSH_ASSERT(_p->cur + _s <= _p->end, "pushbuf overrun"); \
+ memcpy(_p->cur, (d), _s << 2); \
+ _p->cur += _s; \
+} while(0)
+#endif
+
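+/*
+ * The PUSH_*_INC values reuse the DRF "high:low" notation: (1?INC) gives
+ * the required method stride between the first two methods and (0?INC)
+ * the stride for the rest, so incrementing headers (4:4) expect methods
+ * 4 bytes apart, non-incrementing headers (0:0) the same method, and
+ * one-increment headers (4:0) a single 4-byte step.
+ */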
+#define PUSH_1(X,f,ds,n,c,o,p,s,mA,dA) do { \
+ PUSH_##o##_HDR((p), s, mA, (c)+(n)); \
+ PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \
+} while(0)
+#define PUSH_2(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \
+ PUSH_1(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+} while(0)
+#define PUSH_3(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \
+ PUSH_2(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+} while(0)
+#define PUSH_4(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \
+ PUSH_3(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+} while(0)
+#define PUSH_5(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \
+ PUSH_4(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+} while(0)
+#define PUSH_6(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \
+ PUSH_5(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+} while(0)
+#define PUSH_7(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \
+ PUSH_6(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+} while(0)
+#define PUSH_8(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \
+ PUSH_7(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+} while(0)
+#define PUSH_9(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \
+ PUSH_8(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+} while(0)
+#define PUSH_10(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \
+ PUSH_9(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+} while(0)
+
+#define PUSH_1D(X,o,p,s,mA,dA) \
+ PUSH_1(X, DATA_, 1, 1, 0, o, (p), s, X##mA, (dA))
+#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \
+ PUSH_2(X, DATA_, 1, 1, 0, o, (p), s, X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \
+ PUSH_3(X, DATA_, 1, 1, 0, o, (p), s, X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \
+ PUSH_4(X, DATA_, 1, 1, 0, o, (p), s, X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
+ PUSH_5(X, DATA_, 1, 1, 0, o, (p), s, X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_6D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF) \
+ PUSH_6(X, DATA_, 1, 1, 0, o, (p), s, X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_7D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG) \
+ PUSH_7(X, DATA_, 1, 1, 0, o, (p), s, X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_8D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH) \
+ PUSH_8(X, DATA_, 1, 1, 0, o, (p), s, X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_9D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI) \
+ PUSH_9(X, DATA_, 1, 1, 0, o, (p), s, X##mI, (dI), \
+ X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_10D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,dJ) \
+ PUSH_10(X, DATA_, 1, 1, 0, o, (p), s, X##mJ, (dJ), \
+ X##mI, (dI), \
+ X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+
+#define PUSH_1P(X,o,p,s,mA,dp,ds) \
+ PUSH_1(X, DATAp, ds, ds, 0, o, (p), s, X##mA, (dp))
+#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \
+ PUSH_2(X, DATAp, ds, ds, 0, o, (p), s, X##mB, (dp), \
+ X##mA, (dA))
+#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \
+ PUSH_3(X, DATAp, ds, ds, 0, o, (p), s, X##mC, (dp), \
+ X##mB, (dB), \
+ X##mA, (dA))
+
+#define PUSH_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL
+#define PUSH(A...) PUSH_(A, PUSH_10P, PUSH_10D, \
+ PUSH_9P , PUSH_9D, \
+ PUSH_8P , PUSH_8D, \
+ PUSH_7P , PUSH_7D, \
+ PUSH_6P , PUSH_6D, \
+ PUSH_5P , PUSH_5D, \
+ PUSH_4P , PUSH_4D, \
+ PUSH_3P , PUSH_3D, \
+ PUSH_2P , PUSH_2D, \
+ PUSH_1P , PUSH_1D)(, ##A)
+
+#define PUSH_NVIM(p,c,m,d) do { \
+ struct nvif_push *__p = (p); \
+ u32 __d = (d); \
+ PUSH_IMMD_HDR(__p, c, m, __d); \
+ __p->cur--; \
+ PUSH_PRINTF(__p, "%08x-> "#m, __d); \
+ __p->cur++; \
+} while(0)
+#define PUSH_NVSQ(A...) PUSH(MTHD, ##A)
+#define PUSH_NV1I(A...) PUSH(1INC, ##A)
+#define PUSH_NVNI(A...) PUSH(NINC, ##A)
+
+
+#define PUSH_NV_1(X,o,p,c,mA,d...) \
+ PUSH_##o(p,c,c##_##mA,d)
+#define PUSH_NV_2(X,o,p,c,mA,dA,mB,d...) \
+ PUSH_##o(p,c,c##_##mA,dA, \
+ c##_##mB,d)
+#define PUSH_NV_3(X,o,p,c,mA,dA,mB,dB,mC,d...) \
+ PUSH_##o(p,c,c##_##mA,dA, \
+ c##_##mB,dB, \
+ c##_##mC,d)
+#define PUSH_NV_4(X,o,p,c,mA,dA,mB,dB,mC,dC,mD,d...) \
+ PUSH_##o(p,c,c##_##mA,dA, \
+ c##_##mB,dB, \
+ c##_##mC,dC, \
+ c##_##mD,d)
+#define PUSH_NV_5(X,o,p,c,mA,dA,mB,dB,mC,dC,mD,dD,mE,d...) \
+ PUSH_##o(p,c,c##_##mA,dA, \
+ c##_##mB,dB, \
+ c##_##mC,dC, \
+ c##_##mD,dD, \
+ c##_##mE,d)
+#define PUSH_NV_6(X,o,p,c,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,d...) \
+ PUSH_##o(p,c,c##_##mA,dA, \
+ c##_##mB,dB, \
+ c##_##mC,dC, \
+ c##_##mD,dD, \
+ c##_##mE,dE, \
+ c##_##mF,d)
+#define PUSH_NV_7(X,o,p,c,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,d...) \
+ PUSH_##o(p,c,c##_##mA,dA, \
+ c##_##mB,dB, \
+ c##_##mC,dC, \
+ c##_##mD,dD, \
+ c##_##mE,dE, \
+ c##_##mF,dF, \
+ c##_##mG,d)
+#define PUSH_NV_8(X,o,p,c,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,d...) \
+ PUSH_##o(p,c,c##_##mA,dA, \
+ c##_##mB,dB, \
+ c##_##mC,dC, \
+ c##_##mD,dD, \
+ c##_##mE,dE, \
+ c##_##mF,dF, \
+ c##_##mG,dG, \
+ c##_##mH,d)
+#define PUSH_NV_9(X,o,p,c,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,d...) \
+ PUSH_##o(p,c,c##_##mA,dA, \
+ c##_##mB,dB, \
+ c##_##mC,dC, \
+ c##_##mD,dD, \
+ c##_##mE,dE, \
+ c##_##mF,dF, \
+ c##_##mG,dG, \
+ c##_##mH,dH, \
+ c##_##mI,d)
+#define PUSH_NV_10(X,o,p,c,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,d...) \
+ PUSH_##o(p,c,c##_##mA,dA, \
+ c##_##mB,dB, \
+ c##_##mC,dC, \
+ c##_##mD,dD, \
+ c##_##mE,dE, \
+ c##_##mF,dF, \
+ c##_##mG,dG, \
+ c##_##mH,dH, \
+ c##_##mI,dI, \
+ c##_##mJ,d)
+
+#define PUSH_NV_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL
+#define PUSH_NV(A...) PUSH_NV_(A, PUSH_NV_10, PUSH_NV_10, \
+ PUSH_NV_9 , PUSH_NV_9, \
+ PUSH_NV_8 , PUSH_NV_8, \
+ PUSH_NV_7 , PUSH_NV_7, \
+ PUSH_NV_6 , PUSH_NV_6, \
+ PUSH_NV_5 , PUSH_NV_5, \
+ PUSH_NV_4 , PUSH_NV_4, \
+ PUSH_NV_3 , PUSH_NV_3, \
+ PUSH_NV_2 , PUSH_NV_2, \
+ PUSH_NV_1 , PUSH_NV_1)(, ##A)
+
+#define PUSH_IMMD(A...) PUSH_NV(NVIM, ##A)
+#define PUSH_MTHD(A...) PUSH_NV(NVSQ, ##A)
+#define PUSH_1INC(A...) PUSH_NV(NV1I, ##A)
+#define PUSH_NINC(A...) PUSH_NV(NVNI, ##A)
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push006c.h b/drivers/gpu/drm/nouveau/include/nvif/push006c.h
new file mode 100644
index 000000000000..a31c147e72c7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/push006c.h
@@ -0,0 +1,73 @@
+#ifndef __NVIF_PUSH006C_H__
+#define __NVIF_PUSH006C_H__
+#include <nvif/push.h>
+
+#include <nvhw/class/cl006c.h>
+
+#ifndef PUSH006C_SUBC
+// Host methods
+#define PUSH006C_SUBC_NV06E 0
+#define PUSH006C_SUBC_NV176E 0
+#define PUSH006C_SUBC_NV826F 0
+
+// ContextSurfaces2d
+#define PUSH006C_SUBC_NV042 0
+#define PUSH006C_SUBC_NV062 0
+
+// ContextClipRectangle
+#define PUSH006C_SUBC_NV019 0
+
+// ContextRop
+#define PUSH006C_SUBC_NV043 0
+
+// ContextPattern
+#define PUSH006C_SUBC_NV044 0
+
+// Misc dodginess...
+#define PUSH006C_SUBC_NV_SW 1
+
+// ImageBlit
+#define PUSH006C_SUBC_NV05F 2
+#define PUSH006C_SUBC_NV09F 2
+
+// GdiRectangleText
+#define PUSH006C_SUBC_NV04A 3
+
+// Twod
+#define PUSH006C_SUBC_NV502D 3
+
+// MemoryToMemoryFormat
+#define PUSH006C_SUBC_NV039 4
+#define PUSH006C_SUBC_NV5039 4
+
+// DmaCopy
+#define PUSH006C_SUBC_NV85B5 4
+
+// Cipher
+#define PUSH006C_SUBC_NV74C1 4
+#endif
+
+#define PUSH_HDR(p,o,n,s,m,c) do { \
+ PUSH_ASSERT(!((s) & ~DRF_MASK(NV06C_METHOD_SUBCHANNEL)), "subc"); \
+ PUSH_ASSERT(!((m) & ~DRF_SMASK(NV06C_METHOD_ADDRESS)), "mthd"); \
+ PUSH_ASSERT(!((c) & ~DRF_MASK(NV06C_METHOD_COUNT)), "count"); \
+ PUSH_DATA__((p), NVVAL_X(NV06C_METHOD_ADDRESS, (m) >> 2) | \
+ NVVAL_X(NV06C_METHOD_SUBCHANNEL, (s)) | \
+ NVVAL_X(NV06C_METHOD_COUNT, (c)) | \
+ NVVAL_X(NV06C_OPCODE, NV06C_OPCODE_##o), \
+ " "n" subc %d mthd 0x%04x size %d - %s", \
+ (u32)(s), (u32)(m), (u32)(c), __func__); \
+} while(0)
+
+#define PUSH_MTHD_HDR(p,c,m,n) PUSH_HDR(p, METHOD, "incr", PUSH006C_SUBC_##c, m, n)
+#define PUSH_MTHD_INC 4:4
+#define PUSH_NINC_HDR(p,c,m,n) PUSH_HDR(p, NONINC_METHOD, "ninc", PUSH006C_SUBC_##c, m, n)
+#define PUSH_NINC_INC 0:0
+
+#define PUSH_JUMP(p,o) do { \
+ PUSH_ASSERT(!((o) & ~0x1fffffffcULL), "offset"); \
+ PUSH_DATA__((p), NVVAL_X(NV06C_OPCODE, NV06C_OPCODE_JUMP) | \
+ NVVAL_X(NV06C_JUMP_OFFSET, (o) >> 2), \
+ " jump 0x%08x - %s", (u32)(o), __func__); \
+} while(0)
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push206e.h b/drivers/gpu/drm/nouveau/include/nvif/push206e.h
new file mode 100644
index 000000000000..1dfb8a35423e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/push206e.h
@@ -0,0 +1,13 @@
+#ifndef __NVIF_PUSH206E_H__
+#define __NVIF_PUSH206E_H__
+#include <nvif/push006c.h>
+
+#include <nvhw/class/cl206e.h>
+
+#define PUSH_CALL(p,o) do { \
+ PUSH_ASSERT(!((o) & ~0xffffffffcULL), "offset"); \
+ PUSH_DATA__((p), NVDEF(NV206E, DMA, OPCODE2, CALL) | \
+ NVVAL(NV206E, DMA, CALL_OFFSET, (o) >> 2), \
+ " call 0x%08x - %s", (u32)(o), __func__); \
+} while(0)
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push507c.h b/drivers/gpu/drm/nouveau/include/nvif/push507c.h
new file mode 100644
index 000000000000..889467f13fd9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/push507c.h
@@ -0,0 +1,25 @@
+#ifndef __NVIF_PUSH507C_H__
+#define __NVIF_PUSH507C_H__
+#include <nvif/push.h>
+
+#include <nvhw/class/cl507c.h>
+
+#define PUSH_HDR(p,m,c) do { \
+ PUSH_ASSERT(!((m) & ~DRF_SMASK(NV507C_DMA_METHOD_OFFSET)), "mthd"); \
+ PUSH_ASSERT(!((c) & ~DRF_MASK(NV507C_DMA_METHOD_COUNT)), "size"); \
+ PUSH_DATA__((p), NVDEF(NV507C, DMA, OPCODE, METHOD) | \
+ NVVAL(NV507C, DMA, METHOD_COUNT, (c)) | \
+ NVVAL(NV507C, DMA, METHOD_OFFSET, (m) >> 2), \
+ " mthd 0x%04x size %d - %s", (u32)(m), (u32)(c), __func__); \
+} while(0)
+
+#define PUSH_MTHD_HDR(p,s,m,c) PUSH_HDR(p,m,c)
+#define PUSH_MTHD_INC 4:4
+
+#define PUSH_JUMP(p,o) do { \
+ PUSH_ASSERT(!((o) & ~DRF_SMASK(NV507C_DMA_JUMP_OFFSET)), "offset"); \
+ PUSH_DATA__((p), NVDEF(NV507C, DMA, OPCODE, JUMP) | \
+ NVVAL(NV507C, DMA, JUMP_OFFSET, (o) >> 2), \
+ "jump 0x%08x - %s", (u32)(o), __func__); \
+} while(0)
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push906f.h b/drivers/gpu/drm/nouveau/include/nvif/push906f.h
new file mode 100644
index 000000000000..cc2866bc8b0a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/push906f.h
@@ -0,0 +1,48 @@
+#ifndef __NVIF_PUSH906F_H__
+#define __NVIF_PUSH906F_H__
+#include <nvif/push.h>
+
+#include <nvhw/class/cl906f.h>
+
+#ifndef PUSH906F_SUBC
+// Host methods
+#define PUSH906F_SUBC_NV906F 0
+
+// Twod
+#define PUSH906F_SUBC_NV902D 3
+
+// MemoryToMemoryFormat
+#define PUSH906F_SUBC_NV9039 4
+
+// DmaCopy
+#define PUSH906F_SUBC_NV90B5 4
+#define PUSH906F_SUBC_NVA0B5 4
+#endif
+
+#define PUSH_HDR(p,o,n,f,s,m,c) do { \
+ PUSH_ASSERT(!((s) & ~DRF_MASK(NV906F_DMA_METHOD_SUBCHANNEL)), "subc"); \
+ PUSH_ASSERT(!((m) & ~(DRF_MASK(NV906F_DMA_METHOD_ADDRESS) << 2)), "mthd"); \
+ PUSH_ASSERT(!((c) & ~DRF_MASK(NV906F_DMA_METHOD_COUNT)), "count/immd"); \
+ PUSH_DATA__((p), NVVAL(NV906F, DMA, METHOD_ADDRESS, (m) >> 2) | \
+ NVVAL(NV906F, DMA, METHOD_SUBCHANNEL, (s)) | \
+ NVVAL(NV906F, DMA, METHOD_COUNT, (c)) | \
+ NVDEF(NV906F, DMA, SEC_OP, o), \
+ " "n" subc %d mthd 0x%04x "f" - %s", \
+ (u32)(s), (u32)(m), (u32)(c), __func__); \
+} while(0)
+
+#define PUSH_MTHD_INC 4:4
+#define PUSH_MTHD_HDR(p,c,m,n) \
+ PUSH_HDR(p, INC_METHOD, "incr", "size %d", PUSH906F_SUBC_##c, m, n)
+
+#define PUSH_NINC_INC 0:0
+#define PUSH_NINC_HDR(p,c,m,n) \
+ PUSH_HDR(p, NON_INC_METHOD, "ninc", "size %d", PUSH906F_SUBC_##c, m, n)
+
+#define PUSH_IMMD_HDR(p,c,m,n) \
+ PUSH_HDR(p, IMMD_DATA_METHOD, "immd", "data 0x%04x", PUSH906F_SUBC_##c, m, n)
+
+#define PUSH_1INC_INC 4:0
+#define PUSH_1INC_HDR(p,c,m,n) \
+ PUSH_HDR(p, ONE_INC, "oinc", "size %d", PUSH906F_SUBC_##c, m, n)
+#endif
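
A similar hedged sketch of the Fermi+ (906F) header PUSH_HDR() builds above: SEC_OP in bits 31:29 (1 = incrementing, 3 = non-incrementing, 4 = immediate data, 5 = increment-once), the count or immediate data in 28:16, the subchannel in 15:13 and the dword method address in the low bits. These are the commonly documented positions, assumed here rather than taken from cl906f.h:

#include <stdint.h>
#include <stdio.h>

enum sec_op { SEC_OP_INC = 1, SEC_OP_NONINC = 3, SEC_OP_IMMD = 4, SEC_OP_ONE_INC = 5 };

static uint32_t push_hdr_906f(enum sec_op op, uint32_t subc,
			      uint32_t mthd, uint32_t count_or_data)
{
	return ((uint32_t)op << 29) | (count_or_data << 16) |
	       (subc << 13) | (mthd >> 2);
}

int main(void)
{
	/* e.g. an incrementing write of 8 dwords to method 0x030c on subchannel 4 */
	printf("0x%08x\n", push_hdr_906f(SEC_OP_INC, 4, 0x030c, 8));
	return 0;
}
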
diff --git a/drivers/gpu/drm/nouveau/include/nvif/pushc37b.h b/drivers/gpu/drm/nouveau/include/nvif/pushc37b.h
new file mode 100644
index 000000000000..8f0c45703e33
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/pushc37b.h
@@ -0,0 +1,18 @@
+#ifndef __NVIF_PUSHC37B_H__
+#define __NVIF_PUSHC37B_H__
+#include <nvif/push.h>
+
+#include <nvhw/class/clc37b.h>
+
+#define PUSH_HDR(p,m,c) do { \
+ PUSH_ASSERT(!((m) & ~DRF_SMASK(NVC37B_DMA_METHOD_OFFSET)), "mthd"); \
+ PUSH_ASSERT(!((c) & ~DRF_MASK(NVC37B_DMA_METHOD_COUNT)), "size"); \
+ PUSH_DATA__((p), NVDEF(NVC37B, DMA, OPCODE, METHOD) | \
+ NVVAL(NVC37B, DMA, METHOD_COUNT, (c)) | \
+ NVVAL(NVC37B, DMA, METHOD_OFFSET, (m) >> 2), \
+ " mthd 0x%04x size %d - %s", (u32)(m), (u32)(c), __func__); \
+} while(0)
+
+#define PUSH_MTHD_HDR(p,s,m,c) PUSH_HDR(p,m,c)
+#define PUSH_MTHD_INC 4:4
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/user.h b/drivers/gpu/drm/nouveau/include/nvif/user.h
index 6825574d93c2..146986a9fe53 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/user.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/user.h
@@ -13,8 +13,8 @@ struct nvif_user_func {
u64 (*time)(struct nvif_user *);
};
-int nvif_user_init(struct nvif_device *);
-void nvif_user_fini(struct nvif_device *);
+int nvif_user_ctor(struct nvif_device *, const char *name);
+void nvif_user_dtor(struct nvif_device *);
extern const struct nvif_user_func nvif_userc361;
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/vmm.h b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
index 79bf85d2f43a..a2ee92201ace 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/vmm.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
@@ -30,9 +30,9 @@ struct nvif_vmm {
int page_nr;
};
-int nvif_vmm_init(struct nvif_mmu *, s32 oclass, bool managed, u64 addr,
- u64 size, void *argv, u32 argc, struct nvif_vmm *);
-void nvif_vmm_fini(struct nvif_vmm *);
+int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass, bool managed,
+ u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *);
+void nvif_vmm_dtor(struct nvif_vmm *);
int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
u8 page, u8 align, u64 size, struct nvif_vma *);
void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
index daa8e4bfb6bf..3981cb106aae 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
@@ -31,21 +31,21 @@ int gp102_sec2_flcn_enable(struct nvkm_falcon *);
#define FLCN_ERR(f,fmt,a...) FLCN_PRINTK(error, (f), fmt, ##a)
/**
- * struct nv_falcon_msg - header for all messages
+ * struct nvfw_falcon_msg - header for all messages
*
* @unit_id: id of firmware process that sent the message
* @size: total size of message
* @ctrl_flags: control flags
* @seq_id: used to match a message from its corresponding command
*/
-struct nv_falcon_msg {
+struct nvfw_falcon_msg {
u8 unit_id;
u8 size;
u8 ctrl_flags;
u8 seq_id;
};
-#define nv_falcon_cmd nv_falcon_msg
+#define nvfw_falcon_cmd nvfw_falcon_msg
#define NV_FALCON_CMD_UNIT_ID_REWIND 0x00
struct nvkm_falcon_qmgr;
@@ -53,7 +53,7 @@ int nvkm_falcon_qmgr_new(struct nvkm_falcon *, struct nvkm_falcon_qmgr **);
void nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **);
typedef int
-(*nvkm_falcon_qmgr_callback)(void *priv, struct nv_falcon_msg *);
+(*nvkm_falcon_qmgr_callback)(void *priv, struct nvfw_falcon_msg *);
struct nvkm_falcon_cmdq;
int nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *, const char *name,
@@ -62,7 +62,7 @@ void nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **);
void nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *,
u32 index, u32 offset, u32 size);
void nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *);
-int nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *, struct nv_falcon_cmd *,
+int nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *, struct nvfw_falcon_cmd *,
nvkm_falcon_qmgr_callback, void *priv,
unsigned long timeout_jiffies);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
index d14b7fb07368..85bcb80f6873 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
@@ -45,11 +45,8 @@ int nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *path,
} \
} \
\
- if (_ret) { \
- nvkm_error(_s, "failed to load firmware\n"); \
+ if (_ret) \
_fwif = ERR_PTR(_ret); \
- } \
- \
_fwif; \
})
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
index 5d9c3a966de6..836d8b932822 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
@@ -39,6 +39,8 @@ struct nvkm_acr {
struct list_head hsfw, hsf;
struct list_head lsfw, lsf;
+ u64 managed_falcons;
+
struct nvkm_memory *wpr;
u64 wpr_start;
u64 wpr_end;
@@ -107,6 +109,7 @@ struct nvkm_acr_lsf_func {
void (*bld_write)(struct nvkm_acr *, u32 bld, struct nvkm_acr_lsfw *);
void (*bld_patch)(struct nvkm_acr *, u32 bld, s64 adjust);
int (*boot)(struct nvkm_falcon *);
+ u64 bootstrap_falcons;
int (*bootstrap_falcon)(struct nvkm_falcon *, enum nvkm_acr_lsf_id);
int (*bootstrap_multiple_falcons)(struct nvkm_falcon *, u32 mask);
};
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index da553089d2d8..5ff6d1f8985a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -47,8 +47,8 @@ int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gm200_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gp10b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index a8c21c6c800b..d06dcbe1faa6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -63,8 +63,8 @@ s64 nvkm_timer_wait_test(struct nvkm_timer_wait *);
dev_WARN(_wait.tmr->subdev.device->dev, "timeout\n"); \
_taken; \
})
-#define nvkm_usec(d,u,cond...) nvkm_nsec((d), (u) * 1000, ##cond)
-#define nvkm_msec(d,m,cond...) nvkm_usec((d), (m) * 1000, ##cond)
+#define nvkm_usec(d, u, cond...) nvkm_nsec((d), (u) * 1000ULL, ##cond)
+#define nvkm_msec(d, m, cond...) nvkm_usec((d), (m) * 1000ULL, ##cond)
#define nvkm_wait_nsec(d,n,addr,mask,data) \
nvkm_nsec(d, n, \
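
A short sketch of why the ULL suffixes were added above: without them the millisecond/microsecond conversion happens in 32-bit int arithmetic before nvkm_nsec() ever sees the value, so any timeout beyond roughly two seconds no longer fits once expressed in nanoseconds. Unsigned types are used below so the wraparound is well-defined and printable; this is illustration only, not driver code:

#include <stdio.h>

int main(void)
{
	unsigned int msec = 5000;                            /* 5s timeout */
	unsigned int truncated = msec * 1000u * 1000u;       /* wraps mod 2^32 */
	unsigned long long full = msec * 1000ULL * 1000ULL;  /* correct: 5e9 ns */

	printf("32-bit: %u ns, 64-bit: %llu ns\n", truncated, full);
	return 0;
}
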
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 5b2406950e53..21537ca1dd39 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -55,8 +55,8 @@ nouveau_abi16(struct drm_file *file_priv)
* device (ie. the one that belongs to the fd it
* opened)
*/
- if (nvif_device_init(&cli->base.object, 0, NV_DEVICE,
- &args, sizeof(args),
+ if (nvif_device_ctor(&cli->base.object, "abi16Device",
+ 0, NV_DEVICE, &args, sizeof(args),
&abi16->device) == 0)
return cli->abi16;
@@ -114,7 +114,7 @@ static void
nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
struct nouveau_abi16_ntfy *ntfy)
{
- nvif_object_fini(&ntfy->object);
+ nvif_object_dtor(&ntfy->object);
nvkm_mm_free(&chan->heap, &ntfy->node);
list_del(&ntfy->head);
kfree(ntfy);
@@ -167,7 +167,7 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16)
}
/* destroy the device object */
- nvif_device_fini(&abi16->device);
+ nvif_device_dtor(&abi16->device);
kfree(cli->abi16);
cli->abi16 = NULL;
@@ -502,8 +502,8 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
list_add(&ntfy->head, &chan->notifiers);
client->route = NVDRM_OBJECT_ABI16;
- ret = nvif_object_init(&chan->chan->user, init->handle, oclass,
- NULL, 0, &ntfy->object);
+ ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", init->handle,
+ oclass, NULL, 0, &ntfy->object);
client->route = NVDRM_OBJECT_NVIF;
if (ret)
@@ -569,7 +569,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
client->route = NVDRM_OBJECT_ABI16;
client->super = true;
- ret = nvif_object_init(&chan->chan->user, info->handle,
+ ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
NV_DMA_IN_MEMORY, &args, sizeof(args),
&ntfy->object);
client->super = false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4ccf937df0d0..7806278dce57 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -31,7 +31,7 @@
#include <linux/swiotlb.h>
#include "nouveau_drv.h"
-#include "nouveau_dma.h"
+#include "nouveau_chan.h"
#include "nouveau_fence.h"
#include "nouveau_bo.h"
@@ -724,360 +724,6 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
*pl = nvbo->placement;
}
-
-static int
-nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
- int ret = RING_SPACE(chan, 2);
- if (ret == 0) {
- BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
- OUT_RING (chan, handle & 0x0000ffff);
- FIRE_RING (chan);
- }
- return ret;
-}
-
-static int
-nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
- struct nouveau_mem *mem = nouveau_mem(old_reg);
- int ret = RING_SPACE(chan, 10);
- if (ret == 0) {
- BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
- OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
- OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
- OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
- OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
- OUT_RING (chan, PAGE_SIZE);
- OUT_RING (chan, PAGE_SIZE);
- OUT_RING (chan, PAGE_SIZE);
- OUT_RING (chan, new_reg->num_pages);
- BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
- }
- return ret;
-}
-
-static int
-nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
- int ret = RING_SPACE(chan, 2);
- if (ret == 0) {
- BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
- OUT_RING (chan, handle);
- }
- return ret;
-}
-
-static int
-nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
- struct nouveau_mem *mem = nouveau_mem(old_reg);
- u64 src_offset = mem->vma[0].addr;
- u64 dst_offset = mem->vma[1].addr;
- u32 page_count = new_reg->num_pages;
- int ret;
-
- page_count = new_reg->num_pages;
- while (page_count) {
- int line_count = (page_count > 8191) ? 8191 : page_count;
-
- ret = RING_SPACE(chan, 11);
- if (ret)
- return ret;
-
- BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
- OUT_RING (chan, upper_32_bits(src_offset));
- OUT_RING (chan, lower_32_bits(src_offset));
- OUT_RING (chan, upper_32_bits(dst_offset));
- OUT_RING (chan, lower_32_bits(dst_offset));
- OUT_RING (chan, PAGE_SIZE);
- OUT_RING (chan, PAGE_SIZE);
- OUT_RING (chan, PAGE_SIZE);
- OUT_RING (chan, line_count);
- BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
- OUT_RING (chan, 0x00000110);
-
- page_count -= line_count;
- src_offset += (PAGE_SIZE * line_count);
- dst_offset += (PAGE_SIZE * line_count);
- }
-
- return 0;
-}
-
-static int
-nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
- struct nouveau_mem *mem = nouveau_mem(old_reg);
- u64 src_offset = mem->vma[0].addr;
- u64 dst_offset = mem->vma[1].addr;
- u32 page_count = new_reg->num_pages;
- int ret;
-
- page_count = new_reg->num_pages;
- while (page_count) {
- int line_count = (page_count > 2047) ? 2047 : page_count;
-
- ret = RING_SPACE(chan, 12);
- if (ret)
- return ret;
-
- BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
- OUT_RING (chan, upper_32_bits(dst_offset));
- OUT_RING (chan, lower_32_bits(dst_offset));
- BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
- OUT_RING (chan, upper_32_bits(src_offset));
- OUT_RING (chan, lower_32_bits(src_offset));
- OUT_RING (chan, PAGE_SIZE); /* src_pitch */
- OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
- OUT_RING (chan, PAGE_SIZE); /* line_length */
- OUT_RING (chan, line_count);
- BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
- OUT_RING (chan, 0x00100110);
-
- page_count -= line_count;
- src_offset += (PAGE_SIZE * line_count);
- dst_offset += (PAGE_SIZE * line_count);
- }
-
- return 0;
-}
-
-static int
-nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
- struct nouveau_mem *mem = nouveau_mem(old_reg);
- u64 src_offset = mem->vma[0].addr;
- u64 dst_offset = mem->vma[1].addr;
- u32 page_count = new_reg->num_pages;
- int ret;
-
- page_count = new_reg->num_pages;
- while (page_count) {
- int line_count = (page_count > 8191) ? 8191 : page_count;
-
- ret = RING_SPACE(chan, 11);
- if (ret)
- return ret;
-
- BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
- OUT_RING (chan, upper_32_bits(src_offset));
- OUT_RING (chan, lower_32_bits(src_offset));
- OUT_RING (chan, upper_32_bits(dst_offset));
- OUT_RING (chan, lower_32_bits(dst_offset));
- OUT_RING (chan, PAGE_SIZE);
- OUT_RING (chan, PAGE_SIZE);
- OUT_RING (chan, PAGE_SIZE);
- OUT_RING (chan, line_count);
- BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
- OUT_RING (chan, 0x00000110);
-
- page_count -= line_count;
- src_offset += (PAGE_SIZE * line_count);
- dst_offset += (PAGE_SIZE * line_count);
- }
-
- return 0;
-}
-
-static int
-nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
- struct nouveau_mem *mem = nouveau_mem(old_reg);
- int ret = RING_SPACE(chan, 7);
- if (ret == 0) {
- BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
- OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
- OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
- OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
- OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
- OUT_RING (chan, 0x00000000 /* COPY */);
- OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
- }
- return ret;
-}
-
-static int
-nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
- struct nouveau_mem *mem = nouveau_mem(old_reg);
- int ret = RING_SPACE(chan, 7);
- if (ret == 0) {
- BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
- OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
- OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
- OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
- OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
- OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
- OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
- }
- return ret;
-}
-
-static int
-nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
- int ret = RING_SPACE(chan, 6);
- if (ret == 0) {
- BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
- OUT_RING (chan, handle);
- BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
- OUT_RING (chan, chan->drm->ntfy.handle);
- OUT_RING (chan, chan->vram.handle);
- OUT_RING (chan, chan->vram.handle);
- }
-
- return ret;
-}
-
-static int
-nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
- struct nouveau_mem *mem = nouveau_mem(old_reg);
- u64 length = (new_reg->num_pages << PAGE_SHIFT);
- u64 src_offset = mem->vma[0].addr;
- u64 dst_offset = mem->vma[1].addr;
- int src_tiled = !!mem->kind;
- int dst_tiled = !!nouveau_mem(new_reg)->kind;
- int ret;
-
- while (length) {
- u32 amount, stride, height;
-
- ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
- if (ret)
- return ret;
-
- amount = min(length, (u64)(4 * 1024 * 1024));
- stride = 16 * 4;
- height = amount / stride;
-
- if (src_tiled) {
- BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
- OUT_RING (chan, 0);
- OUT_RING (chan, 0);
- OUT_RING (chan, stride);
- OUT_RING (chan, height);
- OUT_RING (chan, 1);
- OUT_RING (chan, 0);
- OUT_RING (chan, 0);
- } else {
- BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
- OUT_RING (chan, 1);
- }
- if (dst_tiled) {
- BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
- OUT_RING (chan, 0);
- OUT_RING (chan, 0);
- OUT_RING (chan, stride);
- OUT_RING (chan, height);
- OUT_RING (chan, 1);
- OUT_RING (chan, 0);
- OUT_RING (chan, 0);
- } else {
- BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
- OUT_RING (chan, 1);
- }
-
- BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
- OUT_RING (chan, upper_32_bits(src_offset));
- OUT_RING (chan, upper_32_bits(dst_offset));
- BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
- OUT_RING (chan, lower_32_bits(src_offset));
- OUT_RING (chan, lower_32_bits(dst_offset));
- OUT_RING (chan, stride);
- OUT_RING (chan, stride);
- OUT_RING (chan, stride);
- OUT_RING (chan, height);
- OUT_RING (chan, 0x00000101);
- OUT_RING (chan, 0x00000000);
- BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
- OUT_RING (chan, 0);
-
- length -= amount;
- src_offset += amount;
- dst_offset += amount;
- }
-
- return 0;
-}
-
-static int
-nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
- int ret = RING_SPACE(chan, 4);
- if (ret == 0) {
- BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
- OUT_RING (chan, handle);
- BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
- OUT_RING (chan, chan->drm->ntfy.handle);
- }
-
- return ret;
-}
-
-static inline uint32_t
-nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
- struct nouveau_channel *chan, struct ttm_mem_reg *reg)
-{
- if (reg->mem_type == TTM_PL_TT)
- return NvDmaTT;
- return chan->vram.handle;
-}
-
-static int
-nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
- u32 src_offset = old_reg->start << PAGE_SHIFT;
- u32 dst_offset = new_reg->start << PAGE_SHIFT;
- u32 page_count = new_reg->num_pages;
- int ret;
-
- ret = RING_SPACE(chan, 3);
- if (ret)
- return ret;
-
- BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));
-
- page_count = new_reg->num_pages;
- while (page_count) {
- int line_count = (page_count > 2047) ? 2047 : page_count;
-
- ret = RING_SPACE(chan, 11);
- if (ret)
- return ret;
-
- BEGIN_NV04(chan, NvSubCopy,
- NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
- OUT_RING (chan, src_offset);
- OUT_RING (chan, dst_offset);
- OUT_RING (chan, PAGE_SIZE); /* src_pitch */
- OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
- OUT_RING (chan, PAGE_SIZE); /* line_length */
- OUT_RING (chan, line_count);
- OUT_RING (chan, 0x00000101);
- OUT_RING (chan, 0x00000000);
- BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
- OUT_RING (chan, 0);
-
- page_count -= line_count;
- src_offset += (PAGE_SIZE * line_count);
- dst_offset += (PAGE_SIZE * line_count);
- }
-
- return 0;
-}
-
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
struct ttm_mem_reg *reg)
@@ -1181,7 +827,6 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
{ "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
{ "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
{},
- { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
};
const struct _method_table *mthd = _methods;
const char *name = "CPU";
@@ -1197,14 +842,14 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
if (chan == NULL)
continue;
- ret = nvif_object_init(&chan->user,
+ ret = nvif_object_ctor(&chan->user, "ttmBoMove",
mthd->oclass | (mthd->engine << 16),
mthd->oclass, NULL, 0,
&drm->ttm.copy);
if (ret == 0) {
ret = mthd->init(chan, drm->ttm.copy.handle);
if (ret) {
- nvif_object_fini(&drm->ttm.copy);
+ nvif_object_dtor(&drm->ttm.copy);
continue;
}
@@ -1461,7 +1106,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
/* untiled */
break;
- /* fall through - tiled memory */
+ fallthrough; /* tiled memory */
case TTM_PL_VRAM:
reg->bus.offset = reg->start << PAGE_SHIFT;
reg->bus.base = device->func->resource_addr(device, 1);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index e944b4aa5547..52489ce7d029 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_BO_H__
#define __NOUVEAU_BO_H__
-
+#include <drm/ttm/ttm_bo_driver.h>
#include <drm/drm_gem.h>
struct nouveau_channel;
+struct nouveau_cli;
+struct nouveau_drm;
struct nouveau_fence;
-struct nvkm_vma;
struct nouveau_bo {
struct ttm_buffer_object bo;
@@ -135,4 +136,42 @@ nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 flags,
}
return ret;
}
+
+int nv04_bo_move_init(struct nouveau_channel *, u32);
+int nv04_bo_move_m2mf(struct nouveau_channel *, struct ttm_buffer_object *,
+ struct ttm_mem_reg *, struct ttm_mem_reg *);
+
+int nv50_bo_move_init(struct nouveau_channel *, u32);
+int nv50_bo_move_m2mf(struct nouveau_channel *, struct ttm_buffer_object *,
+ struct ttm_mem_reg *, struct ttm_mem_reg *);
+
+int nv84_bo_move_exec(struct nouveau_channel *, struct ttm_buffer_object *,
+ struct ttm_mem_reg *, struct ttm_mem_reg *);
+
+int nva3_bo_move_copy(struct nouveau_channel *, struct ttm_buffer_object *,
+ struct ttm_mem_reg *, struct ttm_mem_reg *);
+
+int nvc0_bo_move_init(struct nouveau_channel *, u32);
+int nvc0_bo_move_m2mf(struct nouveau_channel *, struct ttm_buffer_object *,
+ struct ttm_mem_reg *, struct ttm_mem_reg *);
+
+int nvc0_bo_move_copy(struct nouveau_channel *, struct ttm_buffer_object *,
+ struct ttm_mem_reg *, struct ttm_mem_reg *);
+
+int nve0_bo_move_init(struct nouveau_channel *, u32);
+int nve0_bo_move_copy(struct nouveau_channel *, struct ttm_buffer_object *,
+ struct ttm_mem_reg *, struct ttm_mem_reg *);
+
+#define NVBO_WR32_(b,o,dr,f) nouveau_bo_wr32((b), (o)/4 + (dr), (f))
+#define NVBO_RD32_(b,o,dr) nouveau_bo_rd32((b), (o)/4 + (dr))
+#define NVBO_RD32(A...) DRF_RD(NVBO_RD32_, ##A)
+#define NVBO_RV32(A...) DRF_RV(NVBO_RD32_, ##A)
+#define NVBO_TV32(A...) DRF_TV(NVBO_RD32_, ##A)
+#define NVBO_TD32(A...) DRF_TD(NVBO_RD32_, ##A)
+#define NVBO_WR32(A...) DRF_WR( NVBO_WR32_, ##A)
+#define NVBO_WV32(A...) DRF_WV( NVBO_WR32_, ##A)
+#define NVBO_WD32(A...) DRF_WD( NVBO_WR32_, ##A)
+#define NVBO_MR32(A...) DRF_MR(NVBO_RD32_, NVBO_WR32_, u32, ##A)
+#define NVBO_MV32(A...) DRF_MV(NVBO_RD32_, NVBO_WR32_, u32, ##A)
+#define NVBO_MD32(A...) DRF_MD(NVBO_RD32_, NVBO_WR32_, u32, ##A)
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo0039.c b/drivers/gpu/drm/nouveau/nouveau_bo0039.c
new file mode 100644
index 000000000000..bf7ae2cecaf6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo0039.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ * Ben Skeggs <darktama@iinet.net.au>
+ * Jeremy Kolb <jkolb@brandeis.edu>
+ */
+#include "nouveau_bo.h"
+#include "nouveau_dma.h"
+#include "nouveau_drv.h"
+
+#include <nvif/push006c.h>
+
+#include <nvhw/class/cl0039.h>
+
+static inline uint32_t
+nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
+ struct nouveau_channel *chan, struct ttm_mem_reg *reg)
+{
+ if (reg->mem_type == TTM_PL_TT)
+ return NvDmaTT;
+ return chan->vram.handle;
+}
+
+int
+nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+{
+ struct nvif_push *push = chan->chan.push;
+ u32 src_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, old_reg);
+ u32 src_offset = old_reg->start << PAGE_SHIFT;
+ u32 dst_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, new_reg);
+ u32 dst_offset = new_reg->start << PAGE_SHIFT;
+ u32 page_count = new_reg->num_pages;
+ int ret;
+
+ ret = PUSH_WAIT(push, 3);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_BUFFER_IN, src_ctxdma,
+ SET_CONTEXT_DMA_BUFFER_OUT, dst_ctxdma);
+
+ page_count = new_reg->num_pages;
+ while (page_count) {
+ int line_count = (page_count > 2047) ? 2047 : page_count;
+
+ ret = PUSH_WAIT(push, 11);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV039, OFFSET_IN, src_offset,
+ OFFSET_OUT, dst_offset,
+ PITCH_IN, PAGE_SIZE,
+ PITCH_OUT, PAGE_SIZE,
+ LINE_LENGTH_IN, PAGE_SIZE,
+ LINE_COUNT, line_count,
+
+ FORMAT,
+ NVVAL(NV039, FORMAT, IN, 1) |
+ NVVAL(NV039, FORMAT, OUT, 1),
+
+ BUFFER_NOTIFY, NV039_BUFFER_NOTIFY_WRITE_ONLY);
+
+ PUSH_MTHD(push, NV039, NO_OPERATION, 0x00000000);
+
+ page_count -= line_count;
+ src_offset += (PAGE_SIZE * line_count);
+ dst_offset += (PAGE_SIZE * line_count);
+ }
+
+ return 0;
+}
+
+int
+nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+ struct nvif_push *push = chan->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 4);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV039, SET_OBJECT, handle);
+ PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_NOTIFIES, chan->drm->ntfy.handle);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo5039.c b/drivers/gpu/drm/nouveau/nouveau_bo5039.c
new file mode 100644
index 000000000000..f9b9b85abe44
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo5039.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ * Ben Skeggs <darktama@iinet.net.au>
+ * Jeremy Kolb <jkolb@brandeis.edu>
+ */
+#include "nouveau_bo.h"
+#include "nouveau_dma.h"
+#include "nouveau_drv.h"
+#include "nouveau_mem.h"
+
+#include <nvif/push206e.h>
+
+#include <nvhw/class/cl5039.h>
+
+int
+nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+{
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
+ struct nvif_push *push = chan->chan.push;
+ u64 length = (new_reg->num_pages << PAGE_SHIFT);
+ u64 src_offset = mem->vma[0].addr;
+ u64 dst_offset = mem->vma[1].addr;
+ int src_tiled = !!mem->kind;
+ int dst_tiled = !!nouveau_mem(new_reg)->kind;
+ int ret;
+
+ while (length) {
+ u32 amount, stride, height;
+
+ ret = PUSH_WAIT(push, 18 + 6 * (src_tiled + dst_tiled));
+ if (ret)
+ return ret;
+
+ amount = min(length, (u64)(4 * 1024 * 1024));
+ stride = 16 * 4;
+ height = amount / stride;
+
+ if (src_tiled) {
+ PUSH_MTHD(push, NV5039, SET_SRC_MEMORY_LAYOUT,
+ NVDEF(NV5039, SET_SRC_MEMORY_LAYOUT, V, BLOCKLINEAR),
+
+ SET_SRC_BLOCK_SIZE,
+ NVDEF(NV5039, SET_SRC_BLOCK_SIZE, WIDTH, ONE_GOB) |
+ NVDEF(NV5039, SET_SRC_BLOCK_SIZE, HEIGHT, ONE_GOB) |
+ NVDEF(NV5039, SET_SRC_BLOCK_SIZE, DEPTH, ONE_GOB),
+
+ SET_SRC_WIDTH, stride,
+ SET_SRC_HEIGHT, height,
+ SET_SRC_DEPTH, 1,
+ SET_SRC_LAYER, 0,
+
+ SET_SRC_ORIGIN,
+ NVVAL(NV5039, SET_SRC_ORIGIN, X, 0) |
+ NVVAL(NV5039, SET_SRC_ORIGIN, Y, 0));
+ } else {
+ PUSH_MTHD(push, NV5039, SET_SRC_MEMORY_LAYOUT,
+ NVDEF(NV5039, SET_SRC_MEMORY_LAYOUT, V, PITCH));
+ }
+
+ if (dst_tiled) {
+ PUSH_MTHD(push, NV5039, SET_DST_MEMORY_LAYOUT,
+ NVDEF(NV5039, SET_DST_MEMORY_LAYOUT, V, BLOCKLINEAR),
+
+ SET_DST_BLOCK_SIZE,
+ NVDEF(NV5039, SET_DST_BLOCK_SIZE, WIDTH, ONE_GOB) |
+ NVDEF(NV5039, SET_DST_BLOCK_SIZE, HEIGHT, ONE_GOB) |
+ NVDEF(NV5039, SET_DST_BLOCK_SIZE, DEPTH, ONE_GOB),
+
+ SET_DST_WIDTH, stride,
+ SET_DST_HEIGHT, height,
+ SET_DST_DEPTH, 1,
+ SET_DST_LAYER, 0,
+
+ SET_DST_ORIGIN,
+ NVVAL(NV5039, SET_DST_ORIGIN, X, 0) |
+ NVVAL(NV5039, SET_DST_ORIGIN, Y, 0));
+ } else {
+ PUSH_MTHD(push, NV5039, SET_DST_MEMORY_LAYOUT,
+ NVDEF(NV5039, SET_DST_MEMORY_LAYOUT, V, PITCH));
+ }
+
+ PUSH_MTHD(push, NV5039, OFFSET_IN_UPPER,
+ NVVAL(NV5039, OFFSET_IN_UPPER, VALUE, upper_32_bits(src_offset)),
+
+ OFFSET_OUT_UPPER,
+ NVVAL(NV5039, OFFSET_OUT_UPPER, VALUE, upper_32_bits(dst_offset)));
+
+ PUSH_MTHD(push, NV5039, OFFSET_IN, lower_32_bits(src_offset),
+ OFFSET_OUT, lower_32_bits(dst_offset),
+ PITCH_IN, stride,
+ PITCH_OUT, stride,
+ LINE_LENGTH_IN, stride,
+ LINE_COUNT, height,
+
+ FORMAT,
+ NVDEF(NV5039, FORMAT, IN, ONE) |
+ NVDEF(NV5039, FORMAT, OUT, ONE),
+
+ BUFFER_NOTIFY,
+ NVDEF(NV5039, BUFFER_NOTIFY, TYPE, WRITE_ONLY));
+
+ PUSH_MTHD(push, NV5039, NO_OPERATION, 0x00000000);
+
+ length -= amount;
+ src_offset += amount;
+ dst_offset += amount;
+ }
+
+ return 0;
+}
+
+int
+nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+ struct nvif_push *push = chan->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 6);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV5039, SET_OBJECT, handle);
+ PUSH_MTHD(push, NV5039, SET_CONTEXT_DMA_NOTIFY, chan->drm->ntfy.handle,
+ SET_CONTEXT_DMA_BUFFER_IN, chan->vram.handle,
+ SET_CONTEXT_DMA_BUFFER_OUT, chan->vram.handle);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
new file mode 100644
index 000000000000..1b5fd78ddcba
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ * Ben Skeggs <darktama@iinet.net.au>
+ * Jeremy Kolb <jkolb@brandeis.edu>
+ */
+#include "nouveau_bo.h"
+#include "nouveau_dma.h"
+#include "nouveau_mem.h"
+
+#include <nvif/push206e.h>
+
+int
+nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+{
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
+ struct nvif_push *push = chan->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 7);
+ if (ret)
+ return ret;
+
+ PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->num_pages << PAGE_SHIFT,
+ 0x0308, upper_32_bits(mem->vma[0].addr),
+ 0x030c, lower_32_bits(mem->vma[0].addr),
+ 0x0310, upper_32_bits(mem->vma[1].addr),
+ 0x0314, lower_32_bits(mem->vma[1].addr),
+ 0x0318, 0x00000000 /* MODE_COPY, QUERY_NONE */);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
new file mode 100644
index 000000000000..f0df172b029e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ * Ben Skeggs <darktama@iinet.net.au>
+ * Jeremy Kolb <jkolb@brandeis.edu>
+ */
+#include "nouveau_bo.h"
+#include "nouveau_dma.h"
+#include "nouveau_mem.h"
+
+#include <nvif/push206e.h>
+
+/*XXX: Fixup class to be compatible with NVIDIA's, which will allow sharing
+ * code with KeplerDmaCopyA.
+ */
+
+int
+nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+{
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
+ struct nvif_push *push = chan->chan.push;
+ u64 src_offset = mem->vma[0].addr;
+ u64 dst_offset = mem->vma[1].addr;
+ u32 page_count = new_reg->num_pages;
+ int ret;
+
+ page_count = new_reg->num_pages;
+ while (page_count) {
+ int line_count = (page_count > 8191) ? 8191 : page_count;
+
+ ret = PUSH_WAIT(push, 11);
+ if (ret)
+ return ret;
+
+ PUSH_NVSQ(push, NV85B5, 0x030c, upper_32_bits(src_offset),
+ 0x0310, lower_32_bits(src_offset),
+ 0x0314, upper_32_bits(dst_offset),
+ 0x0318, lower_32_bits(dst_offset),
+ 0x031c, PAGE_SIZE,
+ 0x0320, PAGE_SIZE,
+ 0x0324, PAGE_SIZE,
+ 0x0328, line_count);
+ PUSH_NVSQ(push, NV85B5, 0x0300, 0x00000110);
+
+ page_count -= line_count;
+ src_offset += (PAGE_SIZE * line_count);
+ dst_offset += (PAGE_SIZE * line_count);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo9039.c b/drivers/gpu/drm/nouveau/nouveau_bo9039.c
new file mode 100644
index 000000000000..52fefb37064c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo9039.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ * Ben Skeggs <darktama@iinet.net.au>
+ * Jeremy Kolb <jkolb@brandeis.edu>
+ */
+#include "nouveau_bo.h"
+#include "nouveau_dma.h"
+#include "nouveau_mem.h"
+
+#include <nvif/push906f.h>
+
+#include <nvhw/class/cl9039.h>
+
+int
+nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+{
+ struct nvif_push *push = chan->chan.push;
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
+ u64 src_offset = mem->vma[0].addr;
+ u64 dst_offset = mem->vma[1].addr;
+ u32 page_count = new_reg->num_pages;
+ int ret;
+
+ page_count = new_reg->num_pages;
+ while (page_count) {
+ int line_count = (page_count > 2047) ? 2047 : page_count;
+
+ ret = PUSH_WAIT(push, 12);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV9039, OFFSET_OUT_UPPER,
+ NVVAL(NV9039, OFFSET_OUT_UPPER, VALUE, upper_32_bits(dst_offset)),
+
+ OFFSET_OUT, lower_32_bits(dst_offset));
+
+ PUSH_MTHD(push, NV9039, OFFSET_IN_UPPER,
+ NVVAL(NV9039, OFFSET_IN_UPPER, VALUE, upper_32_bits(src_offset)),
+
+ OFFSET_IN, lower_32_bits(src_offset),
+ PITCH_IN, PAGE_SIZE,
+ PITCH_OUT, PAGE_SIZE,
+ LINE_LENGTH_IN, PAGE_SIZE,
+ LINE_COUNT, line_count);
+
+ PUSH_MTHD(push, NV9039, LAUNCH_DMA,
+ NVDEF(NV9039, LAUNCH_DMA, SRC_INLINE, FALSE) |
+ NVDEF(NV9039, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
+ NVDEF(NV9039, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
+ NVDEF(NV9039, LAUNCH_DMA, COMPLETION_TYPE, FLUSH_DISABLE) |
+ NVDEF(NV9039, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
+ NVDEF(NV9039, LAUNCH_DMA, SEMAPHORE_STRUCT_SIZE, ONE_WORD));
+
+ page_count -= line_count;
+ src_offset += (PAGE_SIZE * line_count);
+ dst_offset += (PAGE_SIZE * line_count);
+ }
+
+ return 0;
+}
+
+int
+nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+ struct nvif_push *push = chan->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV9039, SET_OBJECT, handle);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo90b5.c b/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
new file mode 100644
index 000000000000..34b79d561c7f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nouveau_bo.h"
+#include "nouveau_dma.h"
+#include "nouveau_mem.h"
+
+#include <nvif/push906f.h>
+
+/*XXX: Fixup class to be compatible with NVIDIA's, which will allow sharing
+ * code with KeplerDmaCopyA.
+ */
+
+int
+nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+{
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
+ struct nvif_push *push = chan->chan.push;
+ u64 src_offset = mem->vma[0].addr;
+ u64 dst_offset = mem->vma[1].addr;
+ u32 page_count = new_reg->num_pages;
+ int ret;
+
+ page_count = new_reg->num_pages;
+ while (page_count) {
+ int line_count = (page_count > 8191) ? 8191 : page_count;
+
+ ret = PUSH_WAIT(push, 10);
+ if (ret)
+ return ret;
+
+ PUSH_NVSQ(push, NV90B5, 0x030c, upper_32_bits(src_offset),
+ 0x0310, lower_32_bits(src_offset),
+ 0x0314, upper_32_bits(dst_offset),
+ 0x0318, lower_32_bits(dst_offset),
+ 0x031c, PAGE_SIZE,
+ 0x0320, PAGE_SIZE,
+ 0x0324, PAGE_SIZE,
+ 0x0328, line_count);
+ PUSH_NVIM(push, NV90B5, 0x0300, 0x0110);
+
+ page_count -= line_count;
+ src_offset += (PAGE_SIZE * line_count);
+ dst_offset += (PAGE_SIZE * line_count);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_boa0b5.c b/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
new file mode 100644
index 000000000000..394e29012e50
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ * Ben Skeggs <darktama@iinet.net.au>
+ * Jeremy Kolb <jkolb@brandeis.edu>
+ */
+#include "nouveau_bo.h"
+#include "nouveau_dma.h"
+#include "nouveau_mem.h"
+
+#include <nvif/push906f.h>
+
+#include <nvhw/class/cla0b5.h>
+
+int
+nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+{
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
+ struct nvif_push *push = chan->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 10);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVA0B5, OFFSET_IN_UPPER,
+ NVVAL(NVA0B5, OFFSET_IN_UPPER, UPPER, upper_32_bits(mem->vma[0].addr)),
+
+ OFFSET_IN_LOWER, lower_32_bits(mem->vma[0].addr),
+
+ OFFSET_OUT_UPPER,
+ NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(mem->vma[1].addr)),
+
+ OFFSET_OUT_LOWER, lower_32_bits(mem->vma[1].addr),
+ PITCH_IN, PAGE_SIZE,
+ PITCH_OUT, PAGE_SIZE,
+ LINE_LENGTH_IN, PAGE_SIZE,
+ LINE_COUNT, new_reg->num_pages);
+
+ PUSH_IMMD(push, NVA0B5, LAUNCH_DMA,
+ NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
+ NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
+ NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
+ NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
+ NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
+ NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
+ NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, TRUE) |
+ NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) |
+ NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING) |
+ NVDEF(NVA0B5, LAUNCH_DMA, SRC_TYPE, VIRTUAL) |
+ NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, VIRTUAL));
+ return 0;
+}
+
+int
+nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+ struct nvif_push *push = chan->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_NVSQ(push, NVA0B5, 0x0000, handle & 0x0000ffff);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 3d71dfcb2fde..b80e4ebf14a6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -21,8 +21,8 @@
*
* Authors: Ben Skeggs
*/
+#include <nvif/push006c.h>
-#include <nvif/os.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/cl006b.h>
@@ -32,9 +32,6 @@
#include <nvif/clc36f.h>
#include <nvif/ioctl.h>
-/*XXX*/
-#include <core/client.h>
-
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_bo.h"
@@ -102,12 +99,12 @@ nouveau_channel_del(struct nouveau_channel **pchan)
if (cli)
nouveau_svmm_part(chan->vmm->svmm, chan->inst);
- nvif_object_fini(&chan->nvsw);
- nvif_object_fini(&chan->gart);
- nvif_object_fini(&chan->vram);
- nvif_notify_fini(&chan->kill);
- nvif_object_fini(&chan->user);
- nvif_object_fini(&chan->push.ctxdma);
+ nvif_object_dtor(&chan->nvsw);
+ nvif_object_dtor(&chan->gart);
+ nvif_object_dtor(&chan->vram);
+ nvif_notify_dtor(&chan->kill);
+ nvif_object_dtor(&chan->user);
+ nvif_object_dtor(&chan->push.ctxdma);
nouveau_vma_del(&chan->push.vma);
nouveau_bo_unmap(chan->push.buffer);
if (chan->push.buffer && chan->push.buffer->pin_refcnt)
@@ -121,6 +118,31 @@ nouveau_channel_del(struct nouveau_channel **pchan)
*pchan = NULL;
}
+static void
+nouveau_channel_kick(struct nvif_push *push)
+{
+ struct nouveau_channel *chan = container_of(push, typeof(*chan), chan._push);
+ chan->dma.cur = chan->dma.cur + (chan->chan._push.cur - chan->chan._push.bgn);
+ FIRE_RING(chan);
+ chan->chan._push.bgn = chan->chan._push.cur;
+}
+
+static int
+nouveau_channel_wait(struct nvif_push *push, u32 size)
+{
+ struct nouveau_channel *chan = container_of(push, typeof(*chan), chan._push);
+ int ret;
+ chan->dma.cur = chan->dma.cur + (chan->chan._push.cur - chan->chan._push.bgn);
+ ret = RING_SPACE(chan, size);
+ if (ret == 0) {
+ chan->chan._push.bgn = chan->chan._push.mem.object.map.ptr;
+ chan->chan._push.bgn = chan->chan._push.bgn + chan->dma.cur;
+ chan->chan._push.cur = chan->chan._push.bgn;
+ chan->chan._push.end = chan->chan._push.bgn + size;
+ }
+ return ret;
+}
+
static int
nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
u32 size, struct nouveau_channel **pchan)
@@ -158,6 +180,14 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
return ret;
}
+ chan->chan._push.mem.object.parent = cli->base.object.parent;
+ chan->chan._push.mem.object.client = &cli->base;
+ chan->chan._push.mem.object.name = "chanPush";
+ chan->chan._push.mem.object.map.ptr = chan->push.buffer->kmap.virtual;
+ chan->chan._push.wait = nouveau_channel_wait;
+ chan->chan._push.kick = nouveau_channel_kick;
+ chan->chan.push = &chan->chan._push;
+
/* create dma object covering the *entire* memory space that the
* pushbuf lives in, this is because the GEM code requires that
* we be able to call out to other (indirect) push buffers
@@ -214,8 +244,9 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
}
}
- ret = nvif_object_init(&device->object, 0, NV_DMA_FROM_MEMORY,
- &args, sizeof(args), &chan->push.ctxdma);
+ ret = nvif_object_ctor(&device->object, "abi16PushCtxDma", 0,
+ NV_DMA_FROM_MEMORY, &args, sizeof(args),
+ &chan->push.ctxdma);
if (ret) {
nouveau_channel_del(pchan);
return ret;
@@ -290,8 +321,8 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
size = sizeof(args.nv50);
}
- ret = nvif_object_init(&device->object, 0, *oclass++,
- &args, size, &chan->user);
+ ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0,
+ *oclass++, &args, size, &chan->user);
if (ret == 0) {
if (chan->user.oclass >= VOLTA_CHANNEL_GPFIFO_A) {
chan->chid = args.volta.chid;
@@ -341,8 +372,9 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
args.offset = chan->push.addr;
do {
- ret = nvif_object_init(&device->object, 0, *oclass++,
- &args, sizeof(args), &chan->user);
+ ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0,
+ *oclass++, &args, sizeof(args),
+ &chan->user);
if (ret == 0) {
chan->chid = args.chid;
return ret;
@@ -364,7 +396,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
nvif_object_map(&chan->user, NULL, 0);
if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
- ret = nvif_notify_init(&chan->user, nouveau_channel_killed,
+ ret = nvif_notify_ctor(&chan->user, "abi16ChanKilled",
+ nouveau_channel_killed,
true, NV906F_V0_NTFY_KILLED,
NULL, 0, 0, &chan->kill);
if (ret == 0)
@@ -390,8 +423,9 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.limit = device->info.ram_user - 1;
}
- ret = nvif_object_init(&chan->user, vram, NV_DMA_IN_MEMORY,
- &args, sizeof(args), &chan->vram);
+ ret = nvif_object_ctor(&chan->user, "abi16ChanVramCtxDma", vram,
+ NV_DMA_IN_MEMORY, &args, sizeof(args),
+ &chan->vram);
if (ret)
return ret;
@@ -414,8 +448,9 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.limit = chan->vmm->vmm.limit - 1;
}
- ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
- &args, sizeof(args), &chan->gart);
+ ret = nvif_object_ctor(&chan->user, "abi16ChanGartCtxDma", gart,
+ NV_DMA_IN_MEMORY, &args, sizeof(args),
+ &chan->gart);
if (ret)
return ret;
}
@@ -444,28 +479,27 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
chan->dma.cur = chan->dma.put;
chan->dma.free = chan->dma.max - chan->dma.cur;
- ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
+ ret = PUSH_WAIT(chan->chan.push, NOUVEAU_DMA_SKIPS);
if (ret)
return ret;
for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
- OUT_RING(chan, 0x00000000);
+ PUSH_DATA(chan->chan.push, 0x00000000);
/* allocate software object class (used for fences on <= nv05) */
if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
- ret = nvif_object_init(&chan->user, 0x006e,
+ ret = nvif_object_ctor(&chan->user, "abi16NvswFence", 0x006e,
NVIF_CLASS_SW_NV04,
NULL, 0, &chan->nvsw);
if (ret)
return ret;
- ret = RING_SPACE(chan, 2);
+ ret = PUSH_WAIT(chan->chan.push, 2);
if (ret)
return ret;
- BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
- OUT_RING (chan, chan->nvsw.handle);
- FIRE_RING (chan);
+ PUSH_NVSQ(chan->chan.push, NV_SW, 0x0000, chan->nvsw.handle);
+ PUSH_KICK(chan->chan.push);
}
/* initialise synchronisation */
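
The nouveau_channel_wait()/nouveau_channel_kick() pair added above adapts the new nvif_push interface to the legacy DMA ring: wait reserves space and repoints bgn/cur/end at the mapped pushbuf, kick accounts for what was written since the last wait and fires the ring. The toy model below mimics that flow with a plain in-memory buffer; every name in it is invented and it is not the nvif_push API:

#include <stdint.h>
#include <stdio.h>

struct toy_push {
	uint32_t  buf[64];
	uint32_t *bgn, *cur, *end;
};

static int toy_wait(struct toy_push *p, unsigned dwords)
{
	/* reserve space; the real driver would wrap the ring or block on
	 * the GPU's get pointer here */
	if (p->cur + dwords > p->buf + 64)
		return -1;
	p->bgn = p->cur;
	p->end = p->cur + dwords;
	return 0;
}

static void toy_kick(struct toy_push *p)
{
	/* account for everything written since the last wait, then submit */
	printf("submitting %ld dwords\n", (long)(p->cur - p->bgn));
	p->bgn = p->cur;
}

int main(void)
{
	struct toy_push p;

	p.bgn = p.cur = p.end = p.buf;

	if (toy_wait(&p, 2) == 0) {
		*p.cur++ = 0x20048000;  /* pretend method header */
		*p.cur++ = 0xcafef00d;  /* pretend payload */
		toy_kick(&p);
	}
	return 0;
}
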
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 9307357e1361..98ba9d27e6b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -3,9 +3,15 @@
#define __NOUVEAU_CHAN_H__
#include <nvif/object.h>
#include <nvif/notify.h>
+#include <nvif/push.h>
struct nvif_device;
struct nouveau_channel {
+ struct {
+ struct nvif_push _push;
+ struct nvif_push *push;
+ } chan;
+
struct nvif_device *device;
struct nouveau_drm *drm;
struct nouveau_vmm *vmm;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index ab2c2b2cab10..7674025a4bfe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -330,7 +330,7 @@ nouveau_conn_attach_properties(struct drm_connector *connector)
case DRM_MODE_CONNECTOR_VGA:
if (disp->disp.object.oclass < NV50_DISP)
break; /* Can only scale on DFPs. */
- /* Fall-through. */
+ fallthrough;
default:
drm_object_attach_property(&connector->base, dev->mode_config.
scaling_mode_property,
@@ -409,7 +409,7 @@ static void
nouveau_connector_destroy(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
- nvif_notify_fini(&nv_connector->hpd);
+ nvif_notify_dtor(&nv_connector->hpd);
kfree(nv_connector->edid);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
@@ -445,7 +445,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
case DCB_OUTPUT_LVDS:
switcheroo_ddc = !!(vga_switcheroo_handler_flags() &
VGA_SWITCHEROO_CAN_SWITCH_DDC);
- /* fall-through */
+ fallthrough;
default:
if (!nv_encoder->i2c)
break;
@@ -571,8 +571,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
pm_runtime_get_noresume(dev->dev);
} else {
ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(dev->dev);
return conn_status;
+ }
}
nv_encoder = nouveau_connector_ddc_detect(connector);
@@ -1448,7 +1450,8 @@ nouveau_connector_create(struct drm_device *dev,
break;
}
- ret = nvif_notify_init(&disp->disp.object, nouveau_connector_hotplug,
+ ret = nvif_notify_ctor(&disp->disp.object, "kmsHotplug",
+ nouveau_connector_hotplug,
true, NV04_DISP_NTFY_CONN,
&(struct nvif_notify_conn_req_v0) {
.mask = NVIF_NOTIFY_CONN_V0_ANY,
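
Note on the pm_runtime error paths changed above and repeated in several files below: pm_runtime_get_sync() raises the device usage count even when it returns an error, so bailing out without a matching put leaks the reference and can keep the GPU from runtime-suspending. A minimal sketch of the corrected idiom, with a hypothetical helper name that is not part of this patch:

```c
#include <linux/errno.h>
#include <linux/pm_runtime.h>

/* Hypothetical helper (not from this patch) showing the corrected error
 * path: pm_runtime_get_sync() bumps the usage count even when it fails,
 * so the failure branch must drop that reference itself. */
static int example_runtime_get(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(dev);
		return ret;
	}
	return 0;
}
```

Newer kernels also provide pm_runtime_resume_and_get(), which drops the reference on failure itself.
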
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 9e062c7adec8..d6de5cb8e223 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -29,6 +29,10 @@
#include <nvif/notify.h>
+#include <nvhw/class/cl507d.h>
+#include <nvhw/class/cl907d.h>
+#include <nvhw/drf.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
@@ -56,16 +60,23 @@ struct nouveau_conn_atom {
* hw values, and the code relies on this.
*/
enum {
- DITHERING_MODE_OFF = 0x00,
- DITHERING_MODE_ON = 0x01,
- DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
- DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
- DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+ DITHERING_MODE_OFF =
+ NVDEF(NV507D, HEAD_SET_DITHER_CONTROL, ENABLE, DISABLE),
+ DITHERING_MODE_ON =
+ NVDEF(NV507D, HEAD_SET_DITHER_CONTROL, ENABLE, ENABLE),
+ DITHERING_MODE_DYNAMIC2X2 = DITHERING_MODE_ON |
+ NVDEF(NV507D, HEAD_SET_DITHER_CONTROL, MODE, DYNAMIC_2X2),
+ DITHERING_MODE_STATIC2X2 = DITHERING_MODE_ON |
+ NVDEF(NV507D, HEAD_SET_DITHER_CONTROL, MODE, STATIC_2X2),
+ DITHERING_MODE_TEMPORAL = DITHERING_MODE_ON |
+ NVDEF(NV907D, HEAD_SET_DITHER_CONTROL, MODE, TEMPORAL),
DITHERING_MODE_AUTO
} mode;
enum {
- DITHERING_DEPTH_6BPC = 0x00,
- DITHERING_DEPTH_8BPC = 0x02,
+ DITHERING_DEPTH_6BPC =
+ NVDEF(NV507D, HEAD_SET_DITHER_CONTROL, BITS, DITHER_TO_6_BITS),
+ DITHERING_DEPTH_8BPC =
+ NVDEF(NV507D, HEAD_SET_DITHER_CONTROL, BITS, DITHER_TO_8_BITS),
DITHERING_DEPTH_AUTO
} depth;
} dither;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 63b5c8cf9ae4..c2bc05eb2e54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -54,8 +54,10 @@ nouveau_debugfs_strap_peek(struct seq_file *m, void *data)
int ret;
ret = pm_runtime_get_sync(drm->dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(drm->dev->dev);
return ret;
+ }
seq_printf(m, "0x%08x\n",
nvif_rd32(&drm->client.device.object, 0x101000));
@@ -258,7 +260,7 @@ nouveau_debugfs_init(struct nouveau_drm *drm)
if (!drm->debugfs)
return -ENOMEM;
- ret = nvif_object_init(&drm->client.device.object, 0,
+ ret = nvif_object_ctor(&drm->client.device.object, "debugfsCtrl", 0,
NVIF_CLASS_CONTROL, NULL, 0,
&drm->debugfs->ctrl);
if (ret)
@@ -271,7 +273,7 @@ void
nouveau_debugfs_fini(struct nouveau_drm *drm)
{
if (drm->debugfs && drm->debugfs->ctrl.priv)
- nvif_object_fini(&drm->debugfs->ctrl);
+ nvif_object_dtor(&drm->debugfs->ctrl);
kfree(drm->debugfs);
drm->debugfs = NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 657554cf011e..63c58f12458c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -635,7 +635,8 @@ nouveau_display_create(struct drm_device *dev)
drm_kms_helper_poll_disable(dev);
if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
- ret = nvif_disp_ctor(&drm->client.device, 0, &disp->disp);
+ ret = nvif_disp_ctor(&drm->client.device, "kmsDisp", 0,
+ &disp->disp);
if (ret == 0) {
nouveau_display_create_properties(dev);
if (disp->disp.object.oclass < NV50_DISP)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 3c430a550a51..ddb75d80bc53 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -30,19 +30,6 @@
#include <nvif/user.h>
-void
-OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
-{
- bool is_iomem;
- u32 *mem = ttm_kmap_obj_virtual(&chan->push.buffer->kmap, &is_iomem);
- mem = &mem[chan->dma.cur];
- if (is_iomem)
- memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
- else
- memcpy(mem, data, nr_dwords * 4);
- chan->dma.cur += nr_dwords;
-}
-
/* Fetch and adjust GPU GET pointer
*
* Returns:
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index fc5e3f41282d..035a709c7be1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -45,17 +45,6 @@ void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
*/
#define NOUVEAU_DMA_SKIPS (128 / 4)
-/* Hardcoded object assignments to subchannels (subchannel id). */
-enum {
- NvSubCtxSurf2D = 0,
- NvSubSw = 1,
- NvSubImageBlit = 2,
- NvSubGdiRect = 3,
-
- NvSub2D = 3, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
- NvSubCopy = 4, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
-};
-
/* Object handles - for stuff that doesn't use handle == oclass. */
enum {
NvDmaFB = 0x80000002,
@@ -66,23 +55,6 @@ enum {
NvEvoSema1 = 0x80000011,
};
-#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
-#define NV_MEMORY_TO_MEMORY_FORMAT_NAME 0x00000000
-#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF 0x00000050
-#define NV_MEMORY_TO_MEMORY_FORMAT_NOP 0x00000100
-#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104
-#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE 0x00000000
-#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN 0x00000001
-#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY 0x00000180
-#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE 0x00000184
-#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c
-
-#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039
-#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200 0x00000200
-#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C 0x0000021c
-#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH 0x00000238
-#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH 0x0000023c
-
static __must_check inline int
RING_SPACE(struct nouveau_channel *chan, int size)
{
@@ -102,39 +74,6 @@ OUT_RING(struct nouveau_channel *chan, int data)
nouveau_bo_wr32(chan->push.buffer, chan->dma.cur++, data);
}
-extern void
-OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
-
-static inline void
-BEGIN_NV04(struct nouveau_channel *chan, int subc, int mthd, int size)
-{
- OUT_RING(chan, 0x00000000 | (subc << 13) | (size << 18) | mthd);
-}
-
-static inline void
-BEGIN_NI04(struct nouveau_channel *chan, int subc, int mthd, int size)
-{
- OUT_RING(chan, 0x40000000 | (subc << 13) | (size << 18) | mthd);
-}
-
-static inline void
-BEGIN_NVC0(struct nouveau_channel *chan, int subc, int mthd, int size)
-{
- OUT_RING(chan, 0x20000000 | (size << 16) | (subc << 13) | (mthd >> 2));
-}
-
-static inline void
-BEGIN_NIC0(struct nouveau_channel *chan, int subc, int mthd, int size)
-{
- OUT_RING(chan, 0x60000000 | (size << 16) | (subc << 13) | (mthd >> 2));
-}
-
-static inline void
-BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
-{
- OUT_RING(chan, 0x80000000 | (data << 16) | (subc << 13) | (mthd >> 2));
-}
-
#define WRITE_PUT(val) do { \
mb(); \
nouveau_bo_rd32(chan->push.buffer, 0); \
@@ -164,25 +103,6 @@ WIND_RING(struct nouveau_channel *chan)
chan->dma.cur = chan->dma.put;
}
-/* FIFO methods */
-#define NV01_SUBCHAN_OBJECT 0x00000000
-#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH 0x00000010
-#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_LOW 0x00000014
-#define NV84_SUBCHAN_SEMAPHORE_SEQUENCE 0x00000018
-#define NV84_SUBCHAN_SEMAPHORE_TRIGGER 0x0000001c
-#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001
-#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
-#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
-#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
-#define NV84_SUBCHAN_UEVENT 0x00000020
-#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
-#define NV10_SUBCHAN_REF_CNT 0x00000050
-#define NV11_SUBCHAN_DMA_SEMAPHORE 0x00000060
-#define NV11_SUBCHAN_SEMAPHORE_OFFSET 0x00000064
-#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE 0x00000068
-#define NV11_SUBCHAN_SEMAPHORE_RELEASE 0x0000006c
-#define NV40_SUBCHAN_YIELD 0x00000080
-
/* NV_SW object class */
#define NV_SW_DMA_VBLSEM 0x0000018c
#define NV_SW_VBLSEM_OFFSET 0x00000400
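
For context on the conversions throughout this series: the BEGIN_*()/OUT_RING() helpers deleted above packed the subchannel, method address and dword count into a single FIFO header word, which the PUSH_*() macros now emit from generated class headers. A small stand-alone sketch of the removed encodings; the shift values and the NvSubSw/NvSubCopy assignments are taken from the macros and enum deleted above:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Header-word encodings copied from the BEGIN_NV04/BEGIN_NVC0/BEGIN_IMC0
 * macros this patch removes from nouveau_dma.h. */
static uint32_t nv04_hdr(uint32_t subc, uint32_t mthd, uint32_t size)
{
	return 0x00000000 | (subc << 13) | (size << 18) | mthd;
}

static uint32_t nvc0_hdr(uint32_t subc, uint32_t mthd, uint32_t size)
{
	return 0x20000000 | (size << 16) | (subc << 13) | (mthd >> 2);
}

static uint32_t imc0_hdr(uint32_t subc, uint32_t mthd, uint16_t data)
{
	return 0x80000000 | ((uint32_t)data << 16) | (subc << 13) | (mthd >> 2);
}

int main(void)
{
	/* BEGIN_NV04(chan, NvSubSw, 0x0000, 1), NvSubSw == 1: the software
	 * object bind that the abi16/drm hunks convert to PUSH_NVSQ(). */
	assert(nv04_hdr(1, 0x0000, 1) == 0x00042000);

	/* BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8) and
	 * BEGIN_IMC0(chan, NvSubCopy, 0x0260, 0), NvSubCopy == 4, as used
	 * by the dmem copy-engine code before its conversion below. */
	printf("0x%08x 0x%08x\n", nvc0_hdr(4, 0x0400, 8), imc0_hdr(4, 0x0260, 0));
	return 0;
}
```
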
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index af870671195a..98a1739e01e9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -29,11 +29,14 @@
#include <nvif/class.h>
#include <nvif/object.h>
+#include <nvif/push906f.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>
#include <nvif/if000c.h>
+#include <nvhw/class/cla0b5.h>
+
#include <linux/sched/mm.h>
#include <linux/hmm.h>
@@ -385,57 +388,72 @@ nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
enum nouveau_aper dst_aper, u64 dst_addr,
enum nouveau_aper src_aper, u64 src_addr)
{
- struct nouveau_channel *chan = drm->dmem->migrate.chan;
- u32 launch_dma = (1 << 9) /* MULTI_LINE_ENABLE. */ |
- (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
- (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
- (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
- (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
+ struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
+ u32 launch_dma = 0;
int ret;
- ret = RING_SPACE(chan, 13);
+ ret = PUSH_WAIT(push, 13);
if (ret)
return ret;
if (src_aper != NOUVEAU_APER_VIRT) {
switch (src_aper) {
case NOUVEAU_APER_VRAM:
- BEGIN_IMC0(chan, NvSubCopy, 0x0260, 0);
+ PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
+ NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB));
break;
case NOUVEAU_APER_HOST:
- BEGIN_IMC0(chan, NvSubCopy, 0x0260, 1);
+ PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
+ NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM));
break;
default:
return -EINVAL;
}
- launch_dma |= 0x00001000; /* SRC_TYPE_PHYSICAL. */
+
+ launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, SRC_TYPE, PHYSICAL);
}
if (dst_aper != NOUVEAU_APER_VIRT) {
switch (dst_aper) {
case NOUVEAU_APER_VRAM:
- BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
+ PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
+ NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
break;
case NOUVEAU_APER_HOST:
- BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
+ PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
+ NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
break;
default:
return -EINVAL;
}
- launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
+
+ launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
}
- BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
- OUT_RING (chan, upper_32_bits(src_addr));
- OUT_RING (chan, lower_32_bits(src_addr));
- OUT_RING (chan, upper_32_bits(dst_addr));
- OUT_RING (chan, lower_32_bits(dst_addr));
- OUT_RING (chan, PAGE_SIZE);
- OUT_RING (chan, PAGE_SIZE);
- OUT_RING (chan, PAGE_SIZE);
- OUT_RING (chan, npages);
- BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
- OUT_RING (chan, launch_dma);
+ PUSH_MTHD(push, NVA0B5, OFFSET_IN_UPPER,
+ NVVAL(NVA0B5, OFFSET_IN_UPPER, UPPER, upper_32_bits(src_addr)),
+
+ OFFSET_IN_LOWER, lower_32_bits(src_addr),
+
+ OFFSET_OUT_UPPER,
+ NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),
+
+ OFFSET_OUT_LOWER, lower_32_bits(dst_addr),
+ PITCH_IN, PAGE_SIZE,
+ PITCH_OUT, PAGE_SIZE,
+ LINE_LENGTH_IN, PAGE_SIZE,
+ LINE_COUNT, npages);
+
+ PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
+ NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
+ NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
+ NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
+ NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
+ NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
+ NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
+ NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, TRUE) |
+ NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) |
+ NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
return 0;
}
@@ -443,45 +461,55 @@ static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
enum nouveau_aper dst_aper, u64 dst_addr)
{
- struct nouveau_channel *chan = drm->dmem->migrate.chan;
- u32 launch_dma = (1 << 10) /* REMAP_ENABLE_TRUE */ |
- (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
- (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
- (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
- (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
- u32 remap = (4 << 0) /* DST_X_CONST_A */ |
- (5 << 4) /* DST_Y_CONST_B */ |
- (3 << 16) /* COMPONENT_SIZE_FOUR */ |
- (1 << 24) /* NUM_DST_COMPONENTS_TWO */;
+ struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
+ u32 launch_dma = 0;
int ret;
- ret = RING_SPACE(chan, 12);
+ ret = PUSH_WAIT(push, 12);
if (ret)
return ret;
switch (dst_aper) {
case NOUVEAU_APER_VRAM:
- BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
- break;
+ PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
+ NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
+ break;
case NOUVEAU_APER_HOST:
- BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
+ PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
+ NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
break;
default:
return -EINVAL;
}
- launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
-
- BEGIN_NVC0(chan, NvSubCopy, 0x0700, 3);
- OUT_RING(chan, 0);
- OUT_RING(chan, 0);
- OUT_RING(chan, remap);
- BEGIN_NVC0(chan, NvSubCopy, 0x0408, 2);
- OUT_RING(chan, upper_32_bits(dst_addr));
- OUT_RING(chan, lower_32_bits(dst_addr));
- BEGIN_NVC0(chan, NvSubCopy, 0x0418, 1);
- OUT_RING(chan, length >> 3);
- BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
- OUT_RING(chan, launch_dma);
+
+ launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
+
+ PUSH_MTHD(push, NVA0B5, SET_REMAP_CONST_A, 0,
+ SET_REMAP_CONST_B, 0,
+
+ SET_REMAP_COMPONENTS,
+ NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_X, CONST_A) |
+ NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_Y, CONST_B) |
+ NVDEF(NVA0B5, SET_REMAP_COMPONENTS, COMPONENT_SIZE, FOUR) |
+ NVDEF(NVA0B5, SET_REMAP_COMPONENTS, NUM_DST_COMPONENTS, TWO));
+
+ PUSH_MTHD(push, NVA0B5, OFFSET_OUT_UPPER,
+ NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),
+
+ OFFSET_OUT_LOWER, lower_32_bits(dst_addr));
+
+ PUSH_MTHD(push, NVA0B5, LINE_LENGTH_IN, length >> 3);
+
+ PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
+ NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
+ NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
+ NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
+ NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
+ NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
+ NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
+ NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, FALSE) |
+ NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, TRUE) |
+ NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
return 0;
}
@@ -550,7 +578,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, *dma_addr))
goto out_free_page;
- if (drm->dmem->migrate.copy_func(drm, page_size(spage),
+ if (drm->dmem->migrate.copy_func(drm, 1,
NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
goto out_dma_unmap;
} else {
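
The copy-engine hunks above replace hand-packed LAUNCH_DMA and SET_REMAP_COMPONENTS words with NVDEF() fields from cla0b5.h, which is not part of this section; the NVDEF form is expected to reproduce the same words, plus SRC/DST_TYPE_PHYSICAL (formerly 0x1000/0x2000) when an aperture is physical. The last hunk also corrects copy_func()'s second argument, which per the nvc0b5_migrate_copy() signature is a page count, from page_size(spage) to 1. A stand-alone sketch reconstructing the old constants from the removed comments:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* launch_dma in nvc0b5_migrate_copy() before this patch:
	 * MULTI_LINE_ENABLE | DST/SRC_MEMORY_LAYOUT_PITCH |
	 * FLUSH_ENABLE_TRUE | DATA_TRANSFER_TYPE_NON_PIPELINED */
	uint32_t copy = (1 << 9) | (1 << 8) | (1 << 7) | (1 << 2) | (2 << 0);

	/* launch_dma in nvc0b5_migrate_clear(): REMAP_ENABLE_TRUE in place
	 * of MULTI_LINE_ENABLE */
	uint32_t clear = (1 << 10) | (1 << 8) | (1 << 7) | (1 << 2) | (2 << 0);

	/* remap word: DST_X_CONST_A | DST_Y_CONST_B | COMPONENT_SIZE_FOUR |
	 * NUM_DST_COMPONENTS_TWO */
	uint32_t remap = (4 << 0) | (5 << 4) | (3 << 16) | (1 << 24);

	printf("copy  = 0x%08x\n", copy);  /* 0x00000386 */
	printf("clear = 0x%08x\n", clear); /* 0x00000586 */
	printf("remap = 0x%08x\n", remap); /* 0x01030054 */
	return 0;
}
```
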
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index ac93d12201dc..22d246acc5e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -41,6 +41,7 @@
#include <nvif/driver.h>
#include <nvif/fifo.h>
+#include <nvif/push006c.h>
#include <nvif/user.h>
#include <nvif/class.h>
@@ -178,10 +179,10 @@ nouveau_cli_fini(struct nouveau_cli *cli)
usif_client_fini(cli);
nouveau_vmm_fini(&cli->svm);
nouveau_vmm_fini(&cli->vmm);
- nvif_mmu_fini(&cli->mmu);
- nvif_device_fini(&cli->device);
+ nvif_mmu_dtor(&cli->mmu);
+ nvif_device_dtor(&cli->device);
mutex_lock(&cli->drm->master.lock);
- nvif_client_fini(&cli->base);
+ nvif_client_dtor(&cli->base);
mutex_unlock(&cli->drm->master.lock);
}
@@ -229,7 +230,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
cli->name, device, &cli->base);
} else {
mutex_lock(&drm->master.lock);
- ret = nvif_client_init(&drm->master.base, cli->name, device,
+ ret = nvif_client_ctor(&drm->master.base, cli->name, device,
&cli->base);
mutex_unlock(&drm->master.lock);
}
@@ -238,7 +239,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
goto done;
}
- ret = nvif_device_init(&cli->base.object, 0, NV_DEVICE,
+ ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
&(struct nv_device_v0) {
.device = ~0,
}, sizeof(struct nv_device_v0),
@@ -254,7 +255,8 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
goto done;
}
- ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
+ ret = nvif_mmu_ctor(&cli->device.object, "drmMmu", mmus[ret].oclass,
+ &cli->mmu);
if (ret) {
NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
goto done;
@@ -290,7 +292,7 @@ static void
nouveau_accel_ce_fini(struct nouveau_drm *drm)
{
nouveau_channel_idle(drm->cechan);
- nvif_object_fini(&drm->ttm.copy);
+ nvif_object_dtor(&drm->ttm.copy);
nouveau_channel_del(&drm->cechan);
}
@@ -328,9 +330,8 @@ static void
nouveau_accel_gr_fini(struct nouveau_drm *drm)
{
nouveau_channel_idle(drm->channel);
- nvif_object_fini(&drm->ntfy);
+ nvif_object_dtor(&drm->ntfy);
nvkm_gpuobj_del(&drm->notify);
- nvif_object_fini(&drm->nvsw);
nouveau_channel_del(&drm->channel);
}
@@ -362,16 +363,15 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
* synchronisation of page flips, as well as to implement fences
* on TNT/TNT2 HW that lacks any kind of support in host.
*/
- if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
- ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
- nouveau_abi16_swclass(drm), NULL, 0,
- &drm->nvsw);
+ if (!drm->channel->nvsw.client && device->info.family < NV_DEVICE_INFO_V0_TESLA) {
+ ret = nvif_object_ctor(&drm->channel->user, "drmNvsw",
+ NVDRM_NVSW, nouveau_abi16_swclass(drm),
+ NULL, 0, &drm->channel->nvsw);
if (ret == 0) {
- ret = RING_SPACE(drm->channel, 2);
- if (ret == 0) {
- BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
- OUT_RING (drm->channel, drm->nvsw.handle);
- }
+ struct nvif_push *push = drm->channel->chan.push;
+ ret = PUSH_WAIT(push, 2);
+ if (ret == 0)
+ PUSH_NVSQ(push, NV_SW, 0x0000, drm->channel->nvsw.handle);
}
if (ret) {
@@ -394,8 +394,8 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
return;
}
- ret = nvif_object_init(&drm->channel->user, NvNotify0,
- NV_DMA_IN_MEMORY,
+ ret = nvif_object_ctor(&drm->channel->user, "drmM2mfNtfy",
+ NvNotify0, NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
@@ -482,7 +482,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
/* Volta requires access to a doorbell register for kickoff. */
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_VOLTA) {
- ret = nvif_user_init(device);
+ ret = nvif_user_ctor(device, "drmUsermode");
if (ret)
return;
}
@@ -495,6 +495,40 @@ nouveau_accel_init(struct nouveau_drm *drm)
nouveau_bo_move_init(drm);
}
+static void __printf(2, 3)
+nouveau_drm_errorf(struct nvif_object *object, const char *fmt, ...)
+{
+ struct nouveau_drm *drm = container_of(object->parent, typeof(*drm), parent);
+ struct va_format vaf;
+ va_list va;
+
+ va_start(va, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &va;
+ NV_ERROR(drm, "%pV", &vaf);
+ va_end(va);
+}
+
+static void __printf(2, 3)
+nouveau_drm_debugf(struct nvif_object *object, const char *fmt, ...)
+{
+ struct nouveau_drm *drm = container_of(object->parent, typeof(*drm), parent);
+ struct va_format vaf;
+ va_list va;
+
+ va_start(va, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &va;
+ NV_DEBUG(drm, "%pV", &vaf);
+ va_end(va);
+}
+
+static const struct nvif_parent_func
+nouveau_parent = {
+ .debugf = nouveau_drm_debugf,
+ .errorf = nouveau_drm_errorf,
+};
+
static int
nouveau_drm_device_init(struct drm_device *dev)
{
@@ -506,6 +540,9 @@ nouveau_drm_device_init(struct drm_device *dev)
dev->dev_private = drm;
drm->dev = dev;
+ nvif_parent_ctor(&nouveau_parent, &drm->parent);
+ drm->master.base.object.parent = &drm->parent;
+
ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
if (ret)
goto fail_alloc;
@@ -582,6 +619,7 @@ fail_ttm:
fail_master:
nouveau_cli_fini(&drm->master);
fail_alloc:
+ nvif_parent_dtor(&drm->parent);
kfree(drm);
return ret;
}
@@ -615,6 +653,7 @@ nouveau_drm_device_fini(struct drm_device *dev)
nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master);
+ nvif_parent_dtor(&drm->parent);
kfree(drm);
}
@@ -1026,8 +1065,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
/* need to bring up power immediately if opening device */
ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(dev->dev);
return ret;
+ }
get_task_comm(tmpname, current);
snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
@@ -1109,8 +1150,10 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
long ret;
ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(dev->dev);
return ret;
+ }
switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
case DRM_NOUVEAU_NVIF:
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 2a6519737800..ae76a5865a5a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -132,8 +132,10 @@ nouveau_cli(struct drm_file *fpriv)
}
#include <nvif/object.h>
+#include <nvif/parent.h>
struct nouveau_drm {
+ struct nvif_parent parent;
struct nouveau_cli master;
struct nouveau_cli client;
struct drm_device *dev;
@@ -184,7 +186,6 @@ struct nouveau_drm {
struct nouveau_channel *channel;
struct nvkm_gpuobj *notify;
struct nouveau_fbdev *fbcon;
- struct nvif_object nvsw;
struct nvif_object ntfy;
/* nv10-nv40 tiling regions */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index f9f5a13fdb80..9eb6085c8625 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -189,8 +189,10 @@ nouveau_fbcon_open(struct fb_info *info, int user)
struct nouveau_fbdev *fbcon = info->par;
struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
int ret = pm_runtime_get_sync(drm->dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put(drm->dev->dev);
return ret;
+ }
return 0;
}
@@ -254,13 +256,13 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
console_unlock();
nouveau_channel_idle(drm->channel);
- nvif_object_fini(&fbcon->twod);
- nvif_object_fini(&fbcon->blit);
- nvif_object_fini(&fbcon->gdi);
- nvif_object_fini(&fbcon->patt);
- nvif_object_fini(&fbcon->rop);
- nvif_object_fini(&fbcon->clip);
- nvif_object_fini(&fbcon->surf2d);
+ nvif_object_dtor(&fbcon->twod);
+ nvif_object_dtor(&fbcon->blit);
+ nvif_object_dtor(&fbcon->gdi);
+ nvif_object_dtor(&fbcon->patt);
+ nvif_object_dtor(&fbcon->rop);
+ nvif_object_dtor(&fbcon->clip);
+ nvif_object_dtor(&fbcon->surf2d);
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 666f2090d92b..e5dcbf67de7e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -108,7 +108,7 @@ void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
nouveau_fence_context_kill(fctx, 0);
- nvif_notify_fini(&fctx->notify);
+ nvif_notify_dtor(&fctx->notify);
fctx->dead = 1;
/*
@@ -195,7 +195,8 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
if (!priv->uevent)
return;
- ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
+ ret = nvif_notify_ctor(&chan->user, "fenceNonStallIntr",
+ nouveau_fence_wait_uevent_handler,
false, NV826E_V0_NTFY_NON_STALL_INTERRUPT,
&(struct nvif_notify_uevent_req) { },
sizeof(struct nvif_notify_uevent_req),
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 63b832585390..81f111ad3f4f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -35,6 +35,7 @@
#include "nouveau_vmm.h"
#include <nvif/class.h>
+#include <nvif/push206e.h>
void
nouveau_gem_object_del(struct drm_gem_object *gem)
@@ -45,8 +46,10 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
int ret;
ret = pm_runtime_get_sync(dev);
- if (WARN_ON(ret < 0 && ret != -EACCES))
+ if (WARN_ON(ret < 0 && ret != -EACCES)) {
+ pm_runtime_put_autosuspend(dev);
return;
+ }
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
@@ -797,7 +800,7 @@ revalidate:
}
} else
if (drm->client.device.info.chipset >= 0x25) {
- ret = RING_SPACE(chan, req->nr_push * 2);
+ ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
if (ret) {
NV_PRINTK(err, cli, "cal_space: %d\n", ret);
goto out;
@@ -807,11 +810,11 @@ revalidate:
struct nouveau_bo *nvbo = (void *)(unsigned long)
bo[push[i].bo_index].user_priv;
- OUT_RING(chan, (nvbo->offset + push[i].offset) | 2);
- OUT_RING(chan, 0);
+ PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
+ PUSH_DATA(chan->chan.push, 0);
}
} else {
- ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
+ ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
goto out;
@@ -841,11 +844,10 @@ revalidate:
push[i].length - 8) / 4, cmd);
}
- OUT_RING(chan, 0x20000000 |
- (nvbo->offset + push[i].offset));
- OUT_RING(chan, 0);
+ PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
+ PUSH_DATA(chan->chan.push, 0);
for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
- OUT_RING(chan, 0);
+ PUSH_DATA(chan->chan.push, 0);
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index c002f8968507..b1bb542d3115 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -87,7 +87,7 @@ nouveau_mem_fini(struct nouveau_mem *mem)
nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[1]);
nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[0]);
mutex_lock(&mem->cli->drm->master.lock);
- nvif_mem_fini(&mem->mem);
+ nvif_mem_dtor(&mem->mem);
mutex_unlock(&mem->cli->drm->master.lock);
}
@@ -121,7 +121,7 @@ nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
mutex_lock(&drm->master.lock);
cli->base.super = true;
- ret = nvif_mem_init_type(mmu, cli->mem->oclass, type, PAGE_SHIFT,
+ ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
reg->num_pages << PAGE_SHIFT,
&args, sizeof(args), &mem->mem);
cli->base.super = super;
@@ -144,7 +144,7 @@ nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
cli->base.super = true;
switch (cli->mem->oclass) {
case NVIF_CLASS_MEM_GF100:
- ret = nvif_mem_init_type(mmu, cli->mem->oclass,
+ ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
drm->ttm.type_vram, page, size,
&(struct gf100_mem_v0) {
.contig = contig,
@@ -152,7 +152,7 @@ nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
&mem->mem);
break;
case NVIF_CLASS_MEM_NV50:
- ret = nvif_mem_init_type(mmu, cli->mem->oclass,
+ ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
drm->ttm.type_vram, page, size,
&(struct nv50_mem_v0) {
.bankswz = mmu->kind[mem->kind] == 2,
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index feaac908efed..c3ccf661b7a6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -46,12 +46,11 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
return 0;
}
-static int
+static void
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
nouveau_mem_fini(nvbe->mem);
- return 0;
}
static struct ttm_backend_func nv04_sgdma_backend = {
@@ -96,12 +95,9 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
else
nvbe->ttm.ttm.func = &nv50_sgdma_backend;
- if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags))
- /*
- * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
- * and thus our nouveau_sgdma_destroy() hook, so we don't need
- * to free nvbe here.
- */
+ if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
+ kfree(nvbe);
return NULL;
+ }
return &nvbe->ttm.ttm;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index ba9f9359c30e..d4b4f866ab78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -347,7 +347,8 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
* All future channel/memory allocations will make use of this
* VMM instead of the standard one.
*/
- ret = nvif_vmm_init(&cli->mmu, cli->vmm.vmm.object.oclass, true,
+ ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
+ cli->vmm.vmm.object.oclass, true,
args->unmanaged_addr, args->unmanaged_size,
&(struct gp100_vmm_v0) {
.fault_replay = true,
@@ -562,6 +563,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
.end = notifier->notifier.interval_tree.last + 1,
.pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
.hmm_pfns = hmm_pfns,
+ .dev_private_owner = drm->dev,
};
struct mm_struct *mm = notifier->notifier.mm;
int ret;
@@ -903,8 +905,8 @@ nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
nouveau_svm_fault_buffer_fini(svm, id);
- nvif_notify_fini(&buffer->notify);
- nvif_object_fini(&buffer->object);
+ nvif_notify_dtor(&buffer->notify);
+ nvif_object_dtor(&buffer->object);
}
static int
@@ -918,8 +920,8 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
buffer->id = id;
- ret = nvif_object_init(device, 0, oclass, &args, sizeof(args),
- &buffer->object);
+ ret = nvif_object_ctor(device, "svmFaultBuffer", 0, oclass, &args,
+ sizeof(args), &buffer->object);
if (ret < 0) {
SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
return ret;
@@ -930,8 +932,8 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
buffer->getaddr = args.get;
buffer->putaddr = args.put;
- ret = nvif_notify_init(&buffer->object, nouveau_svm_fault, true,
- NVB069_V0_NTFY_FAULT, NULL, 0, 0,
+ ret = nvif_notify_ctor(&buffer->object, "svmFault", nouveau_svm_fault,
+ true, NVB069_V0_NTFY_FAULT, NULL, 0, 0,
&buffer->notify);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index b28c7dc13ad6..a49e88129c92 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -121,15 +121,15 @@ void
nouveau_vmm_fini(struct nouveau_vmm *vmm)
{
nouveau_svmm_fini(&vmm->svmm);
- nvif_vmm_fini(&vmm->vmm);
+ nvif_vmm_dtor(&vmm->vmm);
vmm->cli = NULL;
}
int
nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
{
- int ret = nvif_vmm_init(&cli->mmu, oclass, false, PAGE_SIZE, 0, NULL, 0,
- &vmm->vmm);
+ int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, false, PAGE_SIZE,
+ 0, NULL, 0, &vmm->vmm);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 01731dbeb3d8..92f3fb6765ab 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -21,28 +21,30 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
+#define NVIF_DEBUG_PRINT_DISABLE
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
+#include <nvif/push006c.h>
+
int
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
+ struct nvif_push *push = chan->chan.push;
int ret;
- ret = RING_SPACE(chan, 4);
+ ret = PUSH_WAIT(push, 4);
if (ret)
return ret;
- BEGIN_NV04(chan, NvSubImageBlit, 0x0300, 3);
- OUT_RING(chan, (region->sy << 16) | region->sx);
- OUT_RING(chan, (region->dy << 16) | region->dx);
- OUT_RING(chan, (region->height << 16) | region->width);
- FIRE_RING(chan);
+ PUSH_NVSQ(push, NV05F, 0x0300, (region->sy << 16) | region->sx,
+ 0x0304, (region->dy << 16) | region->dx,
+ 0x0308, (region->height << 16) | region->width);
+ PUSH_KICK(push);
return 0;
}
@@ -52,24 +54,22 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
+ struct nvif_push *push = chan->chan.push;
int ret;
- ret = RING_SPACE(chan, 7);
+ ret = PUSH_WAIT(push, 7);
if (ret)
return ret;
- BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
- OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
- BEGIN_NV04(chan, NvSubGdiRect, 0x03fc, 1);
+ PUSH_NVSQ(push, NV04A, 0x02fc, (rect->rop != ROP_COPY) ? 1 : 3);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR)
- OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+ PUSH_NVSQ(push, NV04A, 0x03fc, ((uint32_t *)info->pseudo_palette)[rect->color]);
else
- OUT_RING(chan, rect->color);
- BEGIN_NV04(chan, NvSubGdiRect, 0x0400, 2);
- OUT_RING(chan, (rect->dx << 16) | rect->dy);
- OUT_RING(chan, (rect->width << 16) | rect->height);
- FIRE_RING(chan);
+ PUSH_NVSQ(push, NV04A, 0x03fc, rect->color);
+ PUSH_NVSQ(push, NV04A, 0x0400, (rect->dx << 16) | rect->dy,
+ 0x0404, (rect->width << 16) | rect->height);
+ PUSH_KICK(push);
return 0;
}
@@ -79,6 +79,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
+ struct nvif_push *push = chan->chan.push;
uint32_t fg;
uint32_t bg;
uint32_t dsize;
@@ -88,7 +89,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (image->depth != 1)
return -ENODEV;
- ret = RING_SPACE(chan, 8);
+ ret = PUSH_WAIT(push, 8);
if (ret)
return ret;
@@ -101,31 +102,29 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
bg = image->bg_color;
}
- BEGIN_NV04(chan, NvSubGdiRect, 0x0be4, 7);
- OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
- OUT_RING(chan, ((image->dy + image->height) << 16) |
- ((image->dx + image->width) & 0xffff));
- OUT_RING(chan, bg);
- OUT_RING(chan, fg);
- OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8));
- OUT_RING(chan, (image->height << 16) | image->width);
- OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+ PUSH_NVSQ(push, NV04A, 0x0be4, (image->dy << 16) | (image->dx & 0xffff),
+ 0x0be8, ((image->dy + image->height) << 16) |
+ ((image->dx + image->width) & 0xffff),
+ 0x0bec, bg,
+ 0x0bf0, fg,
+ 0x0bf4, (image->height << 16) | ALIGN(image->width, 8),
+ 0x0bf8, (image->height << 16) | image->width,
+ 0x0bfc, (image->dy << 16) | (image->dx & 0xffff));
dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
while (dsize) {
int iter_len = dsize > 128 ? 128 : dsize;
- ret = RING_SPACE(chan, iter_len + 1);
+ ret = PUSH_WAIT(push, iter_len + 1);
if (ret)
return ret;
- BEGIN_NV04(chan, NvSubGdiRect, 0x0c00, iter_len);
- OUT_RINGp(chan, data, iter_len);
+ PUSH_NVSQ(push, NV04A, 0x0c00, data, iter_len);
data += iter_len;
dsize -= iter_len;
}
- FIRE_RING(chan);
+ PUSH_KICK(push);
return 0;
}
@@ -137,6 +136,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
struct nvif_device *device = &drm->client.device;
+ struct nvif_push *push = chan->chan.push;
int surface_fmt, pattern_fmt, rect_fmt;
int ret;
@@ -168,110 +168,90 @@ nv04_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nvif_object_init(&chan->user, 0x0062,
+ ret = nvif_object_ctor(&chan->user, "fbconCtxSurf2d", 0x0062,
device->info.family >= NV_DEVICE_INFO_V0_CELSIUS ?
0x0062 : 0x0042, NULL, 0, &nfbdev->surf2d);
if (ret)
return ret;
- ret = nvif_object_init(&chan->user, 0x0019, 0x0019, NULL, 0,
- &nfbdev->clip);
+ ret = nvif_object_ctor(&chan->user, "fbconCtxClip", 0x0019, 0x0019,
+ NULL, 0, &nfbdev->clip);
if (ret)
return ret;
- ret = nvif_object_init(&chan->user, 0x0043, 0x0043, NULL, 0,
- &nfbdev->rop);
+ ret = nvif_object_ctor(&chan->user, "fbconCtxRop", 0x0043, 0x0043,
+ NULL, 0, &nfbdev->rop);
if (ret)
return ret;
- ret = nvif_object_init(&chan->user, 0x0044, 0x0044, NULL, 0,
- &nfbdev->patt);
+ ret = nvif_object_ctor(&chan->user, "fbconCtxPatt", 0x0044, 0x0044,
+ NULL, 0, &nfbdev->patt);
if (ret)
return ret;
- ret = nvif_object_init(&chan->user, 0x004a, 0x004a, NULL, 0,
- &nfbdev->gdi);
+ ret = nvif_object_ctor(&chan->user, "fbconGdiRectText", 0x004a, 0x004a,
+ NULL, 0, &nfbdev->gdi);
if (ret)
return ret;
- ret = nvif_object_init(&chan->user, 0x005f,
+ ret = nvif_object_ctor(&chan->user, "fbconImageBlit", 0x005f,
device->info.chipset >= 0x11 ? 0x009f : 0x005f,
NULL, 0, &nfbdev->blit);
if (ret)
return ret;
- if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
+ if (PUSH_WAIT(push, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
nouveau_fbcon_gpu_lockup(info);
return 0;
}
- BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
- OUT_RING(chan, nfbdev->surf2d.handle);
- BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2);
- OUT_RING(chan, chan->vram.handle);
- OUT_RING(chan, chan->vram.handle);
- BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4);
- OUT_RING(chan, surface_fmt);
- OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
- OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
- OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
-
- BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
- OUT_RING(chan, nfbdev->rop.handle);
- BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1);
- OUT_RING(chan, 0x55);
-
- BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
- OUT_RING(chan, nfbdev->patt.handle);
- BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8);
- OUT_RING(chan, pattern_fmt);
+ PUSH_NVSQ(push, NV042, 0x0000, nfbdev->surf2d.handle);
+ PUSH_NVSQ(push, NV042, 0x0184, chan->vram.handle,
+ 0x0188, chan->vram.handle);
+ PUSH_NVSQ(push, NV042, 0x0300, surface_fmt,
+ 0x0304, info->fix.line_length | (info->fix.line_length << 16),
+ 0x0308, info->fix.smem_start - dev->mode_config.fb_base,
+ 0x030c, info->fix.smem_start - dev->mode_config.fb_base);
+
+ PUSH_NVSQ(push, NV043, 0x0000, nfbdev->rop.handle);
+ PUSH_NVSQ(push, NV043, 0x0300, 0x55);
+
+ PUSH_NVSQ(push, NV044, 0x0000, nfbdev->patt.handle);
+ PUSH_NVSQ(push, NV044, 0x0300, pattern_fmt,
#ifdef __BIG_ENDIAN
- OUT_RING(chan, 2);
+ 0x0304, 2,
#else
- OUT_RING(chan, 1);
+ 0x0304, 1,
#endif
- OUT_RING(chan, 0);
- OUT_RING(chan, 1);
- OUT_RING(chan, ~0);
- OUT_RING(chan, ~0);
- OUT_RING(chan, ~0);
- OUT_RING(chan, ~0);
-
- BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
- OUT_RING(chan, nfbdev->clip.handle);
- BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2);
- OUT_RING(chan, 0);
- OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
-
- BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1);
- OUT_RING(chan, nfbdev->blit.handle);
- BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1);
- OUT_RING(chan, nfbdev->surf2d.handle);
- BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
- OUT_RING(chan, 3);
- if (device->info.chipset >= 0x11 /*XXX: oclass == 0x009f*/) {
- BEGIN_NV04(chan, NvSubImageBlit, 0x0120, 3);
- OUT_RING(chan, 0);
- OUT_RING(chan, 1);
- OUT_RING(chan, 2);
+ 0x0308, 0,
+ 0x030c, 1,
+ 0x0310, ~0,
+ 0x0314, ~0,
+ 0x0318, ~0,
+ 0x031c, ~0);
+
+ PUSH_NVSQ(push, NV019, 0x0000, nfbdev->clip.handle);
+ PUSH_NVSQ(push, NV019, 0x0300, 0,
+ 0x0304, (info->var.yres_virtual << 16) | info->var.xres_virtual);
+
+ PUSH_NVSQ(push, NV05F, 0x0000, nfbdev->blit.handle);
+ PUSH_NVSQ(push, NV05F, 0x019c, nfbdev->surf2d.handle);
+ PUSH_NVSQ(push, NV05F, 0x02fc, 3);
+ if (nfbdev->blit.oclass == 0x009f) {
+ PUSH_NVSQ(push, NV09F, 0x0120, 0,
+ 0x0124, 1,
+ 0x0128, 2);
}
- BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
- OUT_RING(chan, nfbdev->gdi.handle);
- BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1);
- OUT_RING(chan, nfbdev->surf2d.handle);
- BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2);
- OUT_RING(chan, nfbdev->patt.handle);
- OUT_RING(chan, nfbdev->rop.handle);
- BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1);
- OUT_RING(chan, 1);
- BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1);
- OUT_RING(chan, rect_fmt);
- BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
- OUT_RING(chan, 3);
-
- FIRE_RING(chan);
+ PUSH_NVSQ(push, NV04A, 0x0000, nfbdev->gdi.handle);
+ PUSH_NVSQ(push, NV04A, 0x0198, nfbdev->surf2d.handle);
+ PUSH_NVSQ(push, NV04A, 0x0188, nfbdev->patt.handle,
+ 0x018c, nfbdev->rop.handle);
+ PUSH_NVSQ(push, NV04A, 0x0304, 1);
+ PUSH_NVSQ(push, NV04A, 0x0300, rect_fmt);
+ PUSH_NVSQ(push, NV04A, 0x02fc, 3);
+ PUSH_KICK(push);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index c41e82be4893..5b71a5a5cd85 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -21,12 +21,12 @@
*
* Authors: Ben Skeggs
*/
-
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include <nvif/if0004.h>
+#include <nvif/push006c.h>
struct nv04_fence_chan {
struct nouveau_fence_chan base;
@@ -39,12 +39,11 @@ struct nv04_fence_priv {
static int
nv04_fence_emit(struct nouveau_fence *fence)
{
- struct nouveau_channel *chan = fence->channel;
- int ret = RING_SPACE(chan, 2);
+ struct nvif_push *push = fence->channel->chan.push;
+ int ret = PUSH_WAIT(push, 2);
if (ret == 0) {
- BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
- OUT_RING (chan, fence->base.seqno);
- FIRE_RING (chan);
+ PUSH_NVSQ(push, NV_SW, 0x0150, fence->base.seqno);
+ PUSH_KICK(push);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 4476b712dc84..c6a0db5b9e21 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -21,20 +21,22 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nv10_fence.h"
+#include <nvif/push006c.h>
+
+#include <nvhw/class/cl006e.h>
+
int
nv10_fence_emit(struct nouveau_fence *fence)
{
- struct nouveau_channel *chan = fence->channel;
- int ret = RING_SPACE(chan, 2);
+ struct nvif_push *push = fence->channel->chan.push;
+ int ret = PUSH_WAIT(push, 2);
if (ret == 0) {
- BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
- OUT_RING (chan, fence->base.seqno);
- FIRE_RING (chan);
+ PUSH_MTHD(push, NV06E, SET_REFERENCE, fence->base.seqno);
+ PUSH_KICK(push);
}
return ret;
}
@@ -50,7 +52,7 @@ nv10_fence_sync(struct nouveau_fence *fence,
u32
nv10_fence_read(struct nouveau_channel *chan)
{
- return nvif_rd32(&chan->user, 0x0048);
+ return NVIF_RD32(&chan->user, NV06E, REFERENCE);
}
void
@@ -58,7 +60,7 @@ nv10_fence_context_del(struct nouveau_channel *chan)
{
struct nv10_fence_chan *fctx = chan->fence;
nouveau_fence_context_del(&fctx->base);
- nvif_object_fini(&fctx->sema);
+ nvif_object_dtor(&fctx->sema);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
}
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 5d613d43b84d..cd1e87a528a4 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -21,14 +21,16 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nv10_fence.h"
+
+#include <nvif/push006c.h>
-#include <nvif/os.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
-#include "nouveau_drv.h"
-#include "nouveau_dma.h"
-#include "nv10_fence.h"
+#include <nvhw/class/cl176e.h>
int
nv17_fence_sync(struct nouveau_fence *fence,
@@ -37,6 +39,8 @@ nv17_fence_sync(struct nouveau_fence *fence,
struct nouveau_cli *cli = (void *)prev->user.client;
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx = chan->fence;
+ struct nvif_push *ppush = prev->chan.push;
+ struct nvif_push *npush = chan->chan.push;
u32 value;
int ret;
@@ -48,23 +52,21 @@ nv17_fence_sync(struct nouveau_fence *fence,
priv->sequence += 2;
spin_unlock(&priv->lock);
- ret = RING_SPACE(prev, 5);
+ ret = PUSH_WAIT(ppush, 5);
if (!ret) {
- BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
- OUT_RING (prev, fctx->sema.handle);
- OUT_RING (prev, 0);
- OUT_RING (prev, value + 0);
- OUT_RING (prev, value + 1);
- FIRE_RING (prev);
+ PUSH_MTHD(ppush, NV176E, SET_CONTEXT_DMA_SEMAPHORE, fctx->sema.handle,
+ SEMAPHORE_OFFSET, 0,
+ SEMAPHORE_ACQUIRE, value + 0,
+ SEMAPHORE_RELEASE, value + 1);
+ PUSH_KICK(ppush);
}
- if (!ret && !(ret = RING_SPACE(chan, 5))) {
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
- OUT_RING (chan, fctx->sema.handle);
- OUT_RING (chan, 0);
- OUT_RING (chan, value + 1);
- OUT_RING (chan, value + 2);
- FIRE_RING (chan);
+ if (!ret && !(ret = PUSH_WAIT(npush, 5))) {
+ PUSH_MTHD(npush, NV176E, SET_CONTEXT_DMA_SEMAPHORE, fctx->sema.handle,
+ SEMAPHORE_OFFSET, 0,
+ SEMAPHORE_ACQUIRE, value + 1,
+ SEMAPHORE_RELEASE, value + 2);
+ PUSH_KICK(npush);
}
mutex_unlock(&cli->mutex);
@@ -90,7 +92,8 @@ nv17_fence_context_new(struct nouveau_channel *chan)
fctx->base.read = nv10_fence_read;
fctx->base.sync = nv17_fence_sync;
- ret = nvif_object_init(&chan->user, NvSema, NV_DMA_FROM_MEMORY,
+ ret = nvif_object_ctor(&chan->user, "fenceCtxDma", NvSema,
+ NV_DMA_FROM_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 47428f79ede8..71f92e4750f9 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -21,44 +21,54 @@
*
* Authors: Ben Skeggs
*/
-
+#define NVIF_DEBUG_PRINT_DISABLE
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
#include "nouveau_vmm.h"
+#include <nvif/push206e.h>
+
+#include <nvhw/class/cl502d.h>
+
int
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
+ struct nvif_push *push = chan->chan.push;
+ u32 colour;
int ret;
- ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ colour = ((uint32_t *)info->pseudo_palette)[rect->color];
+ else
+ colour = rect->color;
+
+ ret = PUSH_WAIT(push, rect->rop == ROP_COPY ? 7 : 11);
if (ret)
return ret;
if (rect->rop != ROP_COPY) {
- BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
- OUT_RING(chan, 1);
+ PUSH_MTHD(push, NV502D, SET_OPERATION,
+ NVDEF(NV502D, SET_OPERATION, V, ROP_AND));
}
- BEGIN_NV04(chan, NvSub2D, 0x0588, 1);
- if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
- info->fix.visual == FB_VISUAL_DIRECTCOLOR)
- OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
- else
- OUT_RING(chan, rect->color);
- BEGIN_NV04(chan, NvSub2D, 0x0600, 4);
- OUT_RING(chan, rect->dx);
- OUT_RING(chan, rect->dy);
- OUT_RING(chan, rect->dx + rect->width);
- OUT_RING(chan, rect->dy + rect->height);
+
+ PUSH_MTHD(push, NV502D, SET_RENDER_SOLID_PRIM_COLOR, colour);
+
+ PUSH_MTHD(push, NV502D, RENDER_SOLID_PRIM_POINT_SET_X(0), rect->dx,
+ RENDER_SOLID_PRIM_POINT_Y(0), rect->dy,
+ RENDER_SOLID_PRIM_POINT_SET_X(1), rect->dx + rect->width,
+ RENDER_SOLID_PRIM_POINT_Y(1), rect->dy + rect->height);
+
if (rect->rop != ROP_COPY) {
- BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
- OUT_RING(chan, 3);
+ PUSH_MTHD(push, NV502D, SET_OPERATION,
+ NVDEF(NV502D, SET_OPERATION, V, SRCCOPY));
}
- FIRE_RING(chan);
+
+ PUSH_KICK(push);
return 0;
}
@@ -68,25 +78,25 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
+ struct nvif_push *push = chan->chan.push;
int ret;
- ret = RING_SPACE(chan, 12);
+ ret = PUSH_WAIT(push, 12);
if (ret)
return ret;
- BEGIN_NV04(chan, NvSub2D, 0x0110, 1);
- OUT_RING(chan, 0);
- BEGIN_NV04(chan, NvSub2D, 0x08b0, 4);
- OUT_RING(chan, region->dx);
- OUT_RING(chan, region->dy);
- OUT_RING(chan, region->width);
- OUT_RING(chan, region->height);
- BEGIN_NV04(chan, NvSub2D, 0x08d0, 4);
- OUT_RING(chan, 0);
- OUT_RING(chan, region->sx);
- OUT_RING(chan, 0);
- OUT_RING(chan, region->sy);
- FIRE_RING(chan);
+ PUSH_MTHD(push, NV502D, WAIT_FOR_IDLE, 0);
+
+ PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_MEMORY_DST_X0, region->dx,
+ SET_PIXELS_FROM_MEMORY_DST_Y0, region->dy,
+ SET_PIXELS_FROM_MEMORY_DST_WIDTH, region->width,
+ SET_PIXELS_FROM_MEMORY_DST_HEIGHT, region->height);
+
+ PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC, 0,
+ SET_PIXELS_FROM_MEMORY_SRC_X0_INT, region->sx,
+ SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC, 0,
+ PIXELS_FROM_MEMORY_SRC_Y0_INT, region->sy);
+ PUSH_KICK(push);
return 0;
}
@@ -96,52 +106,54 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
+ struct nvif_push *push = chan->chan.push;
uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
- uint32_t *palette = info->pseudo_palette;
+ uint32_t *palette = info->pseudo_palette, bg, fg;
int ret;
if (image->depth != 1)
return -ENODEV;
- ret = RING_SPACE(chan, 11);
- if (ret)
- return ret;
-
- BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
- OUT_RING(chan, palette[image->bg_color] | mask);
- OUT_RING(chan, palette[image->fg_color] | mask);
+ bg = palette[image->bg_color] | mask;
+ fg = palette[image->fg_color] | mask;
} else {
- OUT_RING(chan, image->bg_color);
- OUT_RING(chan, image->fg_color);
+ bg = image->bg_color;
+ fg = image->fg_color;
}
- BEGIN_NV04(chan, NvSub2D, 0x0838, 2);
- OUT_RING(chan, image->width);
- OUT_RING(chan, image->height);
- BEGIN_NV04(chan, NvSub2D, 0x0850, 4);
- OUT_RING(chan, 0);
- OUT_RING(chan, image->dx);
- OUT_RING(chan, 0);
- OUT_RING(chan, image->dy);
+
+ ret = PUSH_WAIT(push, 11);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_CPU_COLOR0, bg,
+ SET_PIXELS_FROM_CPU_COLOR1, fg);
+
+ PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_CPU_SRC_WIDTH, image->width,
+ SET_PIXELS_FROM_CPU_SRC_HEIGHT, image->height);
+
+ PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_CPU_DST_X0_FRAC, 0,
+ SET_PIXELS_FROM_CPU_DST_X0_INT, image->dx,
+ SET_PIXELS_FROM_CPU_DST_Y0_FRAC, 0,
+ SET_PIXELS_FROM_CPU_DST_Y0_INT, image->dy);
dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
while (dwords) {
- int push = dwords > 2047 ? 2047 : dwords;
+ int count = dwords > 2047 ? 2047 : dwords;
- ret = RING_SPACE(chan, push + 1);
+ ret = PUSH_WAIT(push, count + 1);
if (ret)
return ret;
- dwords -= push;
+ dwords -= count;
- BEGIN_NI04(chan, NvSub2D, 0x0860, push);
- OUT_RINGp(chan, data, push);
- data += push;
+ PUSH_NINC(push, NV502D, PIXELS_FROM_CPU_DATA, data, count);
+ data += count;
}
- FIRE_RING(chan);
+ PUSH_KICK(push);
return 0;
}
@@ -152,26 +164,27 @@ nv50_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
+ struct nvif_push *push = chan->chan.push;
int ret, format;
switch (info->var.bits_per_pixel) {
case 8:
- format = 0xf3;
+ format = NV502D_SET_DST_FORMAT_V_Y8;
break;
case 15:
- format = 0xf8;
+ format = NV502D_SET_DST_FORMAT_V_X1R5G5B5;
break;
case 16:
- format = 0xe8;
+ format = NV502D_SET_DST_FORMAT_V_R5G6B5;
break;
case 32:
switch (info->var.transp.length) {
case 0: /* depth 24 */
case 8: /* depth 32, just use 24.. */
- format = 0xe6;
+ format = NV502D_SET_DST_FORMAT_V_X8R8G8B8;
break;
case 2: /* depth 30 */
- format = 0xd1;
+ format = NV502D_SET_DST_FORMAT_V_A2B10G10R10;
break;
default:
return -EINVAL;
@@ -181,77 +194,106 @@ nv50_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nvif_object_init(&chan->user, 0x502d, 0x502d, NULL, 0,
- &nfbdev->twod);
+ ret = nvif_object_ctor(&chan->user, "fbconTwoD", 0x502d, 0x502d,
+ NULL, 0, &nfbdev->twod);
if (ret)
return ret;
- ret = RING_SPACE(chan, 58);
+ ret = PUSH_WAIT(push, 56);
if (ret) {
nouveau_fbcon_gpu_lockup(info);
return ret;
}
- BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
- OUT_RING(chan, nfbdev->twod.handle);
- BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
- OUT_RING(chan, chan->vram.handle);
- OUT_RING(chan, chan->vram.handle);
- OUT_RING(chan, chan->vram.handle);
- BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
- OUT_RING(chan, 0);
- BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
- OUT_RING(chan, 1);
- BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
- OUT_RING(chan, 3);
- BEGIN_NV04(chan, NvSub2D, 0x02a0, 1);
- OUT_RING(chan, 0x55);
- BEGIN_NV04(chan, NvSub2D, 0x08c0, 4);
- OUT_RING(chan, 0);
- OUT_RING(chan, 1);
- OUT_RING(chan, 0);
- OUT_RING(chan, 1);
- BEGIN_NV04(chan, NvSub2D, 0x0580, 2);
- OUT_RING(chan, 4);
- OUT_RING(chan, format);
- BEGIN_NV04(chan, NvSub2D, 0x02e8, 2);
- OUT_RING(chan, 2);
- OUT_RING(chan, 1);
- BEGIN_NV04(chan, NvSub2D, 0x0804, 1);
- OUT_RING(chan, format);
- BEGIN_NV04(chan, NvSub2D, 0x0800, 1);
- OUT_RING(chan, 1);
- BEGIN_NV04(chan, NvSub2D, 0x0808, 3);
- OUT_RING(chan, 0);
- OUT_RING(chan, 0);
- OUT_RING(chan, 1);
- BEGIN_NV04(chan, NvSub2D, 0x081c, 1);
- OUT_RING(chan, 1);
- BEGIN_NV04(chan, NvSub2D, 0x0840, 4);
- OUT_RING(chan, 0);
- OUT_RING(chan, 1);
- OUT_RING(chan, 0);
- OUT_RING(chan, 1);
- BEGIN_NV04(chan, NvSub2D, 0x0200, 2);
- OUT_RING(chan, format);
- OUT_RING(chan, 1);
- BEGIN_NV04(chan, NvSub2D, 0x0214, 5);
- OUT_RING(chan, info->fix.line_length);
- OUT_RING(chan, info->var.xres_virtual);
- OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
- OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
- BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
- OUT_RING(chan, format);
- OUT_RING(chan, 1);
- BEGIN_NV04(chan, NvSub2D, 0x0244, 5);
- OUT_RING(chan, info->fix.line_length);
- OUT_RING(chan, info->var.xres_virtual);
- OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
- OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
- FIRE_RING(chan);
+ PUSH_MTHD(push, NV502D, SET_OBJECT, nfbdev->twod.handle);
+ PUSH_MTHD(push, NV502D, SET_DST_CONTEXT_DMA, chan->vram.handle,
+ SET_SRC_CONTEXT_DMA, chan->vram.handle,
+ SET_SEMAPHORE_CONTEXT_DMA, chan->vram.handle);
+
+ PUSH_MTHD(push, NV502D, SET_DST_FORMAT,
+ NVVAL(NV502D, SET_DST_FORMAT, V, format),
+
+ SET_DST_MEMORY_LAYOUT,
+ NVDEF(NV502D, SET_DST_MEMORY_LAYOUT, V, PITCH));
+
+ PUSH_MTHD(push, NV502D, SET_DST_PITCH, info->fix.line_length,
+ SET_DST_WIDTH, info->var.xres_virtual,
+ SET_DST_HEIGHT, info->var.yres_virtual,
+
+ SET_DST_OFFSET_UPPER,
+ NVVAL(NV502D, SET_DST_OFFSET_UPPER, V, upper_32_bits(nfbdev->vma->addr)),
+
+ SET_DST_OFFSET_LOWER,
+ NVVAL(NV502D, SET_DST_OFFSET_LOWER, V, lower_32_bits(nfbdev->vma->addr)));
+
+ PUSH_MTHD(push, NV502D, SET_SRC_FORMAT,
+ NVVAL(NV502D, SET_SRC_FORMAT, V, format),
+
+ SET_SRC_MEMORY_LAYOUT,
+ NVDEF(NV502D, SET_SRC_MEMORY_LAYOUT, V, PITCH));
+
+ PUSH_MTHD(push, NV502D, SET_SRC_PITCH, info->fix.line_length,
+ SET_SRC_WIDTH, info->var.xres_virtual,
+ SET_SRC_HEIGHT, info->var.yres_virtual,
+
+ SET_SRC_OFFSET_UPPER,
+ NVVAL(NV502D, SET_SRC_OFFSET_UPPER, V, upper_32_bits(nfbdev->vma->addr)),
+
+ SET_SRC_OFFSET_LOWER,
+ NVVAL(NV502D, SET_SRC_OFFSET_LOWER, V, lower_32_bits(nfbdev->vma->addr)));
+
+ PUSH_MTHD(push, NV502D, SET_CLIP_ENABLE,
+ NVDEF(NV502D, SET_CLIP_ENABLE, V, FALSE));
+
+ PUSH_MTHD(push, NV502D, SET_ROP,
+ NVVAL(NV502D, SET_ROP, V, 0x55));
+
+ PUSH_MTHD(push, NV502D, SET_OPERATION,
+ NVDEF(NV502D, SET_OPERATION, V, SRCCOPY));
+
+ PUSH_MTHD(push, NV502D, SET_MONOCHROME_PATTERN_COLOR_FORMAT,
+ NVDEF(NV502D, SET_MONOCHROME_PATTERN_COLOR_FORMAT, V, A8R8G8B8),
+
+ SET_MONOCHROME_PATTERN_FORMAT,
+ NVDEF(NV502D, SET_MONOCHROME_PATTERN_FORMAT, V, LE_M1));
+
+ PUSH_MTHD(push, NV502D, RENDER_SOLID_PRIM_MODE,
+ NVDEF(NV502D, RENDER_SOLID_PRIM_MODE, V, RECTS),
+
+ SET_RENDER_SOLID_PRIM_COLOR_FORMAT,
+ NVVAL(NV502D, SET_RENDER_SOLID_PRIM_COLOR_FORMAT, V, format));
+
+ PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_CPU_DATA_TYPE,
+ NVDEF(NV502D, SET_PIXELS_FROM_CPU_DATA_TYPE, V, INDEX),
+
+ SET_PIXELS_FROM_CPU_COLOR_FORMAT,
+ NVVAL(NV502D, SET_PIXELS_FROM_CPU_COLOR_FORMAT, V, format),
+
+ SET_PIXELS_FROM_CPU_INDEX_FORMAT,
+ NVDEF(NV502D, SET_PIXELS_FROM_CPU_INDEX_FORMAT, V, I1),
+
+ SET_PIXELS_FROM_CPU_MONO_FORMAT,
+ NVDEF(NV502D, SET_PIXELS_FROM_CPU_MONO_FORMAT, V, CGA6_M1),
+
+ SET_PIXELS_FROM_CPU_WRAP,
+ NVDEF(NV502D, SET_PIXELS_FROM_CPU_WRAP, V, WRAP_BYTE));
+
+ PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_CPU_MONO_OPACITY,
+ NVDEF(NV502D, SET_PIXELS_FROM_CPU_MONO_OPACITY, V, OPAQUE));
+
+ PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_CPU_DX_DU_FRAC, 0,
+ SET_PIXELS_FROM_CPU_DX_DU_INT, 1,
+ SET_PIXELS_FROM_CPU_DY_DV_FRAC, 0,
+ SET_PIXELS_FROM_CPU_DY_DV_INT, 1);
+
+ PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP,
+ NVDEF(NV502D, SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP, V, TRUE));
+ PUSH_MTHD(push, NV502D, SET_PIXELS_FROM_MEMORY_DU_DX_FRAC, 0,
+ SET_PIXELS_FROM_MEMORY_DU_DX_INT, 1,
+ SET_PIXELS_FROM_MEMORY_DV_DY_FRAC, 0,
+ SET_PIXELS_FROM_MEMORY_DV_DY_INT, 1);
+ PUSH_KICK(push);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index a00ecc3de053..ebb740686b44 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -51,7 +51,8 @@ nv50_fence_context_new(struct nouveau_channel *chan)
fctx->base.read = nv10_fence_read;
fctx->base.sync = nv17_fence_sync;
- ret = nvif_object_init(&chan->user, NvSema, NV_DMA_IN_MEMORY,
+ ret = nvif_object_ctor(&chan->user, "fenceCtxDma", NvSema,
+ NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index f07da00f285f..7ed36b3a6b7d 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -21,7 +21,6 @@
*
* Authors: Ben Skeggs
*/
-
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
@@ -29,20 +28,29 @@
#include "nv50_display.h"
+#include <nvif/push206e.h>
+
+#include <nvhw/class/cl826f.h>
+
static int
nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- int ret = RING_SPACE(chan, 8);
+ struct nvif_push *push = chan->chan.push;
+ int ret = PUSH_WAIT(push, 8);
if (ret == 0) {
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram.handle);
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
- OUT_RING (chan, upper_32_bits(virtual));
- OUT_RING (chan, lower_32_bits(virtual));
- OUT_RING (chan, sequence);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
- OUT_RING (chan, 0x00000000);
- FIRE_RING (chan);
+ PUSH_MTHD(push, NV826F, SET_CONTEXT_DMA_SEMAPHORE, chan->vram.handle);
+
+ PUSH_MTHD(push, NV826F, SEMAPHOREA,
+ NVVAL(NV826F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(virtual)),
+
+ SEMAPHOREB, lower_32_bits(virtual),
+ SEMAPHOREC, sequence,
+
+ SEMAPHORED,
+ NVDEF(NV826F, SEMAPHORED, OPERATION, RELEASE),
+
+ NON_STALLED_INTERRUPT, 0);
+ PUSH_KICK(push);
}
return ret;
}
@@ -50,16 +58,20 @@ nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
static int
nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- int ret = RING_SPACE(chan, 7);
+ struct nvif_push *push = chan->chan.push;
+ int ret = PUSH_WAIT(push, 7);
if (ret == 0) {
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram.handle);
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(virtual));
- OUT_RING (chan, lower_32_bits(virtual));
- OUT_RING (chan, sequence);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
- FIRE_RING (chan);
+ PUSH_MTHD(push, NV826F, SET_CONTEXT_DMA_SEMAPHORE, chan->vram.handle);
+
+ PUSH_MTHD(push, NV826F, SEMAPHOREA,
+ NVVAL(NV826F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(virtual)),
+
+ SEMAPHOREB, lower_32_bits(virtual),
+ SEMAPHOREC, sequence,
+
+ SEMAPHORED,
+ NVDEF(NV826F, SEMAPHORED, OPERATION, ACQ_GEQ));
+ PUSH_KICK(push);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index cb56163ed608..7908a1a3e00f 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -21,44 +21,54 @@
*
* Authors: Ben Skeggs
*/
-
+#define NVIF_DEBUG_PRINT_DISABLE
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
#include "nouveau_vmm.h"
+#include <nvif/push906f.h>
+
+#include <nvhw/class/cl902d.h>
+
int
nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
+ struct nvif_push *push = chan->chan.push;
+ u32 colour;
int ret;
- ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ colour = ((uint32_t *)info->pseudo_palette)[rect->color];
+ else
+ colour = rect->color;
+
+ ret = PUSH_WAIT(push, rect->rop == ROP_COPY ? 7 : 9);
if (ret)
return ret;
if (rect->rop != ROP_COPY) {
- BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
- OUT_RING (chan, 1);
+ PUSH_IMMD(push, NV902D, SET_OPERATION,
+ NVDEF(NV902D, SET_OPERATION, V, ROP_AND));
}
- BEGIN_NVC0(chan, NvSub2D, 0x0588, 1);
- if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
- info->fix.visual == FB_VISUAL_DIRECTCOLOR)
- OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
- else
- OUT_RING (chan, rect->color);
- BEGIN_NVC0(chan, NvSub2D, 0x0600, 4);
- OUT_RING (chan, rect->dx);
- OUT_RING (chan, rect->dy);
- OUT_RING (chan, rect->dx + rect->width);
- OUT_RING (chan, rect->dy + rect->height);
+
+ PUSH_MTHD(push, NV902D, SET_RENDER_SOLID_PRIM_COLOR, colour);
+
+ PUSH_MTHD(push, NV902D, RENDER_SOLID_PRIM_POINT_SET_X(0), rect->dx,
+ RENDER_SOLID_PRIM_POINT_Y(0), rect->dy,
+ RENDER_SOLID_PRIM_POINT_SET_X(1), rect->dx + rect->width,
+ RENDER_SOLID_PRIM_POINT_Y(1), rect->dy + rect->height);
+
if (rect->rop != ROP_COPY) {
- BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
- OUT_RING (chan, 3);
+ PUSH_IMMD(push, NV902D, SET_OPERATION,
+ NVDEF(NV902D, SET_OPERATION, V, SRCCOPY));
}
- FIRE_RING(chan);
+
+ PUSH_KICK(push);
return 0;
}
@@ -68,25 +78,25 @@ nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
+ struct nvif_push *push = chan->chan.push;
int ret;
- ret = RING_SPACE(chan, 12);
+ ret = PUSH_WAIT(push, 11);
if (ret)
return ret;
- BEGIN_NVC0(chan, NvSub2D, 0x0110, 1);
- OUT_RING (chan, 0);
- BEGIN_NVC0(chan, NvSub2D, 0x08b0, 4);
- OUT_RING (chan, region->dx);
- OUT_RING (chan, region->dy);
- OUT_RING (chan, region->width);
- OUT_RING (chan, region->height);
- BEGIN_NVC0(chan, NvSub2D, 0x08d0, 4);
- OUT_RING (chan, 0);
- OUT_RING (chan, region->sx);
- OUT_RING (chan, 0);
- OUT_RING (chan, region->sy);
- FIRE_RING(chan);
+ PUSH_IMMD(push, NV902D, WAIT_FOR_IDLE, 0);
+
+ PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_MEMORY_DST_X0, region->dx,
+ SET_PIXELS_FROM_MEMORY_DST_Y0, region->dy,
+ SET_PIXELS_FROM_MEMORY_DST_WIDTH, region->width,
+ SET_PIXELS_FROM_MEMORY_DST_HEIGHT, region->height);
+
+ PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC, 0,
+ SET_PIXELS_FROM_MEMORY_SRC_X0_INT, region->sx,
+ SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC, 0,
+ PIXELS_FROM_MEMORY_SRC_Y0_INT, region->sy);
+ PUSH_KICK(push);
return 0;
}
@@ -96,52 +106,54 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->helper.dev);
struct nouveau_channel *chan = drm->channel;
+ struct nvif_push *push = chan->chan.push;
uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
- uint32_t *palette = info->pseudo_palette;
+ uint32_t *palette = info->pseudo_palette, bg, fg;
int ret;
if (image->depth != 1)
return -ENODEV;
- ret = RING_SPACE(chan, 11);
- if (ret)
- return ret;
-
- BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
- OUT_RING (chan, palette[image->bg_color] | mask);
- OUT_RING (chan, palette[image->fg_color] | mask);
+ bg = palette[image->bg_color] | mask;
+ fg = palette[image->fg_color] | mask;
} else {
- OUT_RING (chan, image->bg_color);
- OUT_RING (chan, image->fg_color);
+ bg = image->bg_color;
+ fg = image->fg_color;
}
- BEGIN_NVC0(chan, NvSub2D, 0x0838, 2);
- OUT_RING (chan, image->width);
- OUT_RING (chan, image->height);
- BEGIN_NVC0(chan, NvSub2D, 0x0850, 4);
- OUT_RING (chan, 0);
- OUT_RING (chan, image->dx);
- OUT_RING (chan, 0);
- OUT_RING (chan, image->dy);
+
+ ret = PUSH_WAIT(push, 11);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_CPU_COLOR0, bg,
+ SET_PIXELS_FROM_CPU_COLOR1, fg);
+
+ PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_CPU_SRC_WIDTH, image->width,
+ SET_PIXELS_FROM_CPU_SRC_HEIGHT, image->height);
+
+ PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_CPU_DST_X0_FRAC, 0,
+ SET_PIXELS_FROM_CPU_DST_X0_INT, image->dx,
+ SET_PIXELS_FROM_CPU_DST_Y0_FRAC, 0,
+ SET_PIXELS_FROM_CPU_DST_Y0_INT, image->dy);
dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
while (dwords) {
- int push = dwords > 2047 ? 2047 : dwords;
+ int count = dwords > 2047 ? 2047 : dwords;
- ret = RING_SPACE(chan, push + 1);
+ ret = PUSH_WAIT(push, count + 1);
if (ret)
return ret;
- dwords -= push;
+ dwords -= count;
- BEGIN_NIC0(chan, NvSub2D, 0x0860, push);
- OUT_RINGp(chan, data, push);
- data += push;
+ PUSH_NINC(push, NV902D, PIXELS_FROM_CPU_DATA, data, count);
+ data += count;
}
- FIRE_RING(chan);
+ PUSH_KICK(push);
return 0;
}
@@ -152,31 +164,32 @@ nvc0_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
+ struct nvif_push *push = chan->chan.push;
int ret, format;
- ret = nvif_object_init(&chan->user, 0x902d, 0x902d, NULL, 0,
- &nfbdev->twod);
+ ret = nvif_object_ctor(&chan->user, "fbconTwoD", 0x902d, 0x902d,
+ NULL, 0, &nfbdev->twod);
if (ret)
return ret;
switch (info->var.bits_per_pixel) {
case 8:
- format = 0xf3;
+ format = NV902D_SET_DST_FORMAT_V_Y8;
break;
case 15:
- format = 0xf8;
+ format = NV902D_SET_DST_FORMAT_V_X1R5G5B5;
break;
case 16:
- format = 0xe8;
+ format = NV902D_SET_DST_FORMAT_V_R5G6B5;
break;
case 32:
switch (info->var.transp.length) {
case 0: /* depth 24 */
case 8: /* depth 32, just use 24.. */
- format = 0xe6;
+ format = NV902D_SET_DST_FORMAT_V_X8R8G8B8;
break;
case 2: /* depth 30 */
- format = 0xd1;
+ format = NV902D_SET_DST_FORMAT_V_A2B10G10R10;
break;
default:
return -EINVAL;
@@ -186,74 +199,99 @@ nvc0_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = RING_SPACE(chan, 58);
+ ret = PUSH_WAIT(push, 52);
if (ret) {
WARN_ON(1);
nouveau_fbcon_gpu_lockup(info);
return ret;
}
- BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
- OUT_RING (chan, nfbdev->twod.handle);
- BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
- OUT_RING (chan, 0);
- BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
- OUT_RING (chan, 1);
- BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
- OUT_RING (chan, 3);
- BEGIN_NVC0(chan, NvSub2D, 0x02a0, 1);
- OUT_RING (chan, 0x55);
- BEGIN_NVC0(chan, NvSub2D, 0x08c0, 4);
- OUT_RING (chan, 0);
- OUT_RING (chan, 1);
- OUT_RING (chan, 0);
- OUT_RING (chan, 1);
- BEGIN_NVC0(chan, NvSub2D, 0x0580, 2);
- OUT_RING (chan, 4);
- OUT_RING (chan, format);
- BEGIN_NVC0(chan, NvSub2D, 0x02e8, 2);
- OUT_RING (chan, 2);
- OUT_RING (chan, 1);
-
- BEGIN_NVC0(chan, NvSub2D, 0x0804, 1);
- OUT_RING (chan, format);
- BEGIN_NVC0(chan, NvSub2D, 0x0800, 1);
- OUT_RING (chan, 1);
- BEGIN_NVC0(chan, NvSub2D, 0x0808, 3);
- OUT_RING (chan, 0);
- OUT_RING (chan, 0);
- OUT_RING (chan, 1);
- BEGIN_NVC0(chan, NvSub2D, 0x081c, 1);
- OUT_RING (chan, 1);
- BEGIN_NVC0(chan, NvSub2D, 0x0840, 4);
- OUT_RING (chan, 0);
- OUT_RING (chan, 1);
- OUT_RING (chan, 0);
- OUT_RING (chan, 1);
- BEGIN_NVC0(chan, NvSub2D, 0x0200, 10);
- OUT_RING (chan, format);
- OUT_RING (chan, 1);
- OUT_RING (chan, 0);
- OUT_RING (chan, 1);
- OUT_RING (chan, 0);
- OUT_RING (chan, info->fix.line_length);
- OUT_RING (chan, info->var.xres_virtual);
- OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(nfbdev->vma->addr));
- OUT_RING (chan, lower_32_bits(nfbdev->vma->addr));
- BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
- OUT_RING (chan, format);
- OUT_RING (chan, 1);
- OUT_RING (chan, 0);
- OUT_RING (chan, 1);
- OUT_RING (chan, 0);
- OUT_RING (chan, info->fix.line_length);
- OUT_RING (chan, info->var.xres_virtual);
- OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(nfbdev->vma->addr));
- OUT_RING (chan, lower_32_bits(nfbdev->vma->addr));
- FIRE_RING (chan);
+ PUSH_MTHD(push, NV902D, SET_OBJECT, nfbdev->twod.handle);
+
+ PUSH_MTHD(push, NV902D, SET_DST_FORMAT,
+ NVVAL(NV902D, SET_DST_FORMAT, V, format),
+
+ SET_DST_MEMORY_LAYOUT,
+ NVDEF(NV902D, SET_DST_MEMORY_LAYOUT, V, PITCH));
+
+ PUSH_MTHD(push, NV902D, SET_DST_PITCH, info->fix.line_length,
+ SET_DST_WIDTH, info->var.xres_virtual,
+ SET_DST_HEIGHT, info->var.yres_virtual,
+
+ SET_DST_OFFSET_UPPER,
+ NVVAL(NV902D, SET_DST_OFFSET_UPPER, V, upper_32_bits(nfbdev->vma->addr)),
+
+ SET_DST_OFFSET_LOWER,
+ NVVAL(NV902D, SET_DST_OFFSET_LOWER, V, lower_32_bits(nfbdev->vma->addr)));
+
+ PUSH_MTHD(push, NV902D, SET_SRC_FORMAT,
+ NVVAL(NV902D, SET_SRC_FORMAT, V, format),
+
+ SET_SRC_MEMORY_LAYOUT,
+ NVDEF(NV902D, SET_SRC_MEMORY_LAYOUT, V, PITCH));
+
+ PUSH_MTHD(push, NV902D, SET_SRC_PITCH, info->fix.line_length,
+ SET_SRC_WIDTH, info->var.xres_virtual,
+ SET_SRC_HEIGHT, info->var.yres_virtual,
+
+ SET_SRC_OFFSET_UPPER,
+ NVVAL(NV902D, SET_SRC_OFFSET_UPPER, V, upper_32_bits(nfbdev->vma->addr)),
+
+ SET_SRC_OFFSET_LOWER,
+ NVVAL(NV902D, SET_SRC_OFFSET_LOWER, V, lower_32_bits(nfbdev->vma->addr)));
+
+ PUSH_IMMD(push, NV902D, SET_CLIP_ENABLE,
+ NVDEF(NV902D, SET_CLIP_ENABLE, V, FALSE));
+
+ PUSH_IMMD(push, NV902D, SET_ROP,
+ NVVAL(NV902D, SET_ROP, V, 0x55));
+
+ PUSH_IMMD(push, NV902D, SET_OPERATION,
+ NVDEF(NV902D, SET_OPERATION, V, SRCCOPY));
+
+ PUSH_MTHD(push, NV902D, SET_MONOCHROME_PATTERN_COLOR_FORMAT,
+ NVDEF(NV902D, SET_MONOCHROME_PATTERN_COLOR_FORMAT, V, A8R8G8B8),
+
+ SET_MONOCHROME_PATTERN_FORMAT,
+ NVDEF(NV902D, SET_MONOCHROME_PATTERN_FORMAT, V, LE_M1));
+
+ PUSH_MTHD(push, NV902D, RENDER_SOLID_PRIM_MODE,
+ NVDEF(NV902D, RENDER_SOLID_PRIM_MODE, V, RECTS),
+
+ SET_RENDER_SOLID_PRIM_COLOR_FORMAT,
+ NVVAL(NV902D, SET_RENDER_SOLID_PRIM_COLOR_FORMAT, V, format));
+
+ PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_CPU_DATA_TYPE,
+ NVDEF(NV902D, SET_PIXELS_FROM_CPU_DATA_TYPE, V, INDEX),
+
+ SET_PIXELS_FROM_CPU_COLOR_FORMAT,
+ NVVAL(NV902D, SET_PIXELS_FROM_CPU_COLOR_FORMAT, V, format),
+
+ SET_PIXELS_FROM_CPU_INDEX_FORMAT,
+ NVDEF(NV902D, SET_PIXELS_FROM_CPU_INDEX_FORMAT, V, I1),
+
+ SET_PIXELS_FROM_CPU_MONO_FORMAT,
+ NVDEF(NV902D, SET_PIXELS_FROM_CPU_MONO_FORMAT, V, CGA6_M1),
+
+ SET_PIXELS_FROM_CPU_WRAP,
+ NVDEF(NV902D, SET_PIXELS_FROM_CPU_WRAP, V, WRAP_BYTE));
+
+ PUSH_IMMD(push, NV902D, SET_PIXELS_FROM_CPU_MONO_OPACITY,
+ NVDEF(NV902D, SET_PIXELS_FROM_CPU_MONO_OPACITY, V, OPAQUE));
+
+ PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_CPU_DX_DU_FRAC, 0,
+ SET_PIXELS_FROM_CPU_DX_DU_INT, 1,
+ SET_PIXELS_FROM_CPU_DY_DV_FRAC, 0,
+ SET_PIXELS_FROM_CPU_DY_DV_INT, 1);
+
+ PUSH_IMMD(push, NV902D, SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP,
+ NVDEF(NV902D, SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP, V, TRUE));
+ PUSH_MTHD(push, NV902D, SET_PIXELS_FROM_MEMORY_DU_DX_FRAC, 0,
+ SET_PIXELS_FROM_MEMORY_DU_DX_INT, 1,
+ SET_PIXELS_FROM_MEMORY_DV_DY_FRAC, 0,
+ SET_PIXELS_FROM_MEMORY_DV_DY_INT, 1);
+ PUSH_KICK(push);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index b79775788bbd..e1461c0b0779 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -21,25 +21,35 @@
*
* Authors: Ben Skeggs
*/
-
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nv50_display.h"
+#include <nvif/push906f.h>
+
+#include <nvhw/class/cl906f.h>
+
static int
nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- int ret = RING_SPACE(chan, 6);
+ struct nvif_push *push = chan->chan.push;
+ int ret = PUSH_WAIT(push, 6);
if (ret == 0) {
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
- OUT_RING (chan, upper_32_bits(virtual));
- OUT_RING (chan, lower_32_bits(virtual));
- OUT_RING (chan, sequence);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
- OUT_RING (chan, 0x00000000);
- FIRE_RING (chan);
+ PUSH_MTHD(push, NV906F, SEMAPHOREA,
+ NVVAL(NV906F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(virtual)),
+
+ SEMAPHOREB, lower_32_bits(virtual),
+ SEMAPHOREC, sequence,
+
+ SEMAPHORED,
+ NVDEF(NV906F, SEMAPHORED, OPERATION, RELEASE) |
+ NVDEF(NV906F, SEMAPHORED, RELEASE_WFI, EN) |
+ NVDEF(NV906F, SEMAPHORED, RELEASE_SIZE, 16BYTE),
+
+ NON_STALL_INTERRUPT, 0);
+ PUSH_KICK(push);
}
return ret;
}
@@ -47,15 +57,19 @@ nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
static int
nvc0_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- int ret = RING_SPACE(chan, 5);
+ struct nvif_push *push = chan->chan.push;
+ int ret = PUSH_WAIT(push, 5);
if (ret == 0) {
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(virtual));
- OUT_RING (chan, lower_32_bits(virtual));
- OUT_RING (chan, sequence);
- OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL |
- NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
- FIRE_RING (chan);
+ PUSH_MTHD(push, NV906F, SEMAPHOREA,
+ NVVAL(NV906F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(virtual)),
+
+ SEMAPHOREB, lower_32_bits(virtual),
+ SEMAPHOREC, sequence,
+
+ SEMAPHORED,
+ NVDEF(NV906F, SEMAPHORED, OPERATION, ACQ_GEQ) |
+ NVDEF(NV906F, SEMAPHORED, ACQUIRE_SWITCH, ENABLED));
+ PUSH_KICK(push);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 12db54965c20..12644f811b3e 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -48,9 +48,9 @@ nvif_client_resume(struct nvif_client *client)
}
void
-nvif_client_fini(struct nvif_client *client)
+nvif_client_dtor(struct nvif_client *client)
{
- nvif_object_fini(&client->object);
+ nvif_object_dtor(&client->object);
if (client->driver) {
if (client->driver->fini)
client->driver->fini(client->object.priv);
@@ -59,7 +59,7 @@ nvif_client_fini(struct nvif_client *client)
}
int
-nvif_client_init(struct nvif_client *parent, const char *name, u64 device,
+nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
struct nvif_client *client)
{
struct nvif_client_v0 args = { .device = device };
@@ -70,8 +70,9 @@ nvif_client_init(struct nvif_client *parent, const char *name, u64 device,
int ret;
strncpy(args.name, name, sizeof(args.name));
- ret = nvif_object_init(parent != client ? &parent->object : NULL,
- 0, NVIF_CLASS_CLIENT, &args, sizeof(args),
+ ret = nvif_object_ctor(parent != client ? &parent->object : NULL,
+ name ? name : "nvifClient", 0,
+ NVIF_CLASS_CLIENT, &args, sizeof(args),
&client->object);
if (ret)
return ret;
@@ -88,6 +89,6 @@ nvif_client_init(struct nvif_client *parent, const char *name, u64 device,
}
if (ret)
- nvif_client_fini(client);
+ nvif_client_dtor(client);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index 0e92db44bbc8..8c3d883f3313 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -39,20 +39,20 @@ nvif_device_time(struct nvif_device *device)
}
void
-nvif_device_fini(struct nvif_device *device)
+nvif_device_dtor(struct nvif_device *device)
{
- nvif_user_fini(device);
+ nvif_user_dtor(device);
kfree(device->runlist);
device->runlist = NULL;
- nvif_object_fini(&device->object);
+ nvif_object_dtor(&device->object);
}
int
-nvif_device_init(struct nvif_object *parent, u32 handle, s32 oclass,
- void *data, u32 size, struct nvif_device *device)
+nvif_device_ctor(struct nvif_object *parent, const char *name, u32 handle,
+ s32 oclass, void *data, u32 size, struct nvif_device *device)
{
- int ret = nvif_object_init(parent, handle, oclass, data, size,
- &device->object);
+ int ret = nvif_object_ctor(parent, name ? name : "nvifDevice", handle,
+ oclass, data, size, &device->object);
device->runlist = NULL;
device->user.func = NULL;
if (ret == 0) {
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index 61638b3b9d3d..8d0d30e08f57 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -27,11 +27,12 @@
void
nvif_disp_dtor(struct nvif_disp *disp)
{
- nvif_object_fini(&disp->object);
+ nvif_object_dtor(&disp->object);
}
int
-nvif_disp_ctor(struct nvif_device *device, s32 oclass, struct nvif_disp *disp)
+nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass,
+ struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
{ TU102_DISP, -1 },
@@ -56,6 +57,6 @@ nvif_disp_ctor(struct nvif_device *device, s32 oclass, struct nvif_disp *disp)
if (cid < 0)
return cid;
- return nvif_object_init(&device->object, 0, disps[cid].oclass,
- NULL, 0, &disp->object);
+ return nvif_object_ctor(&device->object, name ? name : "nvifDisp", 0,
+ disps[cid].oclass, NULL, 0, &disp->object);
}
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.c b/drivers/gpu/drm/nouveau/nvif/driver.c
index 701330956e33..5e00dd07afed 100644
--- a/drivers/gpu/drm/nouveau/nvif/driver.c
+++ b/drivers/gpu/drm/nouveau/nvif/driver.c
@@ -53,6 +53,6 @@ nvif_driver_init(const char *drv, const char *cfg, const char *dbg,
}
if (ret == 0)
- ret = nvif_client_init(client, name, device, client);
+ ret = nvif_client_ctor(client, name, device, client);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/mem.c b/drivers/gpu/drm/nouveau/nvif/mem.c
index b6ebb3b58673..0e1b7b4c2e91 100644
--- a/drivers/gpu/drm/nouveau/nvif/mem.c
+++ b/drivers/gpu/drm/nouveau/nvif/mem.c
@@ -25,27 +25,29 @@
#include <nvif/if000a.h>
int
-nvif_mem_init_map(struct nvif_mmu *mmu, u8 type, u64 size, struct nvif_mem *mem)
+nvif_mem_ctor_map(struct nvif_mmu *mmu, const char *name, u8 type, u64 size,
+ struct nvif_mem *mem)
{
- int ret = nvif_mem_init(mmu, mmu->mem, NVIF_MEM_MAPPABLE | type, 0,
- size, NULL, 0, mem);
+ int ret = nvif_mem_ctor(mmu, name, mmu->mem, NVIF_MEM_MAPPABLE | type,
+ 0, size, NULL, 0, mem);
if (ret == 0) {
ret = nvif_object_map(&mem->object, NULL, 0);
if (ret)
- nvif_mem_fini(mem);
+ nvif_mem_dtor(mem);
}
return ret;
}
void
-nvif_mem_fini(struct nvif_mem *mem)
+nvif_mem_dtor(struct nvif_mem *mem)
{
- nvif_object_fini(&mem->object);
+ nvif_object_dtor(&mem->object);
}
int
-nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
- u64 size, void *argv, u32 argc, struct nvif_mem *mem)
+nvif_mem_ctor_type(struct nvif_mmu *mmu, const char *name, s32 oclass,
+ int type, u8 page, u64 size, void *argv, u32 argc,
+ struct nvif_mem *mem)
{
struct nvif_mem_v0 *args;
u8 stack[128];
@@ -67,8 +69,8 @@ nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
args->size = size;
memcpy(args->data, argv, argc);
- ret = nvif_object_init(&mmu->object, 0, oclass, args,
- sizeof(*args) + argc, &mem->object);
+ ret = nvif_object_ctor(&mmu->object, name ? name : "nvifMem", 0, oclass,
+ args, sizeof(*args) + argc, &mem->object);
if (ret == 0) {
mem->type = mmu->type[type].type;
mem->page = args->page;
@@ -83,8 +85,8 @@ nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
}
int
-nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
- u64 size, void *argv, u32 argc, struct nvif_mem *mem)
+nvif_mem_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, u8 type,
+ u8 page, u64 size, void *argv, u32 argc, struct nvif_mem *mem)
{
int ret = -EINVAL, i;
@@ -92,8 +94,8 @@ nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
for (i = 0; ret && i < mmu->type_nr; i++) {
if ((mmu->type[i].type & type) == type) {
- ret = nvif_mem_init_type(mmu, oclass, i, page, size,
- argv, argc, mem);
+ ret = nvif_mem_ctor_type(mmu, name, oclass, i, page,
+ size, argv, argc, mem);
}
}
diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c
index 47efc408efa6..3709cbbc19a1 100644
--- a/drivers/gpu/drm/nouveau/nvif/mmu.c
+++ b/drivers/gpu/drm/nouveau/nvif/mmu.c
@@ -25,16 +25,17 @@
#include <nvif/if0008.h>
void
-nvif_mmu_fini(struct nvif_mmu *mmu)
+nvif_mmu_dtor(struct nvif_mmu *mmu)
{
kfree(mmu->kind);
kfree(mmu->type);
kfree(mmu->heap);
- nvif_object_fini(&mmu->object);
+ nvif_object_dtor(&mmu->object);
}
int
-nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
+nvif_mmu_ctor(struct nvif_object *parent, const char *name, s32 oclass,
+ struct nvif_mmu *mmu)
{
static const struct nvif_mclass mems[] = {
{ NVIF_CLASS_MEM_GF100, -1 },
@@ -50,8 +51,8 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
mmu->type = NULL;
mmu->kind = NULL;
- ret = nvif_object_init(parent, 0, oclass, &args, sizeof(args),
- &mmu->object);
+ ret = nvif_object_ctor(parent, name ? name : "nvifMmu", 0, oclass,
+ &args, sizeof(args), &mmu->object);
if (ret)
goto done;
@@ -127,6 +128,6 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
done:
if (ret)
- nvif_mmu_fini(mmu);
+ nvif_mmu_dtor(mmu);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c
index 278b3933dc96..143c8dc6889e 100644
--- a/drivers/gpu/drm/nouveau/nvif/notify.c
+++ b/drivers/gpu/drm/nouveau/nvif/notify.c
@@ -142,7 +142,7 @@ nvif_notify(const void *header, u32 length, const void *data, u32 size)
}
int
-nvif_notify_fini(struct nvif_notify *notify)
+nvif_notify_dtor(struct nvif_notify *notify)
{
struct nvif_object *object = notify->object;
struct {
@@ -162,9 +162,9 @@ nvif_notify_fini(struct nvif_notify *notify)
}
int
-nvif_notify_init(struct nvif_object *object, int (*func)(struct nvif_notify *),
- bool work, u8 event, void *data, u32 size, u32 reply,
- struct nvif_notify *notify)
+nvif_notify_ctor(struct nvif_object *object, const char *name,
+ int (*func)(struct nvif_notify *), bool work, u8 event,
+ void *data, u32 size, u32 reply, struct nvif_notify *notify)
{
struct {
struct nvif_ioctl_v0 ioctl;
@@ -174,6 +174,7 @@ nvif_notify_init(struct nvif_object *object, int (*func)(struct nvif_notify *),
int ret = -ENOMEM;
notify->object = object;
+ notify->name = name ? name : "nvifNotify";
notify->flags = 0;
atomic_set(&notify->putcnt, 1);
notify->func = func;
@@ -204,6 +205,6 @@ nvif_notify_init(struct nvif_object *object, int (*func)(struct nvif_notify *),
kfree(args);
done:
if (ret)
- nvif_notify_fini(notify);
+ nvif_notify_dtor(notify);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index ef3f62840e83..671a5c0199e0 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -242,7 +242,7 @@ nvif_object_map(struct nvif_object *object, void *argv, u32 argc)
}
void
-nvif_object_fini(struct nvif_object *object)
+nvif_object_dtor(struct nvif_object *object)
{
struct {
struct nvif_ioctl_v0 ioctl;
@@ -260,8 +260,8 @@ nvif_object_fini(struct nvif_object *object)
}
int
-nvif_object_init(struct nvif_object *parent, u32 handle, s32 oclass,
- void *data, u32 size, struct nvif_object *object)
+nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
+ s32 oclass, void *data, u32 size, struct nvif_object *object)
{
struct {
struct nvif_ioctl_v0 ioctl;
@@ -270,6 +270,7 @@ nvif_object_init(struct nvif_object *parent, u32 handle, s32 oclass,
int ret = 0;
object->client = NULL;
+ object->name = name ? name : "nvifObject";
object->handle = handle;
object->oclass = oclass;
object->map.ptr = NULL;
@@ -277,10 +278,12 @@ nvif_object_init(struct nvif_object *parent, u32 handle, s32 oclass,
if (parent) {
if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) {
- nvif_object_fini(object);
+ nvif_object_dtor(object);
return -ENOMEM;
}
+ object->parent = parent->parent;
+
args->ioctl.version = 0;
args->ioctl.type = NVIF_IOCTL_V0_NEW;
args->new.version = 0;
@@ -300,6 +303,6 @@ nvif_object_init(struct nvif_object *parent, u32 handle, s32 oclass,
}
if (ret)
- nvif_object_fini(object);
+ nvif_object_dtor(object);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/user.c b/drivers/gpu/drm/nouveau/nvif/user.c
index 10da3cdca647..d89f5b67b304 100644
--- a/drivers/gpu/drm/nouveau/nvif/user.c
+++ b/drivers/gpu/drm/nouveau/nvif/user.c
@@ -25,16 +25,16 @@
#include <nvif/class.h>
void
-nvif_user_fini(struct nvif_device *device)
+nvif_user_dtor(struct nvif_device *device)
{
if (device->user.func) {
- nvif_object_fini(&device->user.object);
+ nvif_object_dtor(&device->user.object);
device->user.func = NULL;
}
}
int
-nvif_user_init(struct nvif_device *device)
+nvif_user_ctor(struct nvif_device *device, const char *name)
{
struct {
s32 oclass;
@@ -53,7 +53,8 @@ nvif_user_init(struct nvif_device *device)
if (cid < 0)
return cid;
- ret = nvif_object_init(&device->object, 0, users[cid].oclass, NULL, 0,
+ ret = nvif_object_ctor(&device->object, name ? name : "nvifUsermode",
+ 0, users[cid].oclass, NULL, 0,
&device->user.object);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
index 11487c00b909..6053d6dc2184 100644
--- a/drivers/gpu/drm/nouveau/nvif/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
@@ -105,15 +105,15 @@ nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
}
void
-nvif_vmm_fini(struct nvif_vmm *vmm)
+nvif_vmm_dtor(struct nvif_vmm *vmm)
{
kfree(vmm->page);
- nvif_object_fini(&vmm->object);
+ nvif_object_dtor(&vmm->object);
}
int
-nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, bool managed, u64 addr,
- u64 size, void *argv, u32 argc, struct nvif_vmm *vmm)
+nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, bool managed,
+ u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *vmm)
{
struct nvif_vmm_v0 *args;
u32 argn = sizeof(*args) + argc;
@@ -130,8 +130,8 @@ nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, bool managed, u64 addr,
args->size = size;
memcpy(args->data, argv, argc);
- ret = nvif_object_init(&mmu->object, 0, oclass, args, argn,
- &vmm->object);
+ ret = nvif_object_ctor(&mmu->object, name ? name : "nvifVmm", 0,
+ oclass, args, argn, &vmm->object);
if (ret)
goto done;
@@ -163,7 +163,7 @@ nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, bool managed, u64 addr,
done:
if (ret)
- nvif_vmm_fini(vmm);
+ nvif_vmm_dtor(vmm);
kfree(args);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 5b90c2a1bf3d..9f4ac2672cf2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2046,7 +2046,7 @@ nv120_chipset = {
.mmu = gm200_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
- .pmu = gm107_pmu_new,
+ .pmu = gm200_pmu_new,
.therm = gm200_therm_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
@@ -2084,7 +2084,7 @@ nv124_chipset = {
.mmu = gm200_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
- .pmu = gm107_pmu_new,
+ .pmu = gm200_pmu_new,
.therm = gm200_therm_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
@@ -2122,7 +2122,7 @@ nv126_chipset = {
.mmu = gm200_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
- .pmu = gm107_pmu_new,
+ .pmu = gm200_pmu_new,
.therm = gm200_therm_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
@@ -2184,7 +2184,7 @@ nv130_chipset = {
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
.pci = gp100_pci_new,
- .pmu = gp100_pmu_new,
+ .pmu = gm200_pmu_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.ce[0] = gp100_ce_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index e55054b7329f..9cf2cfe2010c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -21,6 +21,8 @@ struct nv50_disp_chan {
struct nvkm_memory *memory;
u64 push;
+
+ u32 suspend_put;
};
struct nv50_disp_chan_func {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
index d162b9cf4eac..689e3cdd959a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
@@ -182,6 +182,8 @@ gf119_disp_core_fini(struct nv50_disp_chan *chan)
nvkm_error(subdev, "core fini: %08x\n",
nvkm_rd32(device, 0x610490));
}
+
+ chan->suspend_put = nvkm_rd32(device, 0x640000);
}
static int
@@ -195,7 +197,7 @@ gf119_disp_core_init(struct nv50_disp_chan *chan)
nvkm_wr32(device, 0x610498, 0x00010000);
nvkm_wr32(device, 0x61049c, 0x00000001);
nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x640000, 0x00000000);
+ nvkm_wr32(device, 0x640000, chan->suspend_put);
nvkm_wr32(device, 0x610490, 0x01000013);
/* wait for it to go inactive */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
index 5b7f993c73c7..1b435beef3bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
@@ -36,7 +36,7 @@ gp102_disp_core_init(struct nv50_disp_chan *chan)
nvkm_wr32(device, 0x611498, 0x00010000);
nvkm_wr32(device, 0x61149c, 0x00000001);
nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x640000, 0x00000000);
+ nvkm_wr32(device, 0x640000, chan->suspend_put);
nvkm_wr32(device, 0x610490, 0x01000013);
/* wait for it to go inactive */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c
index 4592d0e69fec..e20a48f201f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c
@@ -167,6 +167,7 @@ gv100_disp_core_fini(struct nv50_disp_chan *chan)
nvkm_mask(device, 0x6104e0, 0x00000010, 0x00000000);
gv100_disp_core_idle(chan);
nvkm_mask(device, 0x6104e0, 0x00000002, 0x00000000);
+ chan->suspend_put = nvkm_rd32(device, 0x680000);
}
static int
@@ -181,7 +182,7 @@ gv100_disp_core_init(struct nv50_disp_chan *chan)
nvkm_wr32(device, 0x610b2c, 0x00000040);
nvkm_mask(device, 0x6104e0, 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x680000, 0x00000000);
+ nvkm_wr32(device, 0x680000, chan->suspend_put);
nvkm_wr32(device, 0x6104e0, 0x00000013);
return gv100_disp_core_idle(chan);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
index 55db9a22b4be..660310b27f9c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
@@ -179,6 +179,8 @@ nv50_disp_core_fini(struct nv50_disp_chan *chan)
nvkm_error(subdev, "core fini: %08x\n",
nvkm_rd32(device, 0x610200));
}
+
+ chan->suspend_put = nvkm_rd32(device, 0x640000);
}
static int
@@ -198,7 +200,7 @@ nv50_disp_core_init(struct nv50_disp_chan *chan)
nvkm_wr32(device, 0x610208, 0x00010000);
nvkm_wr32(device, 0x61020c, 0x00000000);
nvkm_mask(device, 0x610200, 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x640000, 0x00000000);
+ nvkm_wr32(device, 0x640000, chan->suspend_put);
nvkm_wr32(device, 0x610200, 0x01000013);
/* wait for it to go inactive */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index edf7dd0d931d..76425e8586da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -53,6 +53,8 @@ gf119_disp_dmac_fini(struct nv50_disp_chan *chan)
nvkm_error(subdev, "ch %d fini: %08x\n", user,
nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
}
+
+ chan->suspend_put = nvkm_rd32(device, 0x640000 + (ctrl * 0x1000));
}
static int
@@ -68,7 +70,7 @@ gf119_disp_dmac_init(struct nv50_disp_chan *chan)
nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000);
nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001);
nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), chan->suspend_put);
nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
/* wait for it to go inactive */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
index f21a433199aa..da258df268d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
@@ -38,7 +38,7 @@ gp102_disp_dmac_init(struct nv50_disp_chan *chan)
nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000);
nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001);
nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), chan->suspend_put);
nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
/* wait for it to go inactive */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgv100.c
index eac0e42da354..fdb624ac6b87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgv100.c
@@ -50,10 +50,12 @@ void
gv100_disp_dmac_fini(struct nv50_disp_chan *chan)
{
struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+ const u32 uoff = (chan->chid.ctrl - 1) * 0x1000;
const u32 coff = chan->chid.ctrl * 0x04;
nvkm_mask(device, 0x6104e0 + coff, 0x00000010, 0x00000000);
gv100_disp_dmac_idle(chan);
nvkm_mask(device, 0x6104e0 + coff, 0x00000002, 0x00000000);
+ chan->suspend_put = nvkm_rd32(device, 0x690000 + uoff);
}
int
@@ -71,7 +73,7 @@ gv100_disp_dmac_init(struct nv50_disp_chan *chan)
nvkm_wr32(device, 0x610b2c + poff, 0x00000040);
nvkm_mask(device, 0x6104e0 + coff, 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x690000 + uoff, 0x00000000);
+ nvkm_wr32(device, 0x690000 + uoff, chan->suspend_put);
nvkm_wr32(device, 0x6104e0 + coff, 0x00000013);
return gv100_disp_dmac_idle(chan);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index 9e8a9d7a9b68..d0a7da96d62b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -94,6 +94,8 @@ nv50_disp_dmac_fini(struct nv50_disp_chan *chan)
nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
}
+
+ chan->suspend_put = nvkm_rd32(device, 0x640000 + (ctrl * 0x1000));
}
static int
@@ -109,7 +111,7 @@ nv50_disp_dmac_init(struct nv50_disp_chan *chan)
nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
- nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), chan->suspend_put);
nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);
/* wait for it to go inactive */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
index 7147dc6d9018..1ccfc8314812 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
@@ -23,55 +23,55 @@ void pack_hdmi_infoframe(struct packed_hdmi_infoframe *packed_frame,
*/
case 17:
subpack1_high = (raw_frame[16] << 16);
- /* fall through */
+ fallthrough;
case 16:
subpack1_high |= (raw_frame[15] << 8);
- /* fall through */
+ fallthrough;
case 15:
subpack1_high |= raw_frame[14];
- /* fall through */
+ fallthrough;
case 14:
subpack1_low = (raw_frame[13] << 24);
- /* fall through */
+ fallthrough;
case 13:
subpack1_low |= (raw_frame[12] << 16);
- /* fall through */
+ fallthrough;
case 12:
subpack1_low |= (raw_frame[11] << 8);
- /* fall through */
+ fallthrough;
case 11:
subpack1_low |= raw_frame[10];
- /* fall through */
+ fallthrough;
case 10:
subpack0_high = (raw_frame[9] << 16);
- /* fall through */
+ fallthrough;
case 9:
subpack0_high |= (raw_frame[8] << 8);
- /* fall through */
+ fallthrough;
case 8:
subpack0_high |= raw_frame[7];
- /* fall through */
+ fallthrough;
case 7:
subpack0_low = (raw_frame[6] << 24);
- /* fall through */
+ fallthrough;
case 6:
subpack0_low |= (raw_frame[5] << 16);
- /* fall through */
+ fallthrough;
case 5:
subpack0_low |= (raw_frame[4] << 8);
- /* fall through */
+ fallthrough;
case 4:
subpack0_low |= raw_frame[3];
- /* fall through */
+ fallthrough;
case 3:
header = (raw_frame[2] << 16);
- /* fall through */
+ fallthrough;
case 2:
header |= (raw_frame[1] << 8);
- /* fall through */
+ fallthrough;
case 1:
header |= raw_frame[0];
- /* fall through */
+ fallthrough;
case 0:
break;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c
index bf6d41fb0c9f..bb32befa6ad4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c
@@ -24,7 +24,7 @@
#include "hdmi.h"
void
-gm200_hdmi_scdc(struct nvkm_ior *ior, int head, u8 scdc)
+gm200_hdmi_scdc(struct nvkm_ior *ior, u8 scdc)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(ior);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 1a200a9ba4e4..09f3038eff26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -66,7 +66,7 @@ struct nvkm_ior_func {
void (*ctrl)(struct nvkm_ior *, int head, bool enable,
u8 max_ac_packet, u8 rekey, u8 *avi, u8 avi_size,
u8 *vendor, u8 vendor_size);
- void (*scdc)(struct nvkm_ior *, int head, u8 scdc);
+ void (*scdc)(struct nvkm_ior *, u8 scdc);
} hdmi;
struct {
@@ -156,7 +156,7 @@ void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gk104_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gv100_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
-void gm200_hdmi_scdc(struct nvkm_ior *, int, u8);
+void gm200_hdmi_scdc(struct nvkm_ior *, u8);
void gt215_hda_hpd(struct nvkm_ior *, int, bool);
void gt215_hda_eld(struct nvkm_ior *, int, u8 *, u8);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index fb5de44e4b8d..ecde98dd2454 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -205,8 +205,7 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
vendor, vendor_size);
if (outp->ior->func->hdmi.scdc)
- outp->ior->func->hdmi.scdc(
- outp->ior, hidx, args->v0.scdc);
+ outp->ior->func->hdmi.scdc(outp->ior, args->v0.scdc);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
index 7f1adab21a5f..5159d5df20a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
@@ -122,7 +122,7 @@ nv04_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
break;
case NV_MEM_ACCESS_WO:
dmaobj->flags0 |= 0x00008000;
- /* fall through */
+ fallthrough;
case NV_MEM_ACCESS_RW:
dmaobj->flags2 |= 0x00000002;
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index 93493b335d76..c1d1b1aa5bc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -117,10 +117,10 @@ nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
switch (mthd) {
case 0x0000 ... 0x0000: /* subchannel's engine -> software */
nvkm_wr32(device, 0x003280, (engine &= ~mask));
- /* fall through */
+ fallthrough;
case 0x0180 ... 0x01fc: /* handle -> instance */
data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
- /* fall through */
+ fallthrough;
case 0x0100 ... 0x017c:
case 0x0200 ... 0x1ffc: /* pass method down to sw */
if (!(engine & mask) && sw)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index 47c16821c37f..2d61fd832ddb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -81,7 +81,7 @@ nv40_fifo_init(struct nvkm_fifo *base)
case 0x49:
case 0x4b:
nvkm_wr32(device, 0x002230, 0x00000001);
- /* fall through */
+ fallthrough;
case 0x40:
case 0x41:
case 0x42:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index f2f5636efac4..749f73fc45a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -741,7 +741,7 @@ gf100_gr_fecs_ctrl_ctxsw(struct gf100_gr *gr, u32 mthd)
return -ETIMEDOUT;
}
-int
+static int
gf100_gr_fecs_start_ctxsw(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
@@ -756,7 +756,7 @@ gf100_gr_fecs_start_ctxsw(struct nvkm_gr *base)
return ret;
}
-int
+static int
gf100_gr_fecs_stop_ctxsw(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
@@ -2032,7 +2032,7 @@ gf100_gr_fini(struct nvkm_gr *base, bool suspend)
return 0;
}
-void *
+static void *
gf100_gr_dtor(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
@@ -2103,7 +2103,7 @@ gf100_gr_new_(const struct gf100_gr_fwif *fwif,
fwif = nvkm_firmware_load(&gr->base.engine.subdev, fwif, "Gr", gr);
if (IS_ERR(fwif))
- return -ENODEV;
+ return PTR_ERR(fwif);
gr->func = fwif->func;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 88bcb57c2e07..dfd5dd74f0d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -404,6 +404,7 @@ int gf100_gr_nofw(struct gf100_gr *, int, const struct gf100_gr_fwif *);
int gk20a_gr_load_sw(struct gf100_gr *, const char *path, int ver);
+int gm200_gr_nofw(struct gf100_gr *, int, const struct gf100_gr_fwif *);
int gm200_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *);
extern const struct nvkm_acr_lsf_func gm200_gr_gpccs_acr;
extern const struct nvkm_acr_lsf_func gm200_gr_fecs_acr;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index e56880f3e3bd..6d4d72851610 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -33,7 +33,7 @@ struct gk20a_fw_av
u32 data;
};
-int
+static int
gk20a_gr_av_to_init(struct gf100_gr *gr, const char *path, const char *name,
int ver, struct gf100_gr_pack **ppack)
{
@@ -83,7 +83,7 @@ struct gk20a_fw_aiv
u32 data;
};
-int
+static int
gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *path, const char *name,
int ver, struct gf100_gr_pack **ppack)
{
@@ -126,7 +126,7 @@ end:
return ret;
}
-int
+static int
gk20a_gr_av_to_method(struct gf100_gr *gr, const char *path, const char *name,
int ver, struct gf100_gr_pack **ppack)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 3d67cfb08395..815137047518 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -32,6 +32,13 @@
#include <nvif/class.h>
+int
+gm200_gr_nofw(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ nvkm_warn(&gr->base.engine.subdev, "firmware unavailable\n");
+ return -ENODEV;
+}
+
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
@@ -275,7 +282,8 @@ MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
static const struct gf100_gr_fwif
gm200_gr_fwif[] = {
- { 0, gm200_gr_load, &gm200_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ { 0, gm200_gr_load, &gm200_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ { -1, gm200_gr_nofw },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index 09d8c5d5b000..1aab691fa71c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -175,7 +175,8 @@ MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
static const struct gf100_gr_fwif
gm20b_gr_fwif[] = {
- { 0, gm20b_gr_load, &gm20b_gr, &gm20b_gr_fecs_acr },
+ { 0, gm20b_gr_load, &gm20b_gr, &gm20b_gr_fecs_acr },
+ { -1, gm200_gr_nofw },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 33c8634ae567..ddba7ce937c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -150,7 +150,8 @@ MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
static const struct gf100_gr_fwif
gp100_gr_fwif[] = {
- { 0, gm200_gr_load, &gp100_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ { 0, gm200_gr_load, &gp100_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ { -1, gm200_gr_nofw },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 7baf67f743f4..c083f3757ff7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -146,7 +146,8 @@ MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin");
static const struct gf100_gr_fwif
gp102_gr_fwif[] = {
- { 0, gm200_gr_load, &gp102_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ { 0, gm200_gr_load, &gp102_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ { -1, gm200_gr_nofw },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
index d9b8ef875f8d..f6a31e9a8cc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
@@ -87,7 +87,8 @@ MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin");
static const struct gf100_gr_fwif
gp104_gr_fwif[] = {
- { 0, gm200_gr_load, &gp104_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ { 0, gm200_gr_load, &gp104_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ { -1, gm200_gr_nofw },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 2b1ad5522184..2c80c6a75b56 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -76,7 +76,8 @@ MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin");
static const struct gf100_gr_fwif
gp107_gr_fwif[] = {
- { 0, gm200_gr_load, &gp107_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ { 0, gm200_gr_load, &gp107_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ { -1, gm200_gr_nofw },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
index 113e4c1ba9e8..2be8f416dd6f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
@@ -86,7 +86,8 @@ MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin");
static const struct gf100_gr_fwif
gp108_gr_fwif[] = {
- { 0, gm200_gr_load, &gp107_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+ { 0, gm200_gr_load, &gp107_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+ { -1, gm200_gr_nofw },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index eaf913eb5aa3..6edc4bc7ed44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -88,7 +88,8 @@ MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
static const struct gf100_gr_fwif
gp10b_gr_fwif[] = {
- { 0, gm200_gr_load, &gp10b_gr, &gm20b_gr_fecs_acr, &gp10b_gr_gpccs_acr },
+ { 0, gm200_gr_load, &gp10b_gr, &gm20b_gr_fecs_acr, &gp10b_gr_gpccs_acr },
+ { -1, gm200_gr_nofw },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
index 70639d88b8e6..2189a8f4e644 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
@@ -135,7 +135,8 @@ MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin");
static const struct gf100_gr_fwif
gv100_gr_fwif[] = {
- { 0, gm200_gr_load, &gv100_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+ { 0, gm200_gr_load, &gv100_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+ { -1, gm200_gr_nofw },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index a9efa4d78be9..6039f9948aa2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -192,7 +192,8 @@ MODULE_FIRMWARE("nvidia/tu116/gr/sw_method_init.bin");
static const struct gf100_gr_fwif
tu102_gr_fwif[] = {
- { 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+ { 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+ { -1, gm200_gr_nofw },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
index 368f2a0042ff..bccf7acb7f98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
@@ -28,8 +28,16 @@
#include <nvfw/flcn.h>
#include <nvfw/sec2.h>
+int
+gp102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
+ const struct nvkm_sec2_fwif *fwif)
+{
+ nvkm_warn(&sec2->engine.subdev, "firmware unavailable\n");
+ return 0;
+}
+
static int
-gp102_sec2_acr_bootstrap_falcon_callback(void *priv, struct nv_falcon_msg *hdr)
+gp102_sec2_acr_bootstrap_falcon_callback(void *priv, struct nvfw_falcon_msg *hdr)
{
struct nv_sec2_acr_bootstrap_falcon_msg *msg =
container_of(hdr, typeof(*msg), msg.hdr);
@@ -115,6 +123,9 @@ gp102_sec2_acr_0 = {
.bld_write = gp102_sec2_acr_bld_write,
.bld_patch = gp102_sec2_acr_bld_patch,
.boot = gp102_sec2_acr_boot,
+ .bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_FECS) |
+ BIT_ULL(NVKM_ACR_LSF_GPCCS) |
+ BIT_ULL(NVKM_ACR_LSF_SEC2),
.bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
};
@@ -294,6 +305,9 @@ gp102_sec2_acr_1 = {
.bld_write = gp102_sec2_acr_bld_write_1,
.bld_patch = gp102_sec2_acr_bld_patch_1,
.boot = gp102_sec2_acr_boot,
+ .bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_FECS) |
+ BIT_ULL(NVKM_ACR_LSF_GPCCS) |
+ BIT_ULL(NVKM_ACR_LSF_SEC2),
.bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
};
@@ -322,8 +336,9 @@ MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
static const struct nvkm_sec2_fwif
gp102_sec2_fwif[] = {
- { 1, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
- { 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_0 },
+ { 1, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
+ { 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_0 },
+ { -1, gp102_sec2_nofw, &gp102_sec2 },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
index bb88117e018a..8cbc0b7d0a27 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -20,6 +20,7 @@ struct nvkm_sec2_fwif {
const struct nvkm_acr_lsf_func *acr;
};
+int gp102_sec2_nofw(struct nvkm_sec2 *, int, const struct nvkm_sec2_fwif *);
int gp102_sec2_load(struct nvkm_sec2 *, int, const struct nvkm_sec2_fwif *);
extern const struct nvkm_sec2_func gp102_sec2;
extern const struct nvkm_acr_lsf_func gp102_sec2_acr_1;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
index a8295653ceab..a231c1c6c0a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
@@ -49,13 +49,6 @@ tu102_sec2 = {
.initmsg = gp102_sec2_initmsg,
};
-static int
-tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
- const struct nvkm_sec2_fwif *fwif)
-{
- return 0;
-}
-
MODULE_FIRMWARE("nvidia/tu102/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/tu102/sec2/image.bin");
MODULE_FIRMWARE("nvidia/tu102/sec2/sig.bin");
@@ -75,7 +68,7 @@ MODULE_FIRMWARE("nvidia/tu117/sec2/sig.bin");
static const struct nvkm_sec2_fwif
tu102_sec2_fwif[] = {
{ 0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 },
- { -1, tu102_sec2_nofw, &tu102_sec2 }
+ { -1, gp102_sec2_nofw, &tu102_sec2 }
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
index 40e3f3fc83ef..44cf6a8862e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
@@ -58,7 +58,7 @@ nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
static void
nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
{
- struct nv_falcon_cmd cmd;
+ struct nvfw_falcon_cmd cmd;
cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND;
cmd.size = sizeof(cmd);
@@ -97,7 +97,7 @@ nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq)
}
static int
-nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd)
+nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd)
{
static unsigned timeout = 2000;
unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
@@ -121,7 +121,7 @@ nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd)
#define CMD_FLAGS_INTR BIT(1)
int
-nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd,
+nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd,
nvkm_falcon_qmgr_callback cb, void *priv,
unsigned long timeout)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
index cbfe09a561a1..e74371dffc76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
@@ -74,7 +74,7 @@ nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
}
static int
-nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr)
+nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
{
int ret = 0;
@@ -112,7 +112,7 @@ close:
}
static int
-nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr)
+nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
{
struct nvkm_falcon_qmgr_seq *seq;
@@ -144,7 +144,7 @@ nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq)
* stack space to work with.
*/
u8 msg_buffer[MSG_BUF_SIZE];
- struct nv_falcon_msg *hdr = (void *)msg_buffer;
+ struct nvfw_falcon_msg *hdr = (void *)msg_buffer;
while (nvkm_falcon_msgq_read(msgq, hdr) > 0)
nvkm_falcon_msgq_exec(msgq, hdr);
@@ -155,7 +155,7 @@ nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq,
void *data, u32 size)
{
struct nvkm_falcon *falcon = msgq->qmgr->falcon;
- struct nv_falcon_msg *hdr = data;
+ struct nvfw_falcon_msg *hdr = data;
int ret;
msgq->head_reg = falcon->func->msgq.head;
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
index a45cd705e4f7..976cb7b7aa99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
@@ -3,7 +3,7 @@
#define __NVKM_FALCON_QMGR_H__
#include <core/falcon.h>
-#define HDR_SIZE sizeof(struct nv_falcon_msg)
+#define HDR_SIZE sizeof(struct nvfw_falcon_msg)
#define QUEUE_ALIGNMENT 4
/* max size of the messages we can receive */
#define MSG_BUF_SIZE 128
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
index 0d063b8317f7..bef790ad8f2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
@@ -45,9 +45,8 @@ wpr_header_v1_dump(struct nvkm_subdev *subdev, const struct wpr_header_v1 *hdr)
nvkm_debug(subdev, "\tstatus : %d\n", hdr->status);
}
-void
-lsb_header_tail_dump(struct nvkm_subdev *subdev,
- struct lsb_header_tail *hdr)
+static void
+lsb_header_tail_dump(struct nvkm_subdev *subdev, struct lsb_header_tail *hdr)
{
nvkm_debug(subdev, "lsbHeader\n");
nvkm_debug(subdev, "\tucodeOff : 0x%x\n", hdr->ucode_off);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
index e4866a02e457..c962df9910dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
@@ -141,14 +141,24 @@ nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask)
struct nvkm_acr *acr = device->acr;
unsigned long id;
+ /* If there's no LS FW managing bootstrapping of other LS falcons,
+ * we depend on the HS firmware being able to do it instead.
+ */
if (!acrflcn) {
- int ret = nvkm_acr_reload(acr);
- if (ret)
- return ret;
+ /* Which isn't possible everywhere... */
+ if ((mask & acr->func->bootstrap_falcons) == mask) {
+ int ret = nvkm_acr_reload(acr);
+ if (ret)
+ return ret;
- return acr->done ? 0 : -EINVAL;
+ return acr->done ? 0 : -EINVAL;
+ }
+ return -ENOSYS;
}
+ if ((mask & acrflcn->func->bootstrap_falcons) != mask)
+ return -ENOSYS;
+
if (acrflcn->func->bootstrap_multiple_falcons) {
return acrflcn->func->
bootstrap_multiple_falcons(acrflcn->falcon, mask);
@@ -167,13 +177,10 @@ bool
nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id)
{
struct nvkm_acr *acr = device->acr;
- struct nvkm_acr_lsf *lsf;
if (acr) {
- list_for_each_entry(lsf, &acr->lsf, head) {
- if (lsf->id == id)
- return true;
- }
+ if (acr->managed_falcons & BIT_ULL(id))
+ return true;
}
return false;
@@ -213,6 +220,7 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
struct nvkm_acr_lsfw *lsfw, *lsft;
struct nvkm_acr_lsf *lsf;
u32 wpr_size = 0;
+ u64 falcons;
int ret, i;
if (list_empty(&acr->hsfw)) {
@@ -248,12 +256,28 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
lsf->falcon = lsfw->falcon;
lsf->id = lsfw->id;
list_add_tail(&lsf->head, &acr->lsf);
+ acr->managed_falcons |= BIT_ULL(lsf->id);
}
/* Ensure the falcon that'll provide ACR functions is booted first. */
lsf = nvkm_acr_falcon(device);
- if (lsf)
+ if (lsf) {
+ falcons = lsf->func->bootstrap_falcons;
list_move(&lsf->head, &acr->lsf);
+ } else {
+ falcons = acr->func->bootstrap_falcons;
+ }
+
+ /* Cull falcons that can't be bootstrapped, or the HSFW can fail to
+ * boot and leave the GPU in a weird state.
+ */
+ list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
+ if (!(falcons & BIT_ULL(lsfw->id))) {
+ nvkm_warn(subdev, "%s falcon cannot be bootstrapped\n",
+ nvkm_acr_lsf_id(lsfw->id));
+ nvkm_acr_lsfw_del(lsfw);
+ }
+ }
if (!acr->wpr_fw || acr->wpr_comp)
wpr_size = acr->func->wpr_layout(acr);
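The nvkm_acr_bootstrap_falcons() change above hinges on a bitmask subset test: a request may proceed only if every requested falcon ID is also set in the supported mask, otherwise -ENOSYS is returned. A minimal standalone C sketch of that idiom follows; BIT_ULL usage mirrors the hunk, while LSF_FECS and bootstrap_check() are illustrative stand-ins, not the nouveau API.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

/* Stand-ins for a few NVKM_ACR_LSF_* falcon IDs. */
enum { LSF_FECS, LSF_GPCCS, LSF_SEC2, LSF_PMU };

/* Allow the request only if it is a subset of the supported mask. */
static int bootstrap_check(uint64_t request, uint64_t supported)
{
	if ((request & supported) != request)
		return -ENOSYS;	/* at least one requested falcon is unsupported */
	return 0;
}

int main(void)
{
	uint64_t supported = BIT_ULL(LSF_FECS) | BIT_ULL(LSF_GPCCS);

	printf("%d\n", bootstrap_check(BIT_ULL(LSF_FECS) | BIT_ULL(LSF_GPCCS), supported)); /* 0 */
	printf("%d\n", bootstrap_check(BIT_ULL(LSF_SEC2), supported));                      /* -ENOSYS */
	return 0;
}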
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
index 9a6394085cf0..cd41b2e6cc87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
@@ -32,6 +32,17 @@
#include <nvfw/acr.h>
#include <nvfw/flcn.h>
+const struct nvkm_acr_func
+gm200_acr = {
+};
+
+int
+gm200_acr_nofw(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
+{
+ nvkm_warn(&acr->subdev, "firmware unavailable\n");
+ return 0;
+}
+
int
gm200_acr_init(struct nvkm_acr *acr)
{
@@ -425,7 +436,7 @@ gm200_acr_load_fwif[] = {
};
static const struct nvkm_acr_func
-gm200_acr = {
+gm200_acr_0 = {
.load = gm200_acr_load_fwif,
.unload = gm200_acr_unload_fwif,
.wpr_parse = gm200_acr_wpr_parse,
@@ -435,6 +446,8 @@ gm200_acr = {
.wpr_patch = gm200_acr_wpr_patch,
.wpr_check = gm200_acr_wpr_check,
.init = gm200_acr_init,
+ .bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_FECS) |
+ BIT_ULL(NVKM_ACR_LSF_GPCCS),
};
static int
@@ -459,7 +472,8 @@ gm200_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
static const struct nvkm_acr_fwif
gm200_acr_fwif[] = {
- { 0, gm200_acr_load, &gm200_acr },
+ { 0, gm200_acr_load, &gm200_acr_0 },
+ { -1, gm200_acr_nofw, &gm200_acr },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
index 034a6ede70c7..b1ecc58152cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
@@ -123,7 +123,8 @@ gm20b_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
static const struct nvkm_acr_fwif
gm20b_acr_fwif[] = {
- { 0, gm20b_acr_load, &gm20b_acr },
+ { 0, gm20b_acr_load, &gm20b_acr },
+ { -1, gm200_acr_nofw, &gm200_acr },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
index 49e11c46d525..80eb9d8dbc80 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
@@ -270,7 +270,8 @@ gp102_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
static const struct nvkm_acr_fwif
gp102_acr_fwif[] = {
- { 0, gp102_acr_load, &gp102_acr },
+ { 0, gp102_acr_load, &gp102_acr },
+ { -1, gm200_acr_nofw, &gm200_acr },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
index f10dc9112678..67a7c141004b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
@@ -100,7 +100,8 @@ gp108_acr = {
static const struct nvkm_acr_fwif
gp108_acr_fwif[] = {
- { 0, gp102_acr_load, &gp108_acr },
+ { 0, gp102_acr_load, &gp108_acr },
+ { -1, gm200_acr_nofw, &gm200_acr },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
index 39de64292a41..8249f0d2d81d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
@@ -46,7 +46,8 @@ gp10b_acr = {
static const struct nvkm_acr_fwif
gp10b_acr_fwif[] = {
- { 0, gm20b_acr_load, &gp10b_acr },
+ { 0, gm20b_acr_load, &gp10b_acr },
+ { -1, gm200_acr_nofw, &gm200_acr },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
index 07d1830126ab..9b1cf6711ae9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
@@ -60,7 +60,7 @@ nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *func, struct nvkm_acr *acr,
{
struct nvkm_acr_lsfw *lsfw;
- if (!acr)
+ if (!acr || list_empty(&acr->hsfw))
return ERR_PTR(-ENOSYS);
lsfw = nvkm_acr_lsfw_get(acr, id);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
index d8ba72806d39..d71af17a169a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
@@ -10,6 +10,7 @@ struct nvkm_acr_fwif {
const struct nvkm_acr_func *func;
};
+int gm200_acr_nofw(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);
int gm20b_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);
int gp102_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);
@@ -27,8 +28,10 @@ struct nvkm_acr_func {
void (*wpr_check)(struct nvkm_acr *, u64 *start, u64 *limit);
int (*init)(struct nvkm_acr *);
void (*fini)(struct nvkm_acr *);
+ u64 bootstrap_falcons;
};
+extern const struct nvkm_acr_func gm200_acr;
int gm200_acr_wpr_parse(struct nvkm_acr *);
u32 gm200_acr_wpr_layout(struct nvkm_acr *);
int gm200_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
index d28d8f36ae24..c4981bce9a2b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
@@ -219,6 +219,7 @@ tu102_acr_load(struct nvkm_acr *acr, int version,
static const struct nvkm_acr_fwif
tu102_acr_fwif[] = {
{ 0, tu102_acr_load, &tu102_acr },
+ { -1, gm200_acr_nofw, &gm200_acr },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
index a8d5d67feeaf..8698f260b988 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
@@ -172,8 +172,8 @@ dcb_outp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
outp->dpconf.link_nr = 1;
break;
}
+ fallthrough;
- /* fall-through... */
case DCB_OUTPUT_TMDS:
case DCB_OUTPUT_LVDS:
outp->link = (conf & 0x00000030) >> 4;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index b099d1209be8..c694501ae206 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -100,7 +100,7 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
switch (*ver) {
case 0x20:
info->mask |= 0x00c0; /* match any link */
- /* fall-through */
+ fallthrough;
case 0x21:
case 0x30:
info->flags = nvbios_rd08(bios, data + 0x05);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
index 7112992e0e38..f039388f0676 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
@@ -135,7 +135,7 @@ nvbios_perfEp(struct nvkm_bios *bios, int idx,
break;
case 0x30:
info->script = nvbios_rd16(bios, perf + 0x02);
- /* fall through */
+ fallthrough;
case 0x35:
info->fanspeed = nvbios_rd08(bios, perf + 0x06);
info->voltage = nvbios_rd08(bios, perf + 0x07);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index bda6cc9a7aaf..350f10a3de37 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -134,7 +134,7 @@ pll_map(struct nvkm_bios *bios)
device->chipset == 0xaa ||
device->chipset == 0xac)
return g84_pll_mapping;
- /* fall through */
+ fallthrough;
default:
return NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
index 20ff5173cf8f..2da45e29f68b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
@@ -115,21 +115,21 @@ nvbios_timingEp(struct nvkm_bios *bios, int idx,
switch (min_t(u8, *hdr, 25)) {
case 25:
p->timing_10_24 = nvbios_rd08(bios, data + 0x18);
- /* fall through */
+ fallthrough;
case 24:
case 23:
case 22:
p->timing_10_21 = nvbios_rd08(bios, data + 0x15);
- /* fall through */
+ fallthrough;
case 21:
p->timing_10_20 = nvbios_rd08(bios, data + 0x14);
- /* fall through */
+ fallthrough;
case 20:
p->timing_10_CWL = nvbios_rd08(bios, data + 0x13);
- /* fall through */
+ fallthrough;
case 19:
p->timing_10_18 = nvbios_rd08(bios, data + 0x12);
- /* fall through */
+ fallthrough;
case 18:
case 17:
p->timing_10_16 = nvbios_rd08(bios, data + 0x10);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 40e564524b7a..dc184e857f85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -90,7 +90,7 @@ nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
case NVKM_CLK_BOOST_NONE:
if (clk->base_khz && freq > clk->base_khz)
return false;
- /* fall through */
+ fallthrough;
case NVKM_CLK_BOOST_BIOS:
if (clk->boost_khz && freq > clk->boost_khz)
return false;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
index 4f000237796f..efa50274df97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
@@ -363,7 +363,7 @@ mcp77_clk_prog(struct nvkm_clk *base)
switch (clk->vsrc) {
case nv_clk_src_cclk:
mast |= 0x00400000;
- /* fall through */
+ fallthrough;
default:
nvkm_wr32(device, 0x4600, clk->vdiv);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
index c3dae05348eb..317ce9fb8225 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
@@ -119,11 +119,11 @@ powerctrl_1_shift(int chip_version, int reg)
switch (reg) {
case 0x680520:
- shift += 4; /* fall through */
+ shift += 4; fallthrough;
case 0x680508:
- shift += 4; /* fall through */
+ shift += 4; fallthrough;
case 0x680504:
- shift += 4; /* fall through */
+ shift += 4; fallthrough;
case 0x680500:
shift += 4;
}
@@ -245,11 +245,11 @@ setPLL_double_highregs(struct nvkm_devinit *init, u32 reg1,
switch (reg1) {
case 0x680504:
- shift_c040 += 2; /* fall through */
+ shift_c040 += 2; fallthrough;
case 0x680500:
- shift_c040 += 2; /* fall through */
+ shift_c040 += 2; fallthrough;
case 0x680520:
- shift_c040 += 2; /* fall through */
+ shift_c040 += 2; fallthrough;
case 0x680508:
shift_c040 += 2;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
index 5f4c287d7943..97b3a28ca5c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
@@ -131,13 +131,13 @@ nv40_ram_prog(struct nvkm_ram *base)
nvkm_mask(device, 0x00402c, 0xc0771100, ram->ctrl);
nvkm_wr32(device, 0x004048, ram->coef);
nvkm_wr32(device, 0x004030, ram->coef);
- /* fall through */
+ fallthrough;
case 0x43:
case 0x49:
case 0x4b:
nvkm_mask(device, 0x004038, 0xc0771100, ram->ctrl);
nvkm_wr32(device, 0x00403c, ram->coef);
- /* fall through */
+ fallthrough;
default:
nvkm_mask(device, 0x004020, 0xc0771100, ram->ctrl);
nvkm_wr32(device, 0x004024, ram->coef);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index c8ab1b5741a3..db7769cb33eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -118,10 +118,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (retries)
udelay(400);
- /* transaction request, wait up to 1ms for it to complete */
+ /* transaction request, wait up to 2ms for it to complete */
nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl);
- timeout = 1000;
+ timeout = 2000;
do {
ctrl = nvkm_rd32(device, 0x00e4e4 + base);
udelay(1);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index 7ef60895f43a..edb6148cbca0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -118,10 +118,10 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (retries)
udelay(400);
- /* transaction request, wait up to 1ms for it to complete */
+ /* transaction request, wait up to 2ms for it to complete */
nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl);
- timeout = 1000;
+ timeout = 2000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + base);
udelay(1);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
index ff8629de97d6..45c62f5ef782 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
@@ -23,7 +23,7 @@
#include "priv.h"
-void
+static void
gp10b_mc_init(struct nvkm_mc *mc)
{
struct nvkm_device *device = mc->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index ee11ccaf0563..de91e9a26172 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -61,7 +61,7 @@ nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
kfree(pt);
}
-struct nvkm_mmu_pt *
+static struct nvkm_mmu_pt *
nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
{
struct nvkm_mmu_pt *pt;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index b21e82eb0916..94081f35f967 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -27,7 +27,7 @@
#include <nvif/class.h>
-const u8 *
+static const u8 *
tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
static const u8
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 199f94e15c5f..710f3f8dc7c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1030,7 +1030,7 @@ nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
return 0;
}
-int
+static int
nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
u32 pd_header, bool managed, u64 addr, u64 size,
struct lock_class_key *key, const char *name,
@@ -1204,7 +1204,6 @@ nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
/*TODO:
* - Avoid PT readback (for dma_unmap etc), this might end up being dealt
* with inside HMM, which would be a lot nicer for us to deal with.
- * - Multiple page sizes (particularly for huge page support).
* - Support for systems without a 4KiB page size.
*/
int
@@ -1220,8 +1219,8 @@ nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
/* Only support mapping where the page size of the incoming page
* array matches a page size available for direct mapping.
*/
- while (page->shift && page->shift != shift &&
- page->desc->func->pfn == NULL)
+ while (page->shift && (page->shift != shift ||
+ page->desc->func->pfn == NULL))
page++;
if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index d3f8f916d0db..a2b179568970 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -163,9 +163,6 @@ int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
u32 pd_header, bool managed, u64 addr, u64 size,
struct lock_class_key *, const char *name,
struct nvkm_vmm **);
-int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
- u32 pd_header, bool managed, u64 addr, u64 size,
- struct lock_class_key *, const char *name, struct nvkm_vmm *);
struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
u64 addr, u64 size);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index d86287565542..9539e6cda4d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -258,12 +258,94 @@ gp100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
VMM_FO128(pt, vmm, pdei * 0x10, 0ULL, 0ULL, pdes);
}
+static void
+gp100_vmm_pd0_pfn_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ struct device *dev = vmm->mmu->subdev.device->dev;
+ dma_addr_t addr;
+
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
+ u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
+ u64 data = (u64)datahi << 32 | datalo;
+
+ if ((data & (3ULL << 1)) != 0) {
+ addr = (data >> 8) << 12;
+ dma_unmap_page(dev, addr, 1UL << 21, DMA_BIDIRECTIONAL);
+ }
+ ptei++;
+ }
+ nvkm_done(pt->memory);
+}
+
+static bool
+gp100_vmm_pd0_pfn_clear(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ bool dma = false;
+
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
+ u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
+ u64 data = (u64)datahi << 32 | datalo;
+
+ if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
+ VMM_WO064(pt, vmm, ptei * 16, data & ~BIT_ULL(0));
+ dma = true;
+ }
+ ptei++;
+ }
+ nvkm_done(pt->memory);
+ return dma;
+}
+
+static void
+gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ struct device *dev = vmm->mmu->subdev.device->dev;
+ dma_addr_t addr;
+
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ u64 data = 0;
+
+ if (!(*map->pfn & NVKM_VMM_PFN_W))
+ data |= BIT_ULL(6); /* RO. */
+
+ if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
+ addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
+ addr = dma_map_page(dev, pfn_to_page(addr), 0,
+ 1UL << 21, DMA_BIDIRECTIONAL);
+ if (!WARN_ON(dma_mapping_error(dev, addr))) {
+ data |= addr >> 4;
+ data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
+ data |= BIT_ULL(3); /* VOL. */
+ data |= BIT_ULL(0); /* VALID. */
+ }
+ } else {
+ data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
+ data |= BIT_ULL(0); /* VALID. */
+ }
+
+ VMM_WO064(pt, vmm, ptei++ * 16, data);
+ map->pfn++;
+ }
+ nvkm_done(pt->memory);
+}
+
static const struct nvkm_vmm_desc_func
gp100_vmm_desc_pd0 = {
.unmap = gp100_vmm_pd0_unmap,
.sparse = gp100_vmm_pd0_sparse,
.pde = gp100_vmm_pd0_pde,
.mem = gp100_vmm_pd0_mem,
+ .pfn = gp100_vmm_pd0_pfn,
+ .pfn_clear = gp100_vmm_pd0_pfn_clear,
+ .pfn_unmap = gp100_vmm_pd0_pfn_unmap,
};
static void
@@ -466,7 +548,6 @@ void
gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
- type = 0; /*XXX: need to confirm stuff works with depth enabled... */
if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
type |= 0x00000004; /* HUB_ONLY */
type |= 0x00000001; /* PAGE_ALL */
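gp100_vmm_pd0_pfn() above packs a 64-bit PTE from an address field plus flag bits: bit 0 valid, bits 2:1 aperture (2 = coherent system memory), bit 3 volatile, bit 6 read-only, and the address stored as addr >> 4. A hedged standalone sketch of that packing follows; PTE_VALID and the other macro names are invented for illustration, not taken from the hardware headers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_VALID        (1ULL << 0)
#define PTE_APERTURE(a)  ((uint64_t)(a) << 1)	/* 2 = coherent system memory in the hunk */
#define PTE_VOL          (1ULL << 3)
#define PTE_RO           (1ULL << 6)

/* Pack the address the same way the hunk does: data |= addr >> 4. */
static uint64_t pte_build(uint64_t addr, bool writable, bool sysmem)
{
	uint64_t data = PTE_VALID | (addr >> 4);

	if (!writable)
		data |= PTE_RO;
	if (sysmem)
		data |= PTE_APERTURE(2) | PTE_VOL;

	return data;
}

int main(void)
{
	printf("0x%016llx\n", (unsigned long long)pte_build(0x12345000ULL, true, true));
	return 0;
}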
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index be91cffc3b52..b1294d0076c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -28,9 +28,9 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
struct nvkm_subdev *subdev = &vmm->mmu->subdev;
struct nvkm_device *device = subdev->device;
- u32 type = depth << 24; /*XXX: not confirmed */
+ u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
- type = 0x00000001; /* PAGE_ALL */
+ type |= 0x00000001; /* PAGE_ALL */
if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
type |= 0x00000004; /* HUB_ONLY */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index 2a6150ab5611..70e2c414bb7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -159,7 +159,7 @@ mxm_dcb_sanitise_entry(struct nvkm_bios *bios, void *data, int idx, u16 pdcb)
break;
case 0x0e: /* eDP, falls through to DPint */
ctx.outp[1] |= 0x00010000;
- /* fall through */
+ fallthrough;
case 0x07: /* DP internal, wtf is this?? HP8670w */
ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
type = DCB_CONNECTOR_eDP;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index a76c2a7bd696..eafc9321a08a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -9,7 +9,7 @@ nvkm-y += nvkm/subdev/pmu/gk110.o
nvkm-y += nvkm/subdev/pmu/gk208.o
nvkm-y += nvkm/subdev/pmu/gk20a.o
nvkm-y += nvkm/subdev/pmu/gm107.o
+nvkm-y += nvkm/subdev/pmu/gm200.o
nvkm-y += nvkm/subdev/pmu/gm20b.o
-nvkm-y += nvkm/subdev/pmu/gp100.o
nvkm-y += nvkm/subdev/pmu/gp102.o
nvkm-y += nvkm/subdev/pmu/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
index 09e05db21ff5..383376addb41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
@@ -24,20 +24,28 @@
#include "priv.h"
static const struct nvkm_pmu_func
-gp100_pmu = {
+gm200_pmu = {
.flcn = &gt215_pmu_flcn,
.enabled = gf100_pmu_enabled,
.reset = gf100_pmu_reset,
};
+
+int
+gm200_pmu_nofw(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
+{
+ nvkm_warn(&pmu->subdev, "firmware unavailable\n");
+ return 0;
+}
+
static const struct nvkm_pmu_fwif
-gp100_pmu_fwif[] = {
- { -1, gf100_pmu_nofw, &gp100_pmu },
+gm200_pmu_fwif[] = {
+ { -1, gm200_pmu_nofw, &gm200_pmu },
{}
};
int
-gp100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gm200_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gp100_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gm200_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index 82571032a07d..8f6ed5373ea1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -28,7 +28,7 @@
#include <nvfw/pmu.h>
static int
-gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nv_falcon_msg *hdr)
+gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nvfw_falcon_msg *hdr)
{
struct nv_pmu_acr_bootstrap_falcon_msg *msg =
container_of(hdr, typeof(*msg), msg.hdr);
@@ -126,11 +126,14 @@ gm20b_pmu_acr = {
.bld_write = gm20b_pmu_acr_bld_write,
.bld_patch = gm20b_pmu_acr_bld_patch,
.boot = gm20b_pmu_acr_boot,
+ .bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_PMU) |
+ BIT_ULL(NVKM_ACR_LSF_FECS) |
+ BIT_ULL(NVKM_ACR_LSF_GPCCS),
.bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
};
static int
-gm20b_pmu_acr_init_wpr_callback(void *priv, struct nv_falcon_msg *hdr)
+gm20b_pmu_acr_init_wpr_callback(void *priv, struct nvfw_falcon_msg *hdr)
{
struct nv_pmu_acr_init_wpr_region_msg *msg =
container_of(hdr, typeof(*msg), msg.hdr);
@@ -231,7 +234,8 @@ gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
static const struct nvkm_pmu_fwif
gm20b_pmu_fwif[] = {
- { 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr },
+ { 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr },
+ { -1, gm200_pmu_nofw, &gm20b_pmu },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index 262b8a3dd507..3d8ce14dba7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -46,7 +46,7 @@ gp102_pmu = {
static const struct nvkm_pmu_fwif
gp102_pmu_fwif[] = {
- { -1, gf100_pmu_nofw, &gp102_pmu },
+ { -1, gm200_pmu_nofw, &gp102_pmu },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
index 5b81c7320479..9c237c426599 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -28,7 +28,7 @@
static int
gp10b_pmu_acr_bootstrap_multiple_falcons_cb(void *priv,
- struct nv_falcon_msg *hdr)
+ struct nvfw_falcon_msg *hdr)
{
struct nv_pmu_acr_bootstrap_multiple_falcons_msg *msg =
container_of(hdr, typeof(*msg), msg.hdr);
@@ -69,6 +69,9 @@ gp10b_pmu_acr = {
.bld_write = gm20b_pmu_acr_bld_write,
.bld_patch = gm20b_pmu_acr_bld_patch,
.boot = gm20b_pmu_acr_boot,
+ .bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_PMU) |
+ BIT_ULL(NVKM_ACR_LSF_FECS) |
+ BIT_ULL(NVKM_ACR_LSF_GPCCS),
.bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
.bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons,
};
@@ -90,7 +93,8 @@ MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
static const struct nvkm_pmu_fwif
gp10b_pmu_fwif[] = {
- { 0, gm20b_pmu_load, &gp10b_pmu, &gp10b_pmu_acr },
+ { 0, gm20b_pmu_load, &gp10b_pmu, &gp10b_pmu_acr },
+ { -1, gm200_pmu_nofw, &gp10b_pmu },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index f470859244de..276b6d778e53 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -59,6 +59,7 @@ struct nvkm_pmu_fwif {
};
int gf100_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
+int gm200_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
int gm20b_pmu_load(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
index 4caf401d001a..c08097f2aff5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
@@ -36,7 +36,7 @@ gt215_therm_fan_sense(struct nvkm_therm *therm)
return -ENODEV;
}
-void
+static void
gt215_therm_init(struct nvkm_therm *therm)
{
struct nvkm_device *device = therm->subdev.device;
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 88493538a147..af6ea5480c81 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2583,6 +2583,7 @@ static const struct panel_desc logicpd_type_28 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct panel_desc mitsubishi_aa070mc01 = {
@@ -2746,6 +2747,7 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct display_timing nlt_nl192108ac18_02d_timing = {
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 1d8e07b8b19e..bf9dc451583a 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -149,10 +149,9 @@ static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
return -1;
}
-static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
+static void qxl_ttm_backend_unbind(struct ttm_tt *ttm)
{
/* Not implemented */
- return -1;
}
static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 54af06df865b..004344dce140 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -591,7 +591,7 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
return 0;
}
-static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+static void radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt = (void *)ttm;
@@ -599,8 +599,6 @@ static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
if (gtt->userptr)
radeon_ttm_tt_unpin_userptr(ttm);
-
- return 0;
}
static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 0919f1f159a4..f65d1489dc50 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -31,6 +31,7 @@ config DRM_RCAR_DW_HDMI
config DRM_RCAR_LVDS
tristate "R-Car DU LVDS Encoder Support"
depends on DRM && DRM_BRIDGE && OF
+ select DRM_KMS_HELPER
select DRM_PANEL
select OF_FLATTREE
select OF_OVERLAY
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index ce07ddc3e058..557cbe5ab35f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -259,9 +259,8 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
unsigned long reg;
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg,
- reg & SUN4I_HDMI_HPD_HIGH,
- 0, 500000)) {
+ reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
+ if (reg & SUN4I_HDMI_HPD_HIGH) {
cec_phys_addr_invalidate(hdmi->cec_adap);
return connector_status_disconnected;
}
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 9b308b572eac..9a0b3240bc58 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -368,6 +368,12 @@ static void tegra_dc_setup_window(struct tegra_plane *plane,
h_size = window->src.w * bpp;
v_size = window->src.h;
+ if (window->reflect_x)
+ h_offset += (window->src.w - 1) * bpp;
+
+ if (window->reflect_y)
+ v_offset += window->src.h - 1;
+
value = V_PRESCALED_SIZE(v_size) | H_PRESCALED_SIZE(h_size);
tegra_plane_writel(plane, value, DC_WIN_PRESCALED_SIZE);
@@ -404,9 +410,6 @@ static void tegra_dc_setup_window(struct tegra_plane *plane,
tegra_plane_writel(plane, window->stride[0], DC_WIN_LINE_STRIDE);
}
- if (window->bottom_up)
- v_offset += window->src.h - 1;
-
tegra_plane_writel(plane, h_offset, DC_WINBUF_ADDR_H_OFFSET);
tegra_plane_writel(plane, v_offset, DC_WINBUF_ADDR_V_OFFSET);
@@ -470,7 +473,10 @@ static void tegra_dc_setup_window(struct tegra_plane *plane,
value |= COLOR_EXPAND;
}
- if (window->bottom_up)
+ if (window->reflect_x)
+ value |= H_DIRECTION;
+
+ if (window->reflect_y)
value |= V_DIRECTION;
if (tegra_plane_use_horizontal_filtering(plane, window)) {
@@ -601,7 +607,10 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct tegra_plane_state *plane_state = to_tegra_plane_state(state);
- unsigned int rotation = DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y;
+ unsigned int supported_rotation = DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y;
+ unsigned int rotation = state->rotation;
struct tegra_bo_tiling *tiling = &plane_state->tiling;
struct tegra_plane *tegra = to_tegra_plane(plane);
struct tegra_dc *dc = to_tegra_dc(state->crtc);
@@ -639,12 +648,26 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- rotation = drm_rotation_simplify(state->rotation, rotation);
+ /*
+ * Older userspace used custom BO flag in order to specify the Y
+ * reflection, while modern userspace uses the generic DRM rotation
+ * property in order to achieve the same result. The legacy BO flag
+ * duplicates the DRM rotation property when both are set.
+ */
+ if (tegra_fb_is_bottom_up(state->fb))
+ rotation |= DRM_MODE_REFLECT_Y;
+
+ rotation = drm_rotation_simplify(rotation, supported_rotation);
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ plane_state->reflect_x = true;
+ else
+ plane_state->reflect_x = false;
if (rotation & DRM_MODE_REFLECT_Y)
- plane_state->bottom_up = true;
+ plane_state->reflect_y = true;
else
- plane_state->bottom_up = false;
+ plane_state->reflect_y = false;
/*
* Tegra doesn't support different strides for U and V planes so we
@@ -706,7 +729,8 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
window.dst.w = drm_rect_width(&plane->state->dst);
window.dst.h = drm_rect_height(&plane->state->dst);
window.bits_per_pixel = fb->format->cpp[0] * 8;
- window.bottom_up = tegra_fb_is_bottom_up(fb) || state->bottom_up;
+ window.reflect_x = state->reflect_x;
+ window.reflect_y = state->reflect_y;
/* copy from state */
window.zpos = plane->state->normalized_zpos;
@@ -792,6 +816,8 @@ static struct drm_plane *tegra_primary_plane_create(struct drm_device *drm,
err = drm_plane_create_rotation_property(&plane->base,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
if (err < 0)
dev_err(dc->dev, "failed to create rotation property: %d\n",
@@ -957,6 +983,7 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
}
drm_plane_helper_add(&plane->base, &tegra_cursor_plane_helper_funcs);
+ drm_plane_create_zpos_immutable_property(&plane->base, 255);
return &plane->base;
}
@@ -1079,6 +1106,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
err = drm_plane_create_rotation_property(&plane->base,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
if (err < 0)
dev_err(dc->dev, "failed to create rotation property: %d\n",
@@ -2554,10 +2583,8 @@ static int tegra_dc_probe(struct platform_device *pdev)
return PTR_ERR(dc->regs);
dc->irq = platform_get_irq(pdev, 0);
- if (dc->irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ\n");
+ if (dc->irq < 0)
return -ENXIO;
- }
err = tegra_dc_rgb_probe(dc);
if (err < 0 && err != -ENODEV) {
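The tegra_plane_atomic_check() change above can advertise DRM_MODE_ROTATE_180 even though the hardware only has X/Y reflection bits, because a 180-degree rotation is equivalent to reflecting in both axes and drm_rotation_simplify() reduces the request to the supported set. A standalone sketch of that reduction follows, handling only the 180-degree case; ROTATE_0, ROTATE_180, REFLECT_X and REFLECT_Y are illustrative bit names, not the DRM UAPI values.

#include <stdio.h>

#define ROTATE_0    (1u << 0)
#define ROTATE_180  (1u << 2)
#define REFLECT_X   (1u << 4)
#define REFLECT_Y   (1u << 5)

/*
 * Reduce a requested rotation to the supported set: rotating by 180
 * degrees is the same operation as reflecting in both X and Y, so
 * toggle the reflection bits and drop the rotation.
 */
static unsigned int simplify(unsigned int rotation, unsigned int supported)
{
	if ((rotation & ~supported) && (rotation & ROTATE_180)) {
		rotation &= ~ROTATE_180;
		rotation ^= REFLECT_X | REFLECT_Y;
		rotation |= ROTATE_0;
	}
	return rotation;
}

int main(void)
{
	unsigned int supported = ROTATE_0 | REFLECT_X | REFLECT_Y;

	/* 180-degree rotation plus a Y reflection collapses to a plain X reflection. */
	printf("0x%x\n", simplify(ROTATE_180 | REFLECT_Y, supported));
	return 0;
}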
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 3d8ddccd758f..051d03dcb9b0 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -136,7 +136,8 @@ struct tegra_dc_window {
unsigned int stride[2];
unsigned long base[3];
unsigned int zpos;
- bool bottom_up;
+ bool reflect_x;
+ bool reflect_y;
struct tegra_bo_tiling tiling;
u32 format;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 38beab9ab4f8..3820e8dff14b 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -670,6 +670,7 @@ static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
{
u32 value;
+ int err;
/*
* XXX Is this still needed? The module reset is deasserted right
@@ -693,7 +694,11 @@ static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
- return tegra_mipi_calibrate(dsi->mipi);
+ err = tegra_mipi_calibrate(dsi->mipi);
+ if (err < 0)
+ return err;
+
+ return tegra_mipi_wait(dsi->mipi);
}
static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
@@ -1618,7 +1623,7 @@ static int tegra_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->regs))
return PTR_ERR(dsi->regs);
- dsi->mipi = tegra_mipi_request(&pdev->dev);
+ dsi->mipi = tegra_mipi_request(&pdev->dev, pdev->dev.of_node);
if (IS_ERR(dsi->mipi))
return PTR_ERR(dsi->mipi);
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 48363f744bb9..1a0d3ba6e525 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -177,6 +177,7 @@ static const u32 gr2d_addr_regs[] = {
GR2D_DSTC_BASE_ADDR,
GR2D_SRCA_BASE_ADDR,
GR2D_SRCB_BASE_ADDR,
+ GR2D_PATBASE_ADDR,
GR2D_SRC_BASE_ADDR_SB,
GR2D_DSTA_BASE_ADDR_SB,
GR2D_DSTB_BASE_ADDR_SB,
diff --git a/drivers/gpu/drm/tegra/gr2d.h b/drivers/gpu/drm/tegra/gr2d.h
index 2398486f0699..9b7d66e15b9f 100644
--- a/drivers/gpu/drm/tegra/gr2d.h
+++ b/drivers/gpu/drm/tegra/gr2d.h
@@ -14,6 +14,7 @@
#define GR2D_DSTC_BASE_ADDR 0x2d
#define GR2D_SRCA_BASE_ADDR 0x31
#define GR2D_SRCB_BASE_ADDR 0x32
+#define GR2D_PATBASE_ADDR 0x47
#define GR2D_SRC_BASE_ADDR_SB 0x48
#define GR2D_DSTA_BASE_ADDR_SB 0x49
#define GR2D_DSTB_BASE_ADDR_SB 0x4a
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index c0a528be0369..b0b8154e8104 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -381,10 +381,12 @@ static int gr3d_remove(struct platform_device *pdev)
}
if (gr3d->clk_secondary) {
+ reset_control_assert(gr3d->rst_secondary);
tegra_powergate_power_off(TEGRA_POWERGATE_3D1);
clk_disable_unprepare(gr3d->clk_secondary);
}
+ reset_control_assert(gr3d->rst);
tegra_powergate_power_off(TEGRA_POWERGATE_3D);
clk_disable_unprepare(gr3d->clk);
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 8183e617bf6b..22a03f7ffdc1 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -149,7 +149,9 @@ int tegra_display_hub_prepare(struct tegra_display_hub *hub)
for (i = 0; i < hub->soc->num_wgrps; i++) {
struct tegra_windowgroup *wgrp = &hub->wgrps[i];
- tegra_windowgroup_enable(wgrp);
+ /* Skip orphaned window group whose parent DC is disabled */
+ if (wgrp->parent)
+ tegra_windowgroup_enable(wgrp);
}
return 0;
@@ -166,7 +168,9 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
for (i = 0; i < hub->soc->num_wgrps; i++) {
struct tegra_windowgroup *wgrp = &hub->wgrps[i];
- tegra_windowgroup_disable(wgrp);
+ /* Skip orphaned window group whose parent DC is disabled */
+ if (wgrp->parent)
+ tegra_windowgroup_disable(wgrp);
}
}
@@ -944,6 +948,15 @@ static int tegra_display_hub_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
+ err = devm_of_platform_populate(&pdev->dev);
+ if (err < 0)
+ goto unregister;
+
+ return err;
+
+unregister:
+ host1x_client_unregister(&hub->client);
+ pm_runtime_disable(&pdev->dev);
return err;
}
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 9ccfb56e9b01..4cd0461cc508 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -61,7 +61,8 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
copy->tiling = state->tiling;
copy->format = state->format;
copy->swap = state->swap;
- copy->bottom_up = state->bottom_up;
+ copy->reflect_x = state->reflect_x;
+ copy->reflect_y = state->reflect_y;
copy->opaque = state->opaque;
for (i = 0; i < 2; i++)
diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h
index a158a915109a..c691dd79b27b 100644
--- a/drivers/gpu/drm/tegra/plane.h
+++ b/drivers/gpu/drm/tegra/plane.h
@@ -46,7 +46,8 @@ struct tegra_plane_state {
u32 format;
u32 swap;
- bool bottom_up;
+ bool reflect_x;
+ bool reflect_y;
/* used for legacy blending support only */
struct tegra_plane_legacy_blending_state blending[2];
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7cbcf9617f5e..45b5258c77a2 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2946,7 +2946,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
{
int err;
- sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
+ sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io-hdmi-dp");
if (IS_ERR(sor->avdd_io_supply)) {
dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
PTR_ERR(sor->avdd_io_supply));
@@ -2960,7 +2960,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
return err;
}
- sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
+ sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-hdmi-dp-pll");
if (IS_ERR(sor->vdd_pll_supply)) {
dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
PTR_ERR(sor->vdd_pll_supply));
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 6050dc846894..38f1351140e2 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -82,17 +82,18 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
return ret;
}
-static int ttm_agp_unbind(struct ttm_tt *ttm)
+static void ttm_agp_unbind(struct ttm_tt *ttm)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
if (agp_be->mem) {
- if (agp_be->mem->is_bound)
- return agp_unbind_memory(agp_be->mem);
+ if (agp_be->mem->is_bound) {
+ agp_unbind_memory(agp_be->mem);
+ return;
+ }
agp_free_memory(agp_be->mem);
agp_be->mem = NULL;
}
- return 0;
}
static void ttm_agp_destroy(struct ttm_tt *ttm)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0768a054a916..f297fd5e02d4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -880,8 +880,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
if (!fence)
return 0;
- if (no_wait_gpu)
+ if (no_wait_gpu) {
+ dma_fence_put(fence);
return -EBUSY;
+ }
dma_resv_add_shared_fence(bo->base.resv, fence);
@@ -1836,7 +1838,7 @@ out:
}
EXPORT_SYMBOL(ttm_bo_swapout);
-void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
+void ttm_bo_swapout_all(void)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 82b893d4249f..d7a6537dd6ee 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -300,8 +300,10 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
break;
case -EBUSY:
case -ERESTARTSYS:
+ dma_fence_put(moving);
return VM_FAULT_NOPAGE;
default:
+ dma_fence_put(moving);
return VM_FAULT_SIGBUS;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e25d4097aa16..9d1c7177384c 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -244,7 +244,6 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
ttm_tt_init_fields(ttm, bo, page_flags);
if (ttm_tt_alloc_page_directory(ttm)) {
- ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
@@ -268,7 +267,6 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&ttm_dma->pages_list);
if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
- ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
@@ -290,7 +288,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
else
ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
if (ret) {
- ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
@@ -313,11 +310,8 @@ EXPORT_SYMBOL(ttm_dma_tt_fini);
void ttm_tt_unbind(struct ttm_tt *ttm)
{
- int ret;
-
if (ttm->state == tt_bound) {
- ret = ttm->func->unbind(ttm);
- BUG_ON(ret);
+ ttm->func->unbind(ttm);
ttm->state = tt_unbound;
}
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 470428387878..fb39826f72c1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1352,7 +1352,7 @@ static int vmw_pm_freeze(struct device *kdev)
vmw_execbuf_release_pinned_bo(dev_priv);
vmw_resource_evict_all(dev_priv);
vmw_release_device_early(dev_priv);
- ttm_bo_swapout_all(&dev_priv->bdev);
+ ttm_bo_swapout_all();
if (dev_priv->enable_fb)
vmw_fifo_resource_dec(dev_priv);
if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 9ffa9c75a5da..16b385629688 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1069,10 +1069,6 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
if (new_content_type != SAME_AS_DISPLAY) {
struct vmw_surface_metadata metadata = {0};
- metadata.base_size.width = hdisplay;
- metadata.base_size.height = vdisplay;
- metadata.base_size.depth = 1;
-
/*
* If content buffer is a buffer object, then we have to
* construct surface info
@@ -1104,6 +1100,10 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
metadata = new_vfbs->surface->metadata;
}
+ metadata.base_size.width = hdisplay;
+ metadata.base_size.height = vdisplay;
+ metadata.base_size.depth = 1;
+
if (vps->surf) {
struct drm_vmw_size cur_base_size =
vps->surf->metadata.base_size;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 1d78187eaba6..ab524ab3b0b4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -610,7 +610,7 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
return 0;
}
-static int vmw_ttm_unbind(struct ttm_tt *ttm)
+static void vmw_ttm_unbind(struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
@@ -628,8 +628,6 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm)
if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
vmw_ttm_unmap_dma(vmw_be);
-
- return 0;
}
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 6a995db51d6d..e201f62d62c0 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -686,8 +686,17 @@ EXPORT_SYMBOL(host1x_driver_register_full);
*/
void host1x_driver_unregister(struct host1x_driver *driver)
{
+ struct host1x *host1x;
+
driver_unregister(&driver->driver);
+ mutex_lock(&devices_lock);
+
+ list_for_each_entry(host1x, &devices, list)
+ host1x_detach_driver(host1x, driver);
+
+ mutex_unlock(&devices_lock);
+
mutex_lock(&drivers_lock);
list_del_init(&driver->list);
mutex_unlock(&drivers_lock);
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index c0392672a842..1b4997bda1c7 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -16,6 +16,8 @@
#include "debug.h"
#include "channel.h"
+static DEFINE_MUTEX(debug_lock);
+
unsigned int host1x_debug_trace_cmdbuf;
static pid_t host1x_debug_force_timeout_pid;
@@ -52,12 +54,14 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
struct output *o = data;
mutex_lock(&ch->cdma.lock);
+ mutex_lock(&debug_lock);
if (show_fifo)
host1x_hw_show_channel_fifo(m, ch, o);
host1x_hw_show_channel_cdma(m, ch, o);
+ mutex_unlock(&debug_lock);
mutex_unlock(&ch->cdma.lock);
return 0;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index d24344e91922..d0ebb70e2fdd 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -468,11 +468,18 @@ static int host1x_probe(struct platform_device *pdev)
err = host1x_register(host);
if (err < 0)
- goto deinit_intr;
+ goto deinit_debugfs;
+
+ err = devm_of_platform_populate(&pdev->dev);
+ if (err < 0)
+ goto unregister;
return 0;
-deinit_intr:
+unregister:
+ host1x_unregister(host);
+deinit_debugfs:
+ host1x_debug_deinit(host);
host1x_intr_deinit(host);
deinit_syncpt:
host1x_syncpt_deinit(host);
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 02125842071c..f31bcfa1b837 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -192,8 +192,14 @@ static void show_gather(struct output *o, phys_addr_t phys_addr,
static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
{
+ struct push_buffer *pb = &cdma->push_buffer;
struct host1x_job *job;
+ host1x_debug_output(o, "PUSHBUF at %pad, %u words\n",
+ &pb->dma, pb->size / 4);
+
+ show_gather(o, pb->dma, pb->size / 4, cdma, pb->dma, pb->mapped);
+
list_for_each_entry(job, &cdma->sync_queue, list) {
unsigned int i;
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index a10643aa89aa..89b6c14b7392 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -27,10 +27,13 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
u32 num_cmdbufs, u32 num_relocs)
{
struct host1x_job *job = NULL;
- unsigned int num_unpins = num_cmdbufs + num_relocs;
+ unsigned int num_unpins = num_relocs;
u64 total;
void *mem;
+ if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+ num_unpins += num_cmdbufs;
+
/* Check that we're not going to overflow */
total = sizeof(struct host1x_job) +
(u64)num_relocs * sizeof(struct host1x_reloc) +
@@ -102,6 +105,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
struct host1x_client *client = job->client;
struct device *dev = client->dev;
+ struct host1x_job_gather *g;
struct iommu_domain *domain;
unsigned int i;
int err;
@@ -183,8 +187,14 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
job->num_unpins++;
}
+ /*
+ * We will copy gathers BO content later, so there is no need to
+ * hold and pin them.
+ */
+ if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+ return 0;
+
for (i = 0; i < job->num_gathers; i++) {
- struct host1x_job_gather *g = &job->gathers[i];
size_t gather_size = 0;
struct scatterlist *sg;
struct sg_table *sgt;
@@ -194,6 +204,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
dma_addr_t *phys;
unsigned int j;
+ g = &job->gathers[i];
g->bo = host1x_bo_get(g->bo);
if (!g->bo) {
err = -EINVAL;
@@ -213,10 +224,10 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
sgt = host1x_bo_pin(host->dev, g->bo, phys);
if (IS_ERR(sgt)) {
err = PTR_ERR(sgt);
- goto unpin;
+ goto put;
}
- if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
+ if (host->domain) {
for_each_sg(sgt->sgl, sg, sgt->nents, j)
gather_size += sg->length;
gather_size = iova_align(&host->iova, gather_size);
@@ -226,7 +237,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
host->iova_end >> shift, true);
if (!alloc) {
err = -ENOMEM;
- goto unpin;
+ goto put;
}
err = iommu_map_sg(host->domain,
@@ -235,7 +246,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
if (err == 0) {
__free_iova(&host->iova, alloc);
err = -EINVAL;
- goto unpin;
+ goto put;
}
job->unpins[job->num_unpins].size = gather_size;
@@ -245,7 +256,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
DMA_TO_DEVICE);
if (!err) {
err = -ENOMEM;
- goto unpin;
+ goto put;
}
job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
@@ -263,6 +274,8 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
return 0;
+put:
+ host1x_bo_put(g->bo);
unpin:
host1x_job_unpin(job);
return err;
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index e00809d996a2..e606464aa43c 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -21,9 +21,9 @@
*/
#include <linux/clk.h>
-#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -206,9 +206,9 @@ static int tegra_mipi_power_down(struct tegra_mipi *mipi)
return 0;
}
-struct tegra_mipi_device *tegra_mipi_request(struct device *device)
+struct tegra_mipi_device *tegra_mipi_request(struct device *device,
+ struct device_node *np)
{
- struct device_node *np = device->of_node;
struct tegra_mipi_device *dev;
struct of_phandle_args args;
int err;
@@ -293,22 +293,29 @@ int tegra_mipi_disable(struct tegra_mipi_device *dev)
}
EXPORT_SYMBOL(tegra_mipi_disable);
-static int tegra_mipi_wait(struct tegra_mipi *mipi)
+int tegra_mipi_wait(struct tegra_mipi_device *device)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(250);
+ struct tegra_mipi *mipi = device->mipi;
+ void __iomem *status_reg = mipi->regs + (MIPI_CAL_STATUS << 2);
u32 value;
+ int err;
- while (time_before(jiffies, timeout)) {
- value = tegra_mipi_readl(mipi, MIPI_CAL_STATUS);
- if ((value & MIPI_CAL_STATUS_ACTIVE) == 0 &&
- (value & MIPI_CAL_STATUS_DONE) != 0)
- return 0;
+ err = clk_enable(device->mipi->clk);
+ if (err < 0)
+ return err;
- usleep_range(10, 50);
- }
+ mutex_lock(&device->mipi->lock);
- return -ETIMEDOUT;
+ err = readl_relaxed_poll_timeout(status_reg, value,
+ !(value & MIPI_CAL_STATUS_ACTIVE) &&
+ (value & MIPI_CAL_STATUS_DONE), 50,
+ 250000);
+ mutex_unlock(&device->mipi->lock);
+ clk_disable(device->mipi->clk);
+
+ return err;
}
+EXPORT_SYMBOL(tegra_mipi_wait);
int tegra_mipi_calibrate(struct tegra_mipi_device *device)
{
@@ -374,12 +381,10 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
value |= MIPI_CAL_CTRL_START;
tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
- err = tegra_mipi_wait(device->mipi);
-
mutex_unlock(&device->mipi->lock);
clk_disable(device->mipi->clk);
- return err;
+ return 0;
}
EXPORT_SYMBOL(tegra_mipi_calibrate);
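tegra_mipi_wait() above replaces an open-coded jiffies loop with readl_relaxed_poll_timeout(), which rereads the status register until the condition holds or the timeout expires. A hedged userspace sketch of the same poll-until-done-or-timeout pattern follows, using the hunk's 50 us delay and 250 ms timeout; read_status(), STATUS_ACTIVE and STATUS_DONE are invented stand-ins for the MIPI_CAL_STATUS register and its bits.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define STATUS_ACTIVE (1u << 0)
#define STATUS_DONE   (1u << 16)

/* Fake hardware: calibration finishes on the third status read. */
static uint32_t read_status(void)
{
	static int reads;
	return ++reads < 3 ? STATUS_ACTIVE : STATUS_DONE;
}

/* Poll until (!ACTIVE && DONE) or timeout_us elapses, sleeping delay_us per iteration. */
static int poll_status(unsigned int delay_us, unsigned int timeout_us)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		uint32_t v = read_status();

		if (!(v & STATUS_ACTIVE) && (v & STATUS_DONE))
			return 0;

		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000000L +
		    (now.tv_nsec - start.tv_nsec) / 1000L >= (long)timeout_us)
			return -ETIMEDOUT;

		usleep(delay_us);
	}
}

int main(void)
{
	printf("%d\n", poll_status(50, 250000));	/* 0 on success, -ETIMEDOUT otherwise */
	return 0;
}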