Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Kconfig  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ObjectID.h  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  190
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c  198
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h  18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c  18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c  52
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c  101
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  340
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  479
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c  143
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  156
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c  73
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c  239
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c  19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c  38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c  43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c  30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  479
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c  127
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c  42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  269
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h  90
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c  26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h)  45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  507
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_crtc.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_dp.c  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_i2c.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c  431
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c  134
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h  36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c  94
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_smc.c  13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c  141
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c  125
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c  3176
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.h (renamed from drivers/gpu/drm/amd/amdgpu/tonga_smum.h)  21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c  183
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c  802
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.h (renamed from drivers/gpu/drm/amd/amdgpu/iceland_smum.h)  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_dpm.c  186
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_smc.c  863
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c  3362
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h  29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c  269
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c  1005
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c  1071
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h  29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c  18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c  88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_dpm.c  200
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_smc.c  677
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/r600_dpm.h  127
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c  139
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c  244
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c  1965
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.h (renamed from drivers/gpu/drm/amd/amdgpu/fiji_smum.h)  23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c  915
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.h  29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c  8006
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.h  1015
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c  299
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.h  29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_smc.c  273
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sislands_smc.h  423
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_dpm.c  186
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c  49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_smc.c  862
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c  18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c  18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  150
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c  240
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c  442
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c  535
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vid.h  41
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c  18
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c  61
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_queue.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c  8
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h  13
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h  941
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h  105
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/si/sid.h  2461
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h  1
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h  1
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h  1
-rw-r--r--  drivers/gpu/drm/amd/include/atombios.h  2
-rwxr-xr-x [-rw-r--r--]  drivers/gpu/drm/amd/include/cgs_common.h  3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/Kconfig  6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c  135
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/psm.c  8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/Makefile  14
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c  233
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c  121
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h  105
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c  5599
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h  350
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c  613
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h  81
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h  62
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c  34
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c  314
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h  62
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c  5290
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c  716
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h  62
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c  1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h)  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c)  281
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h)  10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c)  160
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h)  25
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h  55
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  4359
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h)  244
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c)  985
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h)  56
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c)  258
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h  58
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c  350
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h  107
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c  6276
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h  397
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c  590
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h  61
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h  22
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h  1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h  175
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h  3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/power_state.h  22
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/pp_debug.h  3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu71.h  510
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h  631
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu7_common.h  58
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h  412
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smumgr.h  77
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/Makefile  4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c  46
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c  2374
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h  51
-rwxr-xr-x [-rw-r--r--]  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c  612
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h  32
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c  2576
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h)  26
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c  250
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h  71
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c  2287
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h  42
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c  708
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h  59
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c  589
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h  87
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c  110
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c  3206
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h)  44
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c  672
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h  46
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c  2
193 files changed, 50840 insertions, 29877 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 7335c0420c70..61360e27715f 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -1,3 +1,10 @@
+config DRM_AMDGPU_SI
+ bool "Enable amdgpu support for SI parts"
+ depends on DRM_AMDGPU
+ help
+ Choose this option if you want to enable experimental support
+ for SI asics.
+
config DRM_AMDGPU_CIK
bool "Enable amdgpu support for CIK parts"
depends on DRM_AMDGPU
@@ -25,3 +32,4 @@ config DRM_AMDGPU_GART_DEBUGFS
Selecting this option creates a debugfs file to inspect the mapped
pages. Uses more memory for housekeeping, enable only for debugging.
+source "drivers/gpu/drm/amd/acp/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index c7fcdcedaadb..248a05d02917 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -23,13 +23,16 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
- amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
+ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
+ amdgpu_gtt_mgr.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
amdgpu_amdkfd_gfx_v7.o
+amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
+
amdgpu-y += \
vi.o
@@ -50,15 +53,13 @@ amdgpu-y += \
amdgpu-y += \
amdgpu_dpm.o \
amdgpu_powerplay.o \
- cz_smc.o cz_dpm.o \
- tonga_smc.o tonga_dpm.o \
- fiji_smc.o fiji_dpm.o \
- iceland_smc.o iceland_dpm.o
+ cz_smc.o cz_dpm.o
# add DCE block
amdgpu-y += \
dce_v10_0.o \
- dce_v11_0.o
+ dce_v11_0.o \
+ dce_virtual.o
# add GFX block
amdgpu-y += \
@@ -110,14 +111,10 @@ amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o
-ifneq ($(CONFIG_DRM_AMD_POWERPLAY),)
-
include $(FULL_AMD_PATH)/powerplay/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
-endif
-
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
CFLAGS_amdgpu_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index 06192698bd96..b8d66670bb17 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -90,6 +90,7 @@
#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
#define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
+#define ENCODER_OBJECT_ID_VIRTUAL 0x28
#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
@@ -119,6 +120,7 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
+#define CONNECTOR_OBJECT_ID_VIRTUAL 0x17
/* deleted */
@@ -147,6 +149,7 @@
#define GRAPH_OBJECT_ENUM_ID5 0x05
#define GRAPH_OBJECT_ENUM_ID6 0x06
#define GRAPH_OBJECT_ENUM_ID7 0x07
+#define GRAPH_OBJECT_ENUM_VIRTUAL 0x08
/****************************************************/
/* Graphics Object ID Bit definition */
@@ -408,6 +411,10 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
+#define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
+
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8ebc5f1eb4c0..039b57e4644c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -51,11 +51,13 @@
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
+#include "amdgpu_ttm.h"
#include "amdgpu_gds.h"
#include "amd_powerplay.h"
#include "amdgpu_acp.h"
#include "gpu_scheduler.h"
+#include "amdgpu_virt.h"
/*
* Modules parameters.
@@ -63,6 +65,7 @@
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
+extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
@@ -91,6 +94,9 @@ extern unsigned amdgpu_pcie_lane_cap;
extern unsigned amdgpu_cg_mask;
extern unsigned amdgpu_pg_mask;
extern char *amdgpu_disable_cu;
+extern int amdgpu_sclk_deep_sleep_en;
+extern char *amdgpu_virtual_display;
+extern unsigned amdgpu_pp_feature_mask;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -105,7 +111,7 @@ extern char *amdgpu_disable_cu;
#define AMDGPU_MAX_RINGS 16
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
-#define AMDGPU_MAX_VCE_RINGS 2
+#define AMDGPU_MAX_VCE_RINGS 3
/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2
@@ -248,10 +254,9 @@ struct amdgpu_vm_pte_funcs {
uint64_t pe, uint64_t src,
unsigned count);
/* write pte one entry at a time with addr mapping */
- void (*write_pte)(struct amdgpu_ib *ib,
- const dma_addr_t *pages_addr, uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+ void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr);
/* for linear pte/pde updates without addr mapping */
void (*set_pte_pde)(struct amdgpu_ib *ib,
uint64_t pe,
@@ -316,6 +321,10 @@ struct amdgpu_ring_funcs {
/* note usage for clock and power gating */
void (*begin_use)(struct amdgpu_ring *ring);
void (*end_use)(struct amdgpu_ring *ring);
+ void (*emit_switch_buffer) (struct amdgpu_ring *ring);
+ void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+ unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
+ unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
};
/*
@@ -396,46 +405,8 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
/*
- * TTM.
+ * BO.
*/
-
-#define AMDGPU_TTM_LRU_SIZE 20
-
-struct amdgpu_mman_lru {
- struct list_head *lru[TTM_NUM_MEM_TYPES];
- struct list_head *swap_lru;
-};
-
-struct amdgpu_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_device bdev;
- bool mem_global_referenced;
- bool initialized;
-
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *vram;
- struct dentry *gtt;
-#endif
-
- /* buffer handling */
- const struct amdgpu_buffer_funcs *buffer_funcs;
- struct amdgpu_ring *buffer_funcs_ring;
- /* Scheduler entity for buffer moves */
- struct amd_sched_entity entity;
-
- /* custom LRU management */
- struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
-};
-
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
- uint64_t src_offset,
- uint64_t dst_offset,
- uint32_t byte_count,
- struct reservation_object *resv,
- struct fence **fence);
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
struct amdgpu_bo_list_entry {
struct amdgpu_bo *robj;
struct ttm_validate_buffer tv;
@@ -474,8 +445,6 @@ struct amdgpu_bo_va {
#define AMDGPU_GEM_DOMAIN_MAX 0x3
struct amdgpu_bo {
- /* Protected by gem.mutex */
- struct list_head list;
/* Protected by tbo.reserved */
u32 prefered_domains;
u32 allowed_domains;
@@ -498,10 +467,12 @@ struct amdgpu_bo {
struct amdgpu_device *adev;
struct drm_gem_object gem_base;
struct amdgpu_bo *parent;
+ struct amdgpu_bo *shadow;
struct ttm_bo_kmap_obj dma_buf_vmap;
struct amdgpu_mn *mn;
struct list_head mn_list;
+ struct list_head shadow_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
@@ -646,11 +617,12 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist,
dma_addr_t *dma_addr, uint32_t flags);
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
/*
* GPU MC structures, functions & helpers
@@ -677,6 +649,8 @@ struct amdgpu_mc {
uint32_t fw_version;
struct amdgpu_irq_src vm_fault;
uint32_t vram_type;
+ uint32_t srbm_soft_reset;
+ struct amdgpu_mode_mc_save save;
};
/*
@@ -721,13 +695,14 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
*/
struct amdgpu_flip_work {
- struct work_struct flip_work;
+ struct delayed_work flip_work;
struct work_struct unpin_work;
struct amdgpu_device *adev;
int crtc_id;
+ u32 target_vblank;
uint64_t base;
struct drm_pending_vblank_event *event;
- struct amdgpu_bo *old_rbo;
+ struct amdgpu_bo *old_abo;
struct fence *excl;
unsigned shared_count;
struct fence **shared;
@@ -815,13 +790,17 @@ struct amdgpu_ring {
/* maximum number of VMIDs */
#define AMDGPU_NUM_VM 16
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
+
/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
-#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
-#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)
+
+/* LOG2 number of continuous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
#define AMDGPU_PTE_VALID (1 << 0)
#define AMDGPU_PTE_SYSTEM (1 << 1)
@@ -833,10 +812,7 @@ struct amdgpu_ring {
#define AMDGPU_PTE_READABLE (1 << 5)
#define AMDGPU_PTE_WRITEABLE (1 << 6)
-/* PTE (Page Table Entry) fragment field for different page sizes */
-#define AMDGPU_PTE_FRAG_4KB (0 << 7)
-#define AMDGPU_PTE_FRAG_64KB (4 << 7)
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
@@ -846,6 +822,7 @@ struct amdgpu_ring {
struct amdgpu_vm_pt {
struct amdgpu_bo_list_entry entry;
uint64_t addr;
+ uint64_t shadow_addr;
};
struct amdgpu_vm {
@@ -948,7 +925,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
@@ -957,7 +933,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
- struct ttm_mem_reg *mem);
+ bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
@@ -992,6 +968,7 @@ struct amdgpu_ctx {
spinlock_t ring_lock;
struct fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
+ bool preamble_presented;
};
struct amdgpu_ctx_mgr {
@@ -1195,6 +1172,10 @@ struct amdgpu_gfx {
unsigned ce_ram_size;
struct amdgpu_cu_info cu_info;
const struct amdgpu_gfx_funcs *funcs;
+
+ /* reset mask */
+ uint32_t grbm_soft_reset;
+ uint32_t srbm_soft_reset;
};
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1247,11 +1228,16 @@ struct amdgpu_cs_parser {
struct fence *fence;
uint64_t bytes_moved_threshold;
uint64_t bytes_moved;
+ struct amdgpu_bo_list_entry *evictable;
/* user fence */
struct amdgpu_bo_list_entry uf_entry;
};
+#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0) /* bit set means command submit involves a preamble IB */
+#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) /* bit set means preamble IB is the first presented in its context */
+#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means a context switch occurred */
+
struct amdgpu_job {
struct amd_sched_job base;
struct amdgpu_device *adev;
@@ -1260,9 +1246,10 @@ struct amdgpu_job {
struct amdgpu_sync sync;
struct amdgpu_ib *ibs;
struct fence *fence; /* the hw fence */
+ uint32_t preamble_status;
uint32_t num_ibs;
void *owner;
- uint64_t ctx;
+ uint64_t fence_ctx; /* the fence_context this job uses */
bool vm_needs_flush;
unsigned vm_id;
uint64_t vm_pd_addr;
@@ -1683,6 +1670,7 @@ struct amdgpu_uvd {
bool address_64_bit;
bool use_ctx_buf;
struct amd_sched_entity entity;
+ uint32_t srbm_soft_reset;
};
/*
@@ -1709,6 +1697,8 @@ struct amdgpu_vce {
struct amdgpu_irq_src irq;
unsigned harvest_config;
struct amd_sched_entity entity;
+ uint32_t srbm_soft_reset;
+ unsigned num_rings;
};
/*
@@ -1726,9 +1716,14 @@ struct amdgpu_sdma_instance {
struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+#ifdef CONFIG_DRM_AMDGPU_SI
+ //SI DMA has a different trap irq number for the second engine
+ struct amdgpu_irq_src trap_irq_1;
+#endif
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
int num_instances;
+ uint32_t srbm_soft_reset;
};
/*
@@ -1830,6 +1825,7 @@ struct amdgpu_asic_funcs {
bool (*read_disabled_bios)(struct amdgpu_device *adev);
bool (*read_bios_from_rom)(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes);
+ void (*detect_hw_virtualization) (struct amdgpu_device *adev);
int (*read_register)(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset, u32 *value);
void (*set_vga_state)(struct amdgpu_device *adev, bool state);
@@ -1839,8 +1835,9 @@ struct amdgpu_asic_funcs {
/* MM block clocks */
int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
- /* query virtual capabilities */
- u32 (*get_virtual_caps)(struct amdgpu_device *adev);
+ /* static power management */
+ int (*get_pcie_lanes)(struct amdgpu_device *adev);
+ void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
};
/*
@@ -1933,16 +1930,6 @@ struct amdgpu_atcs {
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
-
-/* GPU virtualization */
-#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0)
-#define AMDGPU_VIRT_CAPS_IS_VF (1 << 1)
-struct amdgpu_virtualization {
- bool supports_sr_iov;
- bool is_virtual;
- u32 caps;
-};
-
/*
* Core structure, functions and helpers.
*/
@@ -1956,6 +1943,8 @@ struct amdgpu_ip_block_status {
bool valid;
bool sw;
bool hw;
+ bool late_initialized;
+ bool hang;
};
struct amdgpu_device {
@@ -2014,6 +2003,8 @@ struct amdgpu_device {
spinlock_t pcie_idx_lock;
amdgpu_rreg_t pcie_rreg;
amdgpu_wreg_t pcie_wreg;
+ amdgpu_rreg_t pciep_rreg;
+ amdgpu_wreg_t pciep_wreg;
/* protects concurrent UVD register access */
spinlock_t uvd_ctx_idx_lock;
amdgpu_rreg_t uvd_ctx_rreg;
@@ -2054,7 +2045,16 @@ struct amdgpu_device {
atomic64_t num_evictions;
atomic_t gpu_reset_counter;
+ /* data for buffer migration throttling */
+ struct {
+ spinlock_t lock;
+ s64 last_update_us;
+ s64 accum_us; /* accumulated microseconds */
+ u32 log2_max_MBps;
+ } mm_stats;
+
/* display */
+ bool enable_virtual_display;
struct amdgpu_mode_info mode_info;
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
@@ -2117,6 +2117,14 @@ struct amdgpu_device {
struct kfd_dev *kfd;
struct amdgpu_virtualization virtualization;
+
+ /* link all shadow bo */
+ struct list_head shadow_list;
+ struct mutex shadow_list_lock;
+ /* link all gtt */
+ spinlock_t gtt_list_lock;
+ struct list_head gtt_list;
+
};
bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2149,6 +2157,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
+#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
+#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
@@ -2192,6 +2202,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
#define REG_GET_FIELD(value, reg, field) \
(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
+#define WREG32_FIELD(reg, field, val) \
+ WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
/*
* BIOS helpers.
*/
@@ -2235,14 +2248,17 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
-#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
+#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
+#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
+#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
+#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
-#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
@@ -2257,9 +2273,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
+#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
+#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
+#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
+#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2291,6 +2311,11 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
+#define amdgpu_dpm_read_sensor(adev, idx, value) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
+ -EINVAL)
+
#define amdgpu_dpm_get_temperature(adev) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
@@ -2342,11 +2367,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
(adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
(adev)->pm.funcs->powergate_vce((adev), (g)))
-#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
- (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))
-
#define amdgpu_dpm_get_current_power_state(adev) \
(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
@@ -2387,6 +2407,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
/* Common functions */
int amdgpu_gpu_reset(struct amdgpu_device *adev);
+bool amdgpu_need_backup(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
@@ -2395,7 +2416,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
u32 ip_instance, u32 ring,
struct amdgpu_ring **out_ring);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
@@ -2412,6 +2433,10 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
+u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
+int amdgpu_ttm_global_init(struct amdgpu_device *adev);
+int amdgpu_ttm_init(struct amdgpu_device *adev);
+void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size);
@@ -2423,11 +2448,13 @@ void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
+bool amdgpu_atpx_dgpu_req_power_for_displays(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
+static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
#endif
/*
@@ -2444,8 +2471,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
void amdgpu_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
-int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
-int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
+int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
@@ -2491,6 +2518,7 @@ static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo);
+int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
#include "amdgpu_object.h"
#endif
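Among the amdgpu.h additions, WREG32_FIELD wraps the common read-modify-write sequence for updating a single register field: read the register, clear the field with its mask, OR in the shifted value, write it back. A standalone C sketch of the same shape, using a made-up register layout (the mask/shift values are illustrative, not from the patch):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout: a 3-bit MODE field at bits [6:4]. */
#define EXAMPLE_REG__MODE_MASK	0x00000070
#define EXAMPLE_REG__MODE_SHIFT	4

static uint32_t fake_reg;	/* stands in for MMIO via RREG32/WREG32 */

/* Same shape as WREG32_FIELD: clear the field, then OR in the value. */
static void wreg32_field(uint32_t mask, uint32_t shift, uint32_t val)
{
	fake_reg = (fake_reg & ~mask) | (val << shift);
}

int main(void)
{
	fake_reg = 0xffffffff;
	wreg32_field(EXAMPLE_REG__MODE_MASK, EXAMPLE_REG__MODE_SHIFT, 3);
	printf("reg = 0x%08x\n", fake_reg);	/* 0xffffffbf: MODE is now 3 */
	return 0;
}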
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 5cd7b736a9de..5796539a0bcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -25,6 +25,7 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/power_supply.h>
+#include <linux/pm_runtime.h>
#include <acpi/video.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -333,6 +334,16 @@ int amdgpu_atif_handler(struct amdgpu_device *adev,
#endif
}
}
+ if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+ if ((adev->flags & AMD_IS_PX) &&
+ amdgpu_atpx_dgpu_req_power_for_displays()) {
+ pm_runtime_get_sync(adev->ddev->dev);
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(adev->ddev);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ }
+ }
/* TODO: check other events */
/* We've handled the event, stop the notifier chain. The ACPI interface
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index d080d0807a5b..dba8a5b25e66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -143,14 +143,6 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
return r;
}
-u32 pool_to_domain(enum kgd_memory_pool p)
-{
- switch (p) {
- case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
- default: return AMDGPU_GEM_DOMAIN_GTT;
- }
-}
-
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 362bedc9e507..1a0a5f7cccbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -103,11 +103,11 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
- unsigned int timeout, uint32_t pipe_id,
+ unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int timeout);
+ unsigned int utimeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
unsigned int watch_point_id,
@@ -437,11 +437,12 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
}
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
- unsigned int timeout, uint32_t pipe_id,
+ unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
+ int timeout = utimeout;
acquire_queue(kgd, pipe_id, queue_id);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
@@ -452,9 +453,8 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
temp = RREG32(mmCP_HQD_ACTIVE);
if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
break;
- if (timeout == 0) {
- pr_err("kfd: cp queue preemption time out (%dms)\n",
- temp);
+ if (timeout <= 0) {
+ pr_err("kfd: cp queue preemption time out.\n");
release_queue(kgd);
return -ETIME;
}
@@ -467,12 +467,13 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
}
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int timeout)
+ unsigned int utimeout)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
uint32_t sdma_base_addr;
uint32_t temp;
+ int timeout = utimeout;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
@@ -485,7 +486,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
break;
- if (timeout == 0)
+ if (timeout <= 0)
return -ETIME;
msleep(20);
timeout -= 20;
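The change in both amdkfd interface files is the same: the millisecond budget still arrives as an unsigned int, but the countdown now runs on a signed copy, so the exit test still fires when the budget is not a multiple of the 20 ms polling step (an unsigned counter would wrap past zero instead of going negative). A standalone sketch of the corrected pattern:

#include <stdio.h>

/* The hardware-idle test is stubbed out here; in the driver it reads
 * CP_HQD_ACTIVE or the SDMA context status register. */
static int hw_is_idle(void) { return 0; }

static int poll_until_idle(unsigned int utimeout)
{
	int timeout = utimeout;	/* signed copy, as in the patch */

	for (;;) {
		if (hw_is_idle())
			return 0;
		if (timeout <= 0)
			return -1;	/* -ETIME in the kernel code */
		/* msleep(20) in the kernel */
		timeout -= 20;
	}
}

int main(void)
{
	printf("%d\n", poll_until_idle(50));	/* gives up after ~3 polls */
	return 0;
}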
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 04b744d64b57..6697612239c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -62,10 +62,10 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
- unsigned int timeout, uint32_t pipe_id,
+ unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int timeout);
+ unsigned int utimeout);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
@@ -349,11 +349,12 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
}
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
- unsigned int timeout, uint32_t pipe_id,
+ unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
+ int timeout = utimeout;
acquire_queue(kgd, pipe_id, queue_id);
@@ -363,9 +364,8 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
temp = RREG32(mmCP_HQD_ACTIVE);
if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
break;
- if (timeout == 0) {
- pr_err("kfd: cp queue preemption time out (%dms)\n",
- temp);
+ if (timeout <= 0) {
+ pr_err("kfd: cp queue preemption time out.\n");
release_queue(kgd);
return -ETIME;
}
@@ -378,12 +378,13 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
}
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int timeout)
+ unsigned int utimeout)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
uint32_t sdma_base_addr;
uint32_t temp;
+ int timeout = utimeout;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
@@ -396,7 +397,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
break;
- if (timeout == 0)
+ if (timeout <= 0)
return -ETIME;
msleep(20);
timeout -= 20;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 983175363b06..8e6bf548d689 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -259,6 +259,33 @@ static const int object_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown
};
+bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, Object_Header);
+ u16 size, data_offset;
+ u8 frev, crev;
+ ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
+ ATOM_OBJECT_HEADER *obj_header;
+
+ if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+ return false;
+
+ if (crev < 2)
+ return false;
+
+ obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
+ path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(obj_header->usDisplayPathTableOffset));
+
+ if (path_obj->ucNumOfDispPath)
+ return true;
+ else
+ return false;
+}
+
bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
@@ -321,6 +348,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
(le16_to_cpu(path->usConnObjectId) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+ /* Skip TV/CV support */
+ if ((le16_to_cpu(path->usDeviceTag) ==
+ ATOM_DEVICE_TV1_SUPPORT) ||
+ (le16_to_cpu(path->usDeviceTag) ==
+ ATOM_DEVICE_CV_SUPPORT))
+ continue;
+
+ if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+ DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+ con_obj_id, le16_to_cpu(path->usDeviceTag));
+ continue;
+ }
+
connector_type =
object_connector_convert[con_obj_id];
connector_object_id = con_obj_id;
@@ -951,6 +991,48 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
return -EINVAL;
switch (crev) {
+ case 2:
+ case 3:
+ case 5:
+ /* r6xx, r7xx, evergreen, ni, si.
+ * TODO: add support for asic_type <= CHIP_RV770 */
+ if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
+ args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v3.ucPostDiv;
+ dividers->enable_post_div = (args.v3.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+ dividers->enable_dithen = (args.v3.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+ dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+ dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
+ dividers->ref_div = args.v3.ucRefDiv;
+ dividers->vco_mode = (args.v3.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+ } else {
+ /* for SI we use ComputeMemoryClockParam for memory plls */
+ if (adev->asic_type >= CHIP_TAHITI)
+ return -EINVAL;
+ args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+ if (strobe_mode)
+ args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
+
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v5.ucPostDiv;
+ dividers->enable_post_div = (args.v5.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+ dividers->enable_dithen = (args.v5.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+ dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
+ dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
+ dividers->ref_div = args.v5.ucRefDiv;
+ dividers->vco_mode = (args.v5.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+ }
+ break;
case 4:
/* fusion */
args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */
@@ -1095,6 +1177,32 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
+void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
+ u16 *vddc, u16 *vddci, u16 *mvdd)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+ u8 frev, crev;
+ u16 data_offset;
+ union firmware_info *firmware_info;
+
+ *vddc = 0;
+ *vddci = 0;
+ *mvdd = 0;
+
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ data_offset);
+ *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+ if ((frev == 2) && (crev >= 2)) {
+ *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
+ *mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage);
+ }
+ }
+}
+
union set_voltage {
struct _SET_VOLTAGE_PS_ALLOCATION alloc;
struct _SET_VOLTAGE_PARAMETERS v1;
@@ -1102,6 +1210,52 @@ union set_voltage {
struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
};
+int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
+ u16 voltage_id, u16 *voltage)
+{
+ union set_voltage args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+ u8 frev, crev;
+
+ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+ return -EINVAL;
+
+ switch (crev) {
+ case 1:
+ return -EINVAL;
+ case 2:
+ args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+ args.v2.ucVoltageMode = 0;
+ args.v2.usVoltageLevel = 0;
+
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *voltage = le16_to_cpu(args.v2.usVoltageLevel);
+ break;
+ case 3:
+ args.v3.ucVoltageType = voltage_type;
+ args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
+ args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
+
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *voltage = le16_to_cpu(args.v3.usVoltageLevel);
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
+ u16 *voltage,
+ u16 leakage_idx)
+{
+ return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
+}
+
void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
u16 voltage_level,
u8 voltage_type)
@@ -1322,6 +1476,50 @@ static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOL
return NULL;
}
+int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
+ u8 voltage_type,
+ u8 *svd_gpio_id, u8 *svc_gpio_id)
+{
+ int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+ u8 frev, crev;
+ u16 data_offset, size;
+ union voltage_object_info *voltage_info;
+ union voltage_object *voltage_object = NULL;
+
+ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ voltage_info = (union voltage_object_info *)
+ (adev->mode_info.atom_context->bios + data_offset);
+
+ switch (frev) {
+ case 3:
+ switch (crev) {
+ case 1:
+ voltage_object = (union voltage_object *)
+ amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
+ voltage_type,
+ VOLTAGE_OBJ_SVID2);
+ if (voltage_object) {
+ *svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
+ *svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("unknown voltage object table\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("unknown voltage object table\n");
+ return -EINVAL;
+ }
+
+ }
+ return 0;
+}
+
bool
amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
u8 voltage_type, u8 voltage_mode)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 8c2e69661799..17356151db38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -140,6 +140,8 @@ struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *
uint8_t id);
void amdgpu_atombios_i2c_init(struct amdgpu_device *adev);
+bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev);
+
bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev);
int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
@@ -206,5 +208,19 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
-
+int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
+ u16 voltage_id, u16 *voltage);
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
+ u16 *voltage,
+ u16 leakage_idx);
+void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
+ u16 *vddc, u16 *vddci, u16 *mvdd);
+int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
+ u8 clock_type,
+ u32 clock,
+ bool strobe_mode,
+ struct atom_clock_dividers *dividers);
+int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
+ u8 voltage_type,
+ u8 *svd_gpio_id, u8 *svc_gpio_id);
#endif
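These prototypes export the voltage and clock query helpers that the new SI DPM code consumes. A hedged usage sketch follows; the error-handling structure and local variable names are assumptions, only the signatures come from this header:

/* Sketch only: 'adev' and VOLTAGE_TYPE_VDDC come from the driver. */
static int example_read_board_voltages(struct amdgpu_device *adev)
{
	u16 vddc, vddci, mvdd, max_vddc;
	u8 svd_gpio, svc_gpio;
	int r;

	/* boot-up defaults from the FirmwareInfo table */
	amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd);

	/* maximum VDDC via the SetVoltage command table */
	r = amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, 0, &max_vddc);
	if (r)
		return r;

	/* SVI2 GPIO routing; -EINVAL if no SVID2 voltage object exists */
	return amdgpu_atombios_get_svi2_info(adev, VOLTAGE_TYPE_VDDC,
					     &svd_gpio, &svc_gpio);
}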
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 49de92600074..dae35a96a694 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -29,6 +29,7 @@ struct amdgpu_atpx {
acpi_handle handle;
struct amdgpu_atpx_functions functions;
bool is_hybrid;
+ bool dgpu_req_power_for_displays;
};
static struct amdgpu_atpx_priv {
@@ -73,6 +74,10 @@ bool amdgpu_is_atpx_hybrid(void) {
return amdgpu_atpx_priv.atpx.is_hybrid;
}
+bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
+ return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
+}
+
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -200,19 +205,14 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
printk("ATPX Hybrid Graphics\n");
-#if 1
- /* This is a temporary hack until the D3 cold support
- * makes it upstream. The ATPX power_control method seems
- * to still work on even if the system should be using
- * the new standardized hybrid D3 cold ACPI interface.
- */
- atpx->functions.power_cntl = true;
-#else
atpx->functions.power_cntl = false;
-#endif
atpx->is_hybrid = true;
}
+ atpx->dgpu_req_power_for_displays = false;
+ if (valid_bits & ATPX_DGPU_REQ_POWER_FOR_DISPLAYS)
+ atpx->dgpu_req_power_for_displays = true;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 33e47a43ae32..345305235349 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -39,7 +39,8 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
+ r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
+ false);
if (r)
goto exit_do_move;
r = fence_wait(fence, false);
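amdgpu_copy_buffer() grew a trailing bool, and the benchmark passes false to keep going through the GPU scheduler. A hedged sketch of the updated call; the parameter's meaning (direct ring submission versus scheduled submission) is inferred from this caller and should be treated as an assumption:

struct fence *fence = NULL;
int r;

r = amdgpu_copy_buffer(adev->mman.buffer_funcs_ring,
		       saddr, daddr, size,
		       NULL,	/* no reservation object to sync with */
		       &fence,
		       false);	/* false: submit through the scheduler */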
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index bc0440f7a31d..7a8bfa34682f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -616,7 +616,7 @@ static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, un
return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}
-int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
+static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_clockgating_state state)
{
@@ -637,7 +637,7 @@ int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
return r;
}
-int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
+static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_powergating_state state)
{
@@ -711,6 +711,47 @@ static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode
return -EINVAL;
}
+static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
+ enum cgs_ucode_id type)
+{
+ CGS_FUNC_ADEV;
+ uint16_t fw_version;
+
+ switch (type) {
+ case CGS_UCODE_ID_SDMA0:
+ fw_version = adev->sdma.instance[0].fw_version;
+ break;
+ case CGS_UCODE_ID_SDMA1:
+ fw_version = adev->sdma.instance[1].fw_version;
+ break;
+ case CGS_UCODE_ID_CP_CE:
+ fw_version = adev->gfx.ce_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_PFP:
+ fw_version = adev->gfx.pfp_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_ME:
+ fw_version = adev->gfx.me_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_MEC:
+ fw_version = adev->gfx.mec_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_MEC_JT1:
+ fw_version = adev->gfx.mec_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_MEC_JT2:
+ fw_version = adev->gfx.mec_fw_version;
+ break;
+ case CGS_UCODE_ID_RLC_G:
+ fw_version = adev->gfx.rlc_fw_version;
+ break;
+ default:
+ DRM_ERROR("firmware type %d do not have version\n", type);
+ fw_version = 0;
+ }
+ return fw_version;
+}
+
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
@@ -741,6 +782,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
info->mc_addr = gpu_addr;
info->image_size = data_size;
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+ info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
} else {
char fw_name[30] = {0};
@@ -848,6 +890,12 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
case CGS_SYSTEM_INFO_GFX_SE_INFO:
sys_info->value = adev->gfx.config.max_shader_engines;
break;
+ case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
+ sys_info->value = adev->pdev->subsystem_device;
+ break;
+ case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
+ sys_info->value = adev->pdev->subsystem_vendor;
+ break;
default:
return -ENODEV;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index ff0b55a65ca3..2e3a0543760d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -168,12 +168,12 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
}
/* Any defined maximum tmds clock limit we must not exceed? */
- if (connector->max_tmds_clock > 0) {
+ if (connector->display_info.max_tmds_clock > 0) {
/* mode_clock is clock in kHz for mode to be modeset on this connector */
mode_clock = amdgpu_connector->pixelclock_for_modeset;
/* Maximum allowable input clock in kHz */
- max_tmds_clock = connector->max_tmds_clock * 1000;
+ max_tmds_clock = connector->display_info.max_tmds_clock;
DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
connector->name, mode_clock, max_tmds_clock);
@@ -769,8 +769,10 @@ static void amdgpu_connector_destroy(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_connector->ddc_bus->has_aux)
+ if (amdgpu_connector->ddc_bus->has_aux) {
drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
+ amdgpu_connector->ddc_bus->has_aux = false;
+ }
amdgpu_connector_free_edid(connector);
kfree(amdgpu_connector->con_priv);
drm_connector_unregister(connector);
@@ -1504,6 +1506,88 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.force = amdgpu_connector_dvi_force,
};
+static struct drm_encoder *
+amdgpu_connector_virtual_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_encoder *encoder;
+ int i;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
+ continue;
+
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ return encoder;
+ }
+
+ /* pick the first one */
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
+ return NULL;
+}
+
+static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+
+ if (encoder) {
+ amdgpu_connector_add_common_modes(encoder, connector);
+ }
+
+ return 0;
+}
+
+static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static int
+amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
+{
+ return 0;
+}
+
+static enum drm_connector_status
+amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static int
+amdgpu_connector_virtual_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void amdgpu_connector_virtual_force(struct drm_connector *connector)
+{
+ return;
+}
+
+static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
+ .get_modes = amdgpu_connector_virtual_get_modes,
+ .mode_valid = amdgpu_connector_virtual_mode_valid,
+ .best_encoder = amdgpu_connector_virtual_encoder,
+};
+
+static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
+ .dpms = amdgpu_connector_virtual_dpms,
+ .detect = amdgpu_connector_virtual_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = amdgpu_connector_virtual_set_property,
+ .destroy = amdgpu_connector_destroy,
+ .force = amdgpu_connector_virtual_force,
+};
+
void
amdgpu_connector_add(struct amdgpu_device *adev,
uint32_t connector_id,
@@ -1888,6 +1972,17 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
+ case DRM_MODE_CONNECTOR_VIRTUAL:
+ amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
+ if (!amdgpu_dig_connector)
+ goto failed;
+ amdgpu_connector->con_priv = amdgpu_dig_connector;
+ drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
+ subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0307ff5887c5..b0f6e6957536 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -91,6 +91,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
uint32_t *offset)
{
struct drm_gem_object *gobj;
+ unsigned long size;
gobj = drm_gem_object_lookup(p->filp, data->handle);
if (gobj == NULL)
@@ -101,6 +102,11 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
p->uf_entry.tv.shared = true;
p->uf_entry.user_pages = NULL;
+
+ size = amdgpu_bo_size(p->uf_entry.robj);
+ if (size != PAGE_SIZE || (data->offset + 8) > size)
+ return -EINVAL;
+
*offset = data->offset;
drm_gem_object_unreference_unlocked(gobj);
@@ -235,70 +241,212 @@ free_chunk:
return ret;
}
-/* Returns how many bytes TTM can move per IB.
+/* Convert microseconds to bytes. */
+static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
+{
+ if (us <= 0 || !adev->mm_stats.log2_max_MBps)
+ return 0;
+
+ /* Since accum_us is incremented by a million per second, just
+ * multiply it by the number of MB/s to get the number of bytes.
+ */
+ return us << adev->mm_stats.log2_max_MBps;
+}
+
+static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
+{
+ if (!adev->mm_stats.log2_max_MBps)
+ return 0;
+
+ return bytes >> adev->mm_stats.log2_max_MBps;
+}
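/* A minimal sketch, not from the patch, of the currency model above,
 * assuming an 8 MB/s budget, i.e. log2_max_MBps = 3: since 1 MB/s is one
 * byte per microsecond, a left shift by log2(MBps) converts accumulated
 * microseconds straight into a byte budget, and the inverse shift turns
 * moved bytes back into time.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int log2_max_MBps = 3;	/* assumed 8 MB/s budget */
	int64_t accum_us = 1000000;		/* one second of credit */

	/* us_to_bytes(): 1,000,000 us << 3 = 8,000,000 bytes (~8 MB) */
	uint64_t budget = (uint64_t)accum_us << log2_max_MBps;

	/* bytes_to_us(): moved bytes back to time, subtracted as "debt" */
	int64_t cost = (int64_t)(budget >> log2_max_MBps);

	printf("budget %llu bytes, cost %lld us\n",
	       (unsigned long long)budget, (long long)cost);
	return 0;
}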
+
+/* Returns how many bytes TTM can move right now. If no bytes can be moved,
+ * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
+ * which means it can go over the threshold once. If that happens, the driver
+ * will be in debt and no other buffer migrations can be done until that debt
+ * is repaid.
+ *
+ * This approach allows moving a buffer of any size (it's important to allow
+ * that).
+ *
+ * The currency is simply time in microseconds and it increases as the clock
+ * ticks. The accumulated microseconds (us) are converted to bytes and
+ * returned.
*/
static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
{
- u64 real_vram_size = adev->mc.real_vram_size;
- u64 vram_usage = atomic64_read(&adev->vram_usage);
+ s64 time_us, increment_us;
+ u64 max_bytes;
+ u64 free_vram, total_vram, used_vram;
- /* This function is based on the current VRAM usage.
+ /* Allow a maximum of 200 accumulated ms. This is basically per-IB
+ * throttling.
*
- * - If all of VRAM is free, allow relocating the number of bytes that
- * is equal to 1/4 of the size of VRAM for this IB.
+ * It means that in order to get full max MBps, at least 5 IBs per
+ * second must be submitted and not more than 200ms apart from each
+ * other.
+ */
+ const s64 us_upper_bound = 200000;
- * - If more than one half of VRAM is occupied, only allow relocating
- * 1 MB of data for this IB.
- *
- * - From 0 to one half of used VRAM, the threshold decreases
- * linearly.
- * __________________
- * 1/4 of -|\ |
- * VRAM | \ |
- * | \ |
- * | \ |
- * | \ |
- * | \ |
- * | \ |
- * | \________|1 MB
- * |----------------|
- * VRAM 0 % 100 %
- * used used
- *
- * Note: It's a threshold, not a limit. The threshold must be crossed
- * for buffer relocations to stop, so any buffer of an arbitrary size
- * can be moved as long as the threshold isn't crossed before
- * the relocation takes place. We don't want to disable buffer
- * relocations completely.
+ if (!adev->mm_stats.log2_max_MBps)
+ return 0;
+
+ total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
+ used_vram = atomic64_read(&adev->vram_usage);
+ free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
+
+ spin_lock(&adev->mm_stats.lock);
+
+ /* Increase the amount of accumulated us. */
+ time_us = ktime_to_us(ktime_get());
+ increment_us = time_us - adev->mm_stats.last_update_us;
+ adev->mm_stats.last_update_us = time_us;
+ adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
+ us_upper_bound);
+
+ /* This prevents the short period of low performance when the VRAM
+ * usage is low and the driver is in debt or doesn't have enough
+ * accumulated us to fill VRAM quickly.
*
- * The idea is that buffers should be placed in VRAM at creation time
- * and TTM should only do a minimum number of relocations during
- * command submission. In practice, you need to submit at least
- * a dozen IBs to move all buffers to VRAM if they are in GTT.
+ * The situation can occur in these cases:
+ * - a lot of VRAM is freed by userspace
+ * - the presence of a big buffer causes a lot of evictions
+ * (solution: split buffers into smaller ones)
*
- * Also, things can get pretty crazy under memory pressure and actual
- * VRAM usage can change a lot, so playing safe even at 50% does
- * consistently increase performance.
+ * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
+ * accum_us to a positive number.
+ */
+ if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
+ s64 min_us;
+
+		/* Be more aggressive on dGPUs. Try to fill a portion of free
+ * VRAM now.
+ */
+ if (!(adev->flags & AMD_IS_APU))
+ min_us = bytes_to_us(adev, free_vram / 4);
+ else
+ min_us = 0; /* Reset accum_us on APUs. */
+
+ adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
+ }
+
+ /* This returns 0 if the driver is in debt to disallow (optional)
+ * buffer moves.
+ */
+ max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+
+ spin_unlock(&adev->mm_stats.lock);
+ return max_bytes;
+}
+
+/* Report how many bytes have really been moved for the last command
+ * submission. This can result in a debt that can stop buffer migrations
+ * temporarily.
+ */
+static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
+ u64 num_bytes)
+{
+ spin_lock(&adev->mm_stats.lock);
+ adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
+ spin_unlock(&adev->mm_stats.lock);
+}
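/* A hedged sketch of how the two helpers above cooperate per submission:
 * the byte budget is sampled once, buffers are moved while under it (a
 * single buffer may cross it once, as in amdgpu_cs_bo_validate()), and
 * the real total is reported back, which may push the accumulator
 * negative, i.e. into debt. The struct and names here are illustrative
 * stand-ins, not the driver's.
 */
#include <stdint.h>
#include <stdio.h>

struct mm_stats_sim { int64_t accum_us; unsigned int log2_max_MBps; };

static void simulate_submission(struct mm_stats_sim *s,
				const uint64_t *bo_bytes, int n)
{
	uint64_t threshold = s->accum_us > 0 ?
		(uint64_t)s->accum_us << s->log2_max_MBps : 0;
	uint64_t moved = 0;
	int i;

	for (i = 0; i < n; i++)
		if (moved < threshold)		/* may overshoot once */
			moved += bo_bytes[i];

	/* amdgpu_cs_report_moved_bytes(): charge the time cost */
	s->accum_us -= (int64_t)(moved >> s->log2_max_MBps);
}

int main(void)
{
	struct mm_stats_sim s = { .accum_us = 200000, .log2_max_MBps = 3 };
	uint64_t bos[] = { 4u << 20, 4u << 20 };	/* two 4 MB buffers */

	simulate_submission(&s, bos, 2);
	/* prints a negative value: the submission went into debt */
	printf("accum_us after submission: %lld\n", (long long)s.accum_us);
	return 0;
}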
+
+static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+ struct amdgpu_bo *bo)
+{
+ u64 initial_bytes_moved;
+ uint32_t domain;
+ int r;
+
+ if (bo->pin_count)
+ return 0;
+
+ /* Don't move this buffer if we have depleted our allowance
+ * to move it. Don't move anything if the threshold is zero.
*/
+ if (p->bytes_moved < p->bytes_moved_threshold)
+ domain = bo->prefered_domains;
+ else
+ domain = bo->allowed_domains;
+
+retry:
+ amdgpu_ttm_placement_from_domain(bo, domain);
+ initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ initial_bytes_moved;
+
+ if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+ domain = bo->allowed_domains;
+ goto retry;
+ }
- u64 half_vram = real_vram_size >> 1;
- u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
- u64 bytes_moved_threshold = half_free_vram >> 1;
- return max(bytes_moved_threshold, 1024*1024ull);
+ return r;
}
-int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
+/* Last resort, try to evict something from the current working set */
+static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+ struct amdgpu_bo_list_entry *lobj)
+{
+ uint32_t domain = lobj->robj->allowed_domains;
+ int r;
+
+ if (!p->evictable)
+ return false;
+
+ for (;&p->evictable->tv.head != &p->validated;
+ p->evictable = list_prev_entry(p->evictable, tv.head)) {
+
+ struct amdgpu_bo_list_entry *candidate = p->evictable;
+ struct amdgpu_bo *bo = candidate->robj;
+ u64 initial_bytes_moved;
+ uint32_t other;
+
+ /* If we reached our current BO we can forget it */
+ if (candidate == lobj)
+ break;
+
+ other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+ /* Check if this BO is in one of the domains we need space for */
+ if (!(other & domain))
+ continue;
+
+ /* Check if we can move this BO somewhere else */
+ other = bo->allowed_domains & ~domain;
+ if (!other)
+ continue;
+
+ /* Good we can try to move this BO somewhere else */
+ amdgpu_ttm_placement_from_domain(bo, other);
+ initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ initial_bytes_moved;
+
+ if (unlikely(r))
+ break;
+
+ p->evictable = list_prev_entry(p->evictable, tv.head);
+ list_move(&candidate->tv.head, &p->validated);
+
+ return true;
+ }
+
+ return false;
+}
+
+static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
struct list_head *validated)
{
struct amdgpu_bo_list_entry *lobj;
- u64 initial_bytes_moved;
int r;
list_for_each_entry(lobj, validated, tv.head) {
struct amdgpu_bo *bo = lobj->robj;
bool binding_userptr = false;
struct mm_struct *usermm;
- uint32_t domain;
usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
if (usermm && usermm != current->mm)
@@ -313,35 +461,19 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
binding_userptr = true;
}
- if (bo->pin_count)
- continue;
-
- /* Avoid moving this one if we have moved too many buffers
- * for this IB already.
- *
- * Note that this allows moving at least one buffer of
- * any size, because it doesn't take the current "bo"
- * into account. We don't want to disallow buffer moves
- * completely.
- */
- if (p->bytes_moved <= p->bytes_moved_threshold)
- domain = bo->prefered_domains;
- else
- domain = bo->allowed_domains;
-
- retry:
- amdgpu_ttm_placement_from_domain(bo, domain);
- initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
- initial_bytes_moved;
+ if (p->evictable == lobj)
+ p->evictable = NULL;
- if (unlikely(r)) {
- if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
- domain = bo->allowed_domains;
- goto retry;
- }
+ do {
+ r = amdgpu_cs_bo_validate(p, bo);
+ } while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
+ if (r)
return r;
+
+ if (bo->shadow) {
+ r = amdgpu_cs_bo_validate(p, bo);
+ if (r)
+ return r;
}
if (binding_userptr) {
@@ -386,8 +518,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
&duplicates);
- if (unlikely(r != 0))
+ if (unlikely(r != 0)) {
+ DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
goto error_free_pages;
+ }
/* Without a BO list we don't have userptr BOs */
if (!p->bo_list)
@@ -427,9 +561,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
/* Unreserve everything again. */
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
- /* We tried to often, just abort */
+ /* We tried too many times, just abort */
if (!--tries) {
r = -EDEADLK;
+ DRM_ERROR("deadlock in %s\n", __func__);
goto error_free_pages;
}
@@ -441,11 +576,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
sizeof(struct page*));
if (!e->user_pages) {
r = -ENOMEM;
+ DRM_ERROR("calloc failure in %s\n", __func__);
goto error_free_pages;
}
r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
if (r) {
+ DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
drm_free_large(e->user_pages);
e->user_pages = NULL;
goto error_free_pages;
@@ -460,14 +597,23 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
p->bytes_moved = 0;
+ p->evictable = list_last_entry(&p->validated,
+ struct amdgpu_bo_list_entry,
+ tv.head);
r = amdgpu_cs_list_validate(p, &duplicates);
- if (r)
+ if (r) {
+ DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
goto error_validate;
+ }
r = amdgpu_cs_list_validate(p, &p->validated);
- if (r)
+ if (r) {
+ DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
goto error_validate;
+ }
+
+ amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);
fpriv->vm.last_eviction_counter =
atomic64_read(&p->adev->num_evictions);
@@ -499,8 +645,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
}
- if (p->uf_entry.robj)
- p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
+ if (!r && p->uf_entry.robj) {
+ struct amdgpu_bo *uf = p->uf_entry.robj;
+
+ r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
+ p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
+ }
error_validate:
if (r) {
@@ -617,7 +767,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (bo_va == NULL)
continue;
- r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
@@ -710,6 +860,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
if (r)
return r;
+ if (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
+ parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
+ if (!parser->ctx->preamble_presented) {
+ parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+ parser->ctx->preamble_presented = true;
+ }
+ }
+
if (parser->job->ring && parser->job->ring != ring)
return -EINVAL;
@@ -849,7 +1007,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
}
job->owner = p->filp;
- job->ctx = entity->fence_context;
+ job->fence_ctx = entity->fence_context;
p->fence = fence_get(&job->base.s_fence->finished);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
@@ -1015,3 +1173,29 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
return NULL;
}
+
+/**
+ * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
+ *
+ * @parser: command submission parser context
+ *
+ * Helper for UVD/VCE VM emulation; makes sure BOs are accessible by the system VM.
+ */
+int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
+{
+ unsigned i;
+ int r;
+
+ if (!parser->bo_list)
+ return 0;
+
+ for (i = 0; i < parser->bo_list->num_entries; i++) {
+ struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
+
+ r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+ if (unlikely(r))
+ return r;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 17e13621fae9..e203e5561107 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -60,6 +60,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
amd_sched_entity_fini(&adev->rings[j]->sched,
&ctx->rings[j].entity);
kfree(ctx->fences);
+ ctx->fences = NULL;
return r;
}
return 0;
@@ -77,6 +78,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
for (j = 0; j < amdgpu_sched_jobs; ++j)
fence_put(ctx->rings[i].fences[j]);
kfree(ctx->fences);
+ ctx->fences = NULL;
for (i = 0; i < adev->num_rings; i++)
amd_sched_entity_fini(&adev->rings[i]->sched,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index df7ab2458e50..7dbe85d67d26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -41,16 +41,26 @@
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amd_pcie.h"
+#ifdef CONFIG_DRM_AMDGPU_SI
+#include "si.h"
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "bif/bif_4_1_d.h"
+#include <linux/pci.h>
+#include <linux/firmware.h>
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static const char *amdgpu_asic_name[] = {
+ "TAHITI",
+ "PITCAIRN",
+ "VERDE",
+ "OLAND",
+ "HAINAN",
"BONAIRE",
"KAVERI",
"KABINI",
@@ -101,7 +111,7 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
bool always_indirect)
{
trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-
+
if ((reg * 4) < adev->rmmio_size && !always_indirect)
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
else {
@@ -642,6 +652,46 @@ bool amdgpu_card_posted(struct amdgpu_device *adev)
}
+static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ if (amdgpu_passthrough(adev)) {
+		/* for FIJI: In the whole-GPU pass-through virtualization case, old
+		 * SMC firmware won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH),
+		 * so amdgpu_card_posted() returns false and the driver would incorrectly
+		 * skip vPost. But if we force vPost in the pass-through case, a driver
+		 * reload will hang. So for FIJI, whether to do vPost depends on
+		 * amdgpu_card_posted() if the SMC version is 0x00160e00 or above.
+ */
+ if (adev->asic_type == CHIP_FIJI) {
+ int err;
+ uint32_t fw_ver;
+ err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
+			/* force vPost if an error occurred */
+ if (err)
+ return true;
+
+ fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
+ if (fw_ver >= 0x00160e00)
+ return !amdgpu_card_posted(adev);
+ }
+ } else {
+		/* In the bare-metal case, amdgpu_card_posted() returns false
+		 * after a system reboot/boot and true if the driver has been
+		 * reloaded.
+		 * We shouldn't do vPost after a driver reload, otherwise the
+		 * GPU could hang.
+ */
+ if (amdgpu_card_posted(adev))
+ return false;
+ }
+
+	/* we assume vPost is needed for all other cases */
+ return true;
+}
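/* The FIJI version probe above reads the 70th 32-bit word of the SMC
 * image, i.e. byte offset 69 * 4 = 276. A sketch of the same load with a
 * length check added; memcpy avoids alignment assumptions. The function
 * name is ours, and the layout assumption is only what the driver code
 * itself implies.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static uint32_t fiji_smc_fw_version(const uint8_t *data, size_t size)
{
	uint32_t fw_ver = 0;

	if (size >= 70 * sizeof(uint32_t))
		memcpy(&fw_ver, data + 69 * sizeof(uint32_t), sizeof(fw_ver));

	return fw_ver;	/* compared against 0x00160e00 by the driver */
}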
+
/**
* amdgpu_dummy_page_init - init dummy page used by the driver
*
@@ -1026,7 +1076,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- amdgpu_resume_kms(dev, true, true);
+ amdgpu_device_resume(dev, true, true);
dev->pdev->d3_delay = d3_delay;
@@ -1036,7 +1086,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
printk(KERN_INFO "amdgpu: switched off\n");
drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- amdgpu_suspend_kms(dev, true, true);
+ amdgpu_device_suspend(dev, true, true);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1181,10 +1231,38 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
return 1;
}
+static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+{
+ adev->enable_virtual_display = false;
+
+ if (amdgpu_virtual_display) {
+ struct drm_device *ddev = adev->ddev;
+ const char *pci_address_name = pci_name(ddev->pdev);
+ char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+
+ pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
+ pciaddstr_tmp = pciaddstr;
+ while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+ if (!strcmp(pci_address_name, pciaddname)) {
+ adev->enable_virtual_display = true;
+ break;
+ }
+ }
+
+ DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
+ amdgpu_virtual_display, pci_address_name,
+ adev->enable_virtual_display);
+
+ kfree(pciaddstr);
+ }
+}
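/* The option parsed above is a semicolon-separated list of PCI bus
 * addresses matched against pci_name(). A small userspace-style sketch of
 * the same strsep() matching; the address strings are made up for
 * illustration.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>

static bool virtual_display_wanted(const char *param, const char *pci_name)
{
	char *dup = strdup(param), *cur = dup, *entry;
	bool found = false;

	/* split on ';' and compare each entry to the device address */
	while ((entry = strsep(&cur, ";"))) {
		if (!strcmp(entry, pci_name)) {
			found = true;
			break;
		}
	}
	free(dup);
	return found;
}

int main(void)
{
	printf("%d\n", virtual_display_wanted("0000:01:00.0;0000:02:00.0",
					      "0000:01:00.0"));	/* prints 1 */
	return 0;
}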
+
static int amdgpu_early_init(struct amdgpu_device *adev)
{
int i, r;
+ amdgpu_whether_enable_virtual_display(adev);
+
switch (adev->asic_type) {
case CHIP_TOPAZ:
case CHIP_TONGA:
@@ -1202,6 +1280,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
if (r)
return r;
break;
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_VERDE:
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ adev->family = AMDGPU_FAMILY_SI;
+ r = si_set_ip_blocks(adev);
+ if (r)
+ return r;
+ break;
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
@@ -1318,6 +1408,9 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
continue;
+ if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD ||
+ adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE)
+ continue;
/* enable clockgating to save power */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_GATE);
@@ -1331,6 +1424,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
}
+ adev->ip_block_status[i].late_initialized = true;
}
}
@@ -1376,8 +1470,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_block_status[i].late_initialized)
+ continue;
if (adev->ip_blocks[i].funcs->late_fini)
adev->ip_blocks[i].funcs->late_fini((void *)adev);
+ adev->ip_block_status[i].late_initialized = false;
}
return 0;
@@ -1433,13 +1530,10 @@ static int amdgpu_resume(struct amdgpu_device *adev)
return 0;
}
-static bool amdgpu_device_is_virtual(void)
+static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
-#ifdef CONFIG_X86
- return boot_cpu_has(X86_FEATURE_HYPERVISOR);
-#else
- return false;
-#endif
+ if (amdgpu_atombios_has_gpu_virtualization_table(adev))
+ adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
}
/**
@@ -1461,6 +1555,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
{
int r, i;
bool runtime = false;
+ u32 max_MBps;
adev->shutdown = false;
adev->dev = &pdev->dev;
@@ -1484,6 +1579,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->smc_wreg = &amdgpu_invalid_wreg;
adev->pcie_rreg = &amdgpu_invalid_rreg;
adev->pcie_wreg = &amdgpu_invalid_wreg;
+ adev->pciep_rreg = &amdgpu_invalid_rreg;
+ adev->pciep_wreg = &amdgpu_invalid_wreg;
adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
adev->didt_rreg = &amdgpu_invalid_rreg;
@@ -1520,9 +1617,22 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->didt_idx_lock);
spin_lock_init(&adev->gc_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
+ spin_lock_init(&adev->mm_stats.lock);
+
+ INIT_LIST_HEAD(&adev->shadow_list);
+ mutex_init(&adev->shadow_list_lock);
+
+ INIT_LIST_HEAD(&adev->gtt_list);
+ spin_lock_init(&adev->gtt_list_lock);
+
+ if (adev->asic_type >= CHIP_BONAIRE) {
+ adev->rmmio_base = pci_resource_start(adev->pdev, 5);
+ adev->rmmio_size = pci_resource_len(adev->pdev, 5);
+ } else {
+ adev->rmmio_base = pci_resource_start(adev->pdev, 2);
+ adev->rmmio_size = pci_resource_len(adev->pdev, 2);
+ }
- adev->rmmio_base = pci_resource_start(adev->pdev, 5);
- adev->rmmio_size = pci_resource_len(adev->pdev, 5);
adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
if (adev->rmmio == NULL) {
return -ENOMEM;
@@ -1530,8 +1640,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- /* doorbell bar mapping */
- amdgpu_doorbell_init(adev);
+ if (adev->asic_type >= CHIP_BONAIRE)
+ /* doorbell bar mapping */
+ amdgpu_doorbell_init(adev);
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -1579,25 +1690,24 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
- /* See if the asic supports SR-IOV */
- adev->virtualization.supports_sr_iov =
- amdgpu_atombios_has_gpu_virtualization_table(adev);
-
- /* Check if we are executing in a virtualized environment */
- adev->virtualization.is_virtual = amdgpu_device_is_virtual();
- adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+ /* detect if we are with an SRIOV vbios */
+ amdgpu_device_detect_sriov_bios(adev);
/* Post card if necessary */
- if (!amdgpu_card_posted(adev) ||
- (adev->virtualization.is_virtual &&
- !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
+ if (amdgpu_vpost_needed(adev)) {
if (!adev->bios) {
- dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
+ dev_err(adev->dev, "no vBIOS found\n");
r = -EINVAL;
goto failed;
}
- DRM_INFO("GPU not posted. posting now...\n");
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ DRM_INFO("GPU posting now...\n");
+ r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (r) {
+ dev_err(adev->dev, "gpu post error!\n");
+ goto failed;
+ }
+ } else {
+ DRM_INFO("GPU post is not needed\n");
}
/* Initialize clocks */
@@ -1628,6 +1738,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->accel_working = true;
+ /* Initialize the buffer migration limit. */
+ if (amdgpu_moverate >= 0)
+ max_MBps = amdgpu_moverate;
+ else
+ max_MBps = 8; /* Allow 8 MB/s. */
+ /* Get a log2 for easy divisions. */
+ adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
+
amdgpu_fbdev_init(adev);
r = amdgpu_ib_pool_init(adev);
@@ -1708,11 +1826,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
DRM_INFO("amdgpu: finishing device.\n");
adev->shutdown = true;
+ drm_crtc_force_disable_all(adev->ddev);
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_ib_pool_fini(adev);
amdgpu_fence_driver_fini(adev);
- drm_crtc_force_disable_all(adev->ddev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
kfree(adev->ip_block_status);
@@ -1732,7 +1850,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rio_mem = NULL;
iounmap(adev->rmmio);
adev->rmmio = NULL;
- amdgpu_doorbell_fini(adev);
+ if (adev->asic_type >= CHIP_BONAIRE)
+ amdgpu_doorbell_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
amdgpu_debugfs_remove_files(adev);
}
@@ -1742,7 +1861,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
* Suspend & resume.
*/
/**
- * amdgpu_suspend_kms - initiate device suspend
+ * amdgpu_device_suspend - initiate device suspend
*
* @pdev: drm dev pointer
* @state: suspend state
@@ -1751,7 +1870,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
* Returns 0 for success or an error on failure.
* Called at driver suspend.
*/
-int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
+int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
struct amdgpu_device *adev;
struct drm_crtc *crtc;
@@ -1819,6 +1938,10 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
+ } else {
+ r = amdgpu_asic_reset(adev);
+ if (r)
+ DRM_ERROR("amdgpu asic reset failed\n");
}
if (fbcon) {
@@ -1830,7 +1953,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
}
/**
- * amdgpu_resume_kms - initiate device resume
+ * amdgpu_device_resume - initiate device resume
*
* @pdev: drm dev pointer
*
@@ -1838,7 +1961,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
* Returns 0 for success or an error on failure.
* Called at driver resume.
*/
-int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
struct drm_connector *connector;
struct amdgpu_device *adev = dev->dev_private;
@@ -1848,22 +1971,26 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (fbcon) {
+ if (fbcon)
console_lock();
- }
+
if (resume) {
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
- if (pci_enable_device(dev->pdev)) {
+ r = pci_enable_device(dev->pdev);
+ if (r) {
if (fbcon)
console_unlock();
- return -1;
+ return r;
}
}
/* post card */
- if (!amdgpu_card_posted(adev))
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (!amdgpu_card_posted(adev) || !resume) {
+ r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (r)
+ DRM_ERROR("amdgpu asic init failed\n");
+ }
r = amdgpu_resume(adev);
if (r)
@@ -1937,6 +2064,126 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
return 0;
}
+static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
+{
+ int i;
+ bool asic_hang = false;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_blocks[i].funcs->check_soft_reset)
+ adev->ip_blocks[i].funcs->check_soft_reset(adev);
+ if (adev->ip_block_status[i].hang) {
+			DRM_INFO("IP block:%d is hung!\n", i);
+ asic_hang = true;
+ }
+ }
+ return asic_hang;
+}
+
+static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_block_status[i].hang &&
+ adev->ip_blocks[i].funcs->pre_soft_reset) {
+ r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
+{
+ if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
+		DRM_INFO("Some blocks need a full reset!\n");
+ return true;
+ }
+ return false;
+}
+
+static int amdgpu_soft_reset(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_block_status[i].hang &&
+ adev->ip_blocks[i].funcs->soft_reset) {
+ r = adev->ip_blocks[i].funcs->soft_reset(adev);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_block_status[i].hang &&
+ adev->ip_blocks[i].funcs->post_soft_reset)
+ r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+bool amdgpu_need_backup(struct amdgpu_device *adev)
+{
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+	return amdgpu_lockup_timeout > 0;
+}
+
+static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct fence **fence)
+{
+ uint32_t domain;
+ int r;
+
+ if (!bo->shadow)
+ return 0;
+
+ r = amdgpu_bo_reserve(bo, false);
+ if (r)
+ return r;
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ /* if bo has been evicted, then no need to recover */
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
+ NULL, fence, true);
+ if (r) {
+ DRM_ERROR("recover page table failed!\n");
+ goto err;
+ }
+ }
+err:
+ amdgpu_bo_unreserve(bo);
+ return r;
+}
+
/**
* amdgpu_gpu_reset - reset the asic
*
@@ -1949,6 +2196,12 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
int i, r;
int resched;
+ bool need_full_reset;
+
+ if (!amdgpu_check_soft_reset(adev)) {
+ DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
+ return 0;
+ }
atomic_inc(&adev->gpu_reset_counter);
@@ -1967,40 +2220,93 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(adev);
- /* save scratch */
- amdgpu_atombios_scratch_regs_save(adev);
- r = amdgpu_suspend(adev);
+ need_full_reset = amdgpu_need_full_reset(adev);
-retry:
- /* Disable fb access */
- if (adev->mode_info.num_crtc) {
- struct amdgpu_mode_mc_save save;
- amdgpu_display_stop_mc_access(adev, &save);
- amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+ if (!need_full_reset) {
+ amdgpu_pre_soft_reset(adev);
+ r = amdgpu_soft_reset(adev);
+ amdgpu_post_soft_reset(adev);
+ if (r || amdgpu_check_soft_reset(adev)) {
+			DRM_INFO("soft reset failed, will fall back to full reset!\n");
+ need_full_reset = true;
+ }
}
- r = amdgpu_asic_reset(adev);
- /* post card */
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (need_full_reset) {
+ /* save scratch */
+ amdgpu_atombios_scratch_regs_save(adev);
+ r = amdgpu_suspend(adev);
- if (!r) {
- dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
- r = amdgpu_resume(adev);
+retry:
+ /* Disable fb access */
+ if (adev->mode_info.num_crtc) {
+ struct amdgpu_mode_mc_save save;
+ amdgpu_display_stop_mc_access(adev, &save);
+ amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+ }
+
+ r = amdgpu_asic_reset(adev);
+ /* post card */
+ amdgpu_atom_asic_init(adev->mode_info.atom_context);
+
+ if (!r) {
+ dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
+ r = amdgpu_resume(adev);
+ }
+ /* restore scratch */
+ amdgpu_atombios_scratch_regs_restore(adev);
}
- /* restore scratch */
- amdgpu_atombios_scratch_regs_restore(adev);
if (!r) {
+ amdgpu_irq_gpu_reset_resume_helper(adev);
+ if (need_full_reset && amdgpu_need_backup(adev)) {
+ r = amdgpu_ttm_recover_gart(adev);
+ if (r)
+ DRM_ERROR("gart recovery failed!!!\n");
+ }
r = amdgpu_ib_ring_tests(adev);
if (r) {
dev_err(adev->dev, "ib ring test failed (%d).\n", r);
r = amdgpu_suspend(adev);
+ need_full_reset = true;
goto retry;
}
+		/* Recover the VM page tables, since we cannot rely on VRAM
+		 * being consistent after a full GPU reset.
+ */
+ if (need_full_reset && amdgpu_need_backup(adev)) {
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct amdgpu_bo *bo, *tmp;
+ struct fence *fence = NULL, *next = NULL;
+
+ DRM_INFO("recover vram bo from shadow\n");
+ mutex_lock(&adev->shadow_list_lock);
+ list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+ amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
+ if (fence) {
+ r = fence_wait(fence, false);
+ if (r) {
+					WARN(r, "recovery from shadow isn't completed\n");
+ break;
+ }
+ }
+ fence_put(fence);
+ fence = next;
+ }
+ mutex_unlock(&adev->shadow_list_lock);
+ if (fence) {
+ r = fence_wait(fence, false);
+ if (r)
+				WARN(r, "recovery from shadow isn't completed\n");
+ }
+ fence_put(fence);
+ }
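/* The shadow-recovery loop above pipelines the work: it issues the copy
 * for the next BO before waiting on the previous fence, so transfer and
 * wait overlap, and a final wait drains the last fence. A sketch of that
 * shape with fences modeled as plain ints; all names here are ours.
 */
#include <stdio.h>

static int issue_copy(int bo) { printf("copy bo %d\n", bo); return bo; }
static int wait_fence(int f)  { printf("wait fence %d\n", f); return 0; }

static int recover_all(int nbos)
{
	int i, r = 0, prev = -1;

	for (i = 0; i < nbos && !r; i++) {
		int next = issue_copy(i);	/* kick off the next copy */
		if (prev >= 0)
			r = wait_fence(prev);	/* then wait on the last one */
		prev = next;
	}
	if (!r && prev >= 0)
		r = wait_fence(prev);		/* drain the final fence */
	return r;
}

int main(void) { return recover_all(3); }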
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring)
continue;
+
amd_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
}
@@ -2020,7 +2326,6 @@ retry:
/* bad news, how to tell it to userspace ? */
dev_info(adev->dev, "GPU reset failed\n");
}
- amdgpu_irq_gpu_reset_resume_helper(adev);
return r;
}
@@ -2178,22 +2483,26 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
- bool use_bank;
+ bool pm_pg_lock, use_bank;
unsigned instance_bank, sh_bank, se_bank;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ /* are we reading registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
if (*pos & (1ULL << 62)) {
se_bank = (*pos >> 24) & 0x3FF;
sh_bank = (*pos >> 34) & 0x3FF;
instance_bank = (*pos >> 44) & 0x3FF;
use_bank = 1;
- *pos &= 0xFFFFFF;
} else {
use_bank = 0;
}
+ *pos &= 0x3FFFF;
+
if (use_bank) {
if (sh_bank >= adev->gfx.config.max_sh_per_se ||
se_bank >= adev->gfx.config.max_shader_engines)
@@ -2203,6 +2512,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
sh_bank, instance_bank);
}
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
while (size) {
uint32_t value;
@@ -2228,6 +2540,9 @@ end:
mutex_unlock(&adev->grbm_idx_mutex);
}
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
return result;
}
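/* How a hypothetical userspace tool would compose the file offset that
 * the read path above decodes: bit 62 enables SE/SH/instance banking
 * (10 bits each at 24/34/44), bit 23 takes the PM mutex, and the low
 * bits hold the register's byte offset (the kernel masks them with
 * 0x3FFFF). A sketch only; reg is a dword register number.
 */
#include <stdint.h>

static uint64_t amdgpu_regs_offset(uint32_t reg, int use_bank,
				   uint32_t se, uint32_t sh, uint32_t instance,
				   int pm_pg_lock)
{
	uint64_t pos = (uint64_t)reg * 4;	/* dword-aligned byte offset */

	if (pm_pg_lock)
		pos |= 1ULL << 23;
	if (use_bank) {
		pos |= 1ULL << 62;
		pos |= (uint64_t)(se & 0x3FF) << 24;
		pos |= (uint64_t)(sh & 0x3FF) << 34;
		pos |= (uint64_t)(instance & 0x3FF) << 44;
	}
	return pos;	/* use with pread(fd, buf, 4, pos) */
}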
@@ -2385,7 +2700,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
while (size) {
uint32_t value;
- value = RREG32_SMC(*pos >> 2);
+ value = RREG32_SMC(*pos);
r = put_user(value, (uint32_t *)buf);
if (r)
return r;
@@ -2416,7 +2731,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
if (r)
return r;
- WREG32_SMC(*pos >> 2, value);
+ WREG32_SMC(*pos, value);
result += 4;
buf += 4;
@@ -2438,12 +2753,12 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+ config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
if (!config)
return -ENOMEM;
/* version, increment each time something is added */
- config[no_regs++] = 0;
+ config[no_regs++] = 2;
config[no_regs++] = adev->gfx.config.max_shader_engines;
config[no_regs++] = adev->gfx.config.max_tile_pipes;
config[no_regs++] = adev->gfx.config.max_cu_per_sh;
@@ -2468,6 +2783,15 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
config[no_regs++] = adev->gfx.config.gb_addr_config;
config[no_regs++] = adev->gfx.config.num_rbs;
+ /* rev==1 */
+ config[no_regs++] = adev->rev_id;
+ config[no_regs++] = adev->pg_flags;
+ config[no_regs++] = adev->cg_flags;
+
+ /* rev==2 */
+ config[no_regs++] = adev->family;
+ config[no_regs++] = adev->external_rev_id;
+
while (size && (*pos < no_regs * 4)) {
uint32_t value;
@@ -2488,6 +2812,29 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
return result;
}
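/* The config dump above is extended only by appending: word 0 carries the
 * revision, and the rev==1 / rev==2 fields follow the original layout, so
 * an older reader simply stops early. A thin sketch of a tolerant parser;
 * the per-field positions beyond word 0 are omitted here.
 */
#include <stdint.h>
#include <stddef.h>

static int parse_gca_config(const uint32_t *words, size_t nwords)
{
	uint32_t version;

	if (nwords < 1)
		return -1;

	version = words[0];
	if (version >= 1) {
		/* rev_id, pg_flags, cg_flags follow the rev 0 block */
	}
	if (version >= 2) {
		/* family and external_rev_id follow the rev 1 block */
	}
	return (int)version;	/* unknown trailing words are ignored */
}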
+static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int idx, r;
+ int32_t value;
+
+ if (size != 4 || *pos & 0x3)
+ return -EINVAL;
+
+ /* convert offset to sensor number */
+ idx = *pos >> 2;
+
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+ r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value);
+ else
+ return -EINVAL;
+
+ if (!r)
+ r = put_user(value, (int32_t *)buf);
+
+ return !r ? 4 : r;
+}
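/* Reading one sensor through the new amdgpu_sensors file: the offset is
 * 4 * sensor index and exactly 4 bytes must be requested, matching the
 * size/alignment checks above. The index space is whatever the powerplay
 * read_sensor() implementation defines and is not enumerated here; the
 * helper name is ours.
 */
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

static int read_amdgpu_sensor(const char *path, int idx, int32_t *value)
{
	int fd = open(path, O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = pread(fd, value, sizeof(*value), (off_t)idx * 4);
	close(fd);
	return n == sizeof(*value) ? 0 : -1;
}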
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
@@ -2520,12 +2867,19 @@ static const struct file_operations amdgpu_debugfs_gca_config_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_sensors_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_sensor_read,
+ .llseek = default_llseek
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
&amdgpu_debugfs_regs_pcie_fops,
&amdgpu_debugfs_regs_smc_fops,
&amdgpu_debugfs_gca_config_fops,
+ &amdgpu_debugfs_sensors_fops,
};
static const char *debugfs_regs_names[] = {
@@ -2534,6 +2888,7 @@ static const char *debugfs_regs_names[] = {
"amdgpu_regs_pcie",
"amdgpu_regs_smc",
"amdgpu_gca_config",
+ "amdgpu_sensors",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 76f96028313d..083e2b429872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -41,7 +41,7 @@ static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
container_of(cb, struct amdgpu_flip_work, cb);
fence_put(f);
- schedule_work(&work->flip_work);
+ schedule_work(&work->flip_work.work);
}
static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
@@ -63,16 +63,17 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
static void amdgpu_flip_work_func(struct work_struct *__work)
{
+ struct delayed_work *delayed_work =
+ container_of(__work, struct delayed_work, work);
struct amdgpu_flip_work *work =
- container_of(__work, struct amdgpu_flip_work, flip_work);
+ container_of(delayed_work, struct amdgpu_flip_work, flip_work);
struct amdgpu_device *adev = work->adev;
struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
struct drm_crtc *crtc = &amdgpuCrtc->base;
unsigned long flags;
- unsigned i, repcnt = 4;
- int vpos, hpos, stat, min_udelay = 0;
- struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+ unsigned i;
+ int vpos, hpos;
if (amdgpu_flip_handle_fence(work, &work->excl))
return;
@@ -81,55 +82,23 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
if (amdgpu_flip_handle_fence(work, &work->shared[i]))
return;
- /* We borrow the event spin lock for protecting flip_status */
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
- /* If this happens to execute within the "virtually extended" vblank
- * interval before the start of the real vblank interval then it needs
- * to delay programming the mmio flip until the real vblank is entered.
- * This prevents completing a flip too early due to the way we fudge
- * our vblank counter and vblank timestamps in order to work around the
- * problem that the hw fires vblank interrupts before actual start of
- * vblank (when line buffer refilling is done for a frame). It
- * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
- * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
- *
- * In practice this won't execute very often unless on very fast
- * machines because the time window for this to happen is very small.
+ /* Wait until we're out of the vertical blank period before the one
+ * targeted by the flip
*/
- while (amdgpuCrtc->enabled && --repcnt) {
- /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
- * start in hpos, and to the "fudged earlier" vblank start in
- * vpos.
- */
- stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
- GET_DISTANCE_TO_VBLANKSTART,
- &vpos, &hpos, NULL, NULL,
- &crtc->hwmode);
-
- if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
- (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
- !(vpos >= 0 && hpos <= 0))
- break;
-
- /* Sleep at least until estimated real start of hw vblank */
- min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
- if (min_udelay > vblank->framedur_ns / 2000) {
- /* Don't wait ridiculously long - something is wrong */
- repcnt = 0;
- break;
- }
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- usleep_range(min_udelay, 2 * min_udelay);
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (amdgpuCrtc->enabled &&
+ (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+ &vpos, &hpos, NULL, NULL,
+ &crtc->hwmode)
+ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+ (int)(work->target_vblank -
+ amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+ schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
+ return;
}
- if (!repcnt)
- DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
- "framedur %d, linedur %d, stat %d, vpos %d, "
- "hpos %d\n", work->crtc_id, min_udelay,
- vblank->framedur_ns / 1000,
- vblank->linedur_ns / 1000, stat, vpos, hpos);
+ /* We borrow the event spin lock for protecting flip_status */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
/* Do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
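/* The re-queue test above evaluates "target_vblank - current > 0" as a
 * signed int: subtracting unsigned 32-bit counters and casting keeps the
 * ordering test correct even when the vblank counter wraps. A sketch of
 * that idiom (the helper name is ours):
 */
#include <stdint.h>

static inline int before_target(uint32_t current_count, uint32_t target)
{
	return (int32_t)(target - current_count) > 0;	/* still waiting */
}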
@@ -154,25 +123,25 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
int r;
/* unpin of the old buffer */
- r = amdgpu_bo_reserve(work->old_rbo, false);
+ r = amdgpu_bo_reserve(work->old_abo, false);
if (likely(r == 0)) {
- r = amdgpu_bo_unpin(work->old_rbo);
+ r = amdgpu_bo_unpin(work->old_abo);
if (unlikely(r != 0)) {
DRM_ERROR("failed to unpin buffer after flip\n");
}
- amdgpu_bo_unreserve(work->old_rbo);
+ amdgpu_bo_unreserve(work->old_abo);
} else
DRM_ERROR("failed to reserve buffer after flip\n");
- amdgpu_bo_unref(&work->old_rbo);
+ amdgpu_bo_unref(&work->old_abo);
kfree(work->shared);
kfree(work);
}
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags, uint32_t target)
{
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
@@ -181,7 +150,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
struct amdgpu_framebuffer *new_amdgpu_fb;
struct drm_gem_object *obj;
struct amdgpu_flip_work *work;
- struct amdgpu_bo *new_rbo;
+ struct amdgpu_bo *new_abo;
unsigned long flags;
u64 tiling_flags;
u64 base;
@@ -191,7 +160,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
if (work == NULL)
return -ENOMEM;
- INIT_WORK(&work->flip_work, amdgpu_flip_work_func);
+ INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
work->event = event;
@@ -204,28 +173,28 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
obj = old_amdgpu_fb->obj;
/* take a reference to the old object */
- work->old_rbo = gem_to_amdgpu_bo(obj);
- amdgpu_bo_ref(work->old_rbo);
+ work->old_abo = gem_to_amdgpu_bo(obj);
+ amdgpu_bo_ref(work->old_abo);
new_amdgpu_fb = to_amdgpu_framebuffer(fb);
obj = new_amdgpu_fb->obj;
- new_rbo = gem_to_amdgpu_bo(obj);
+ new_abo = gem_to_amdgpu_bo(obj);
/* pin the new buffer */
- r = amdgpu_bo_reserve(new_rbo, false);
+ r = amdgpu_bo_reserve(new_abo, false);
if (unlikely(r != 0)) {
- DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+ DRM_ERROR("failed to reserve new abo buffer before flip\n");
goto cleanup;
}
- r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
+ r = amdgpu_bo_pin_restricted(new_abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
if (unlikely(r != 0)) {
r = -EINVAL;
- DRM_ERROR("failed to pin new rbo buffer before flip\n");
+ DRM_ERROR("failed to pin new abo buffer before flip\n");
goto unreserve;
}
- r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
+ r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
@@ -233,16 +202,12 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
goto unpin;
}
- amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
- amdgpu_bo_unreserve(new_rbo);
+ amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
+ amdgpu_bo_unreserve(new_abo);
work->base = base;
-
- r = drm_crtc_vblank_get(crtc);
- if (r) {
- DRM_ERROR("failed to get vblank before flip\n");
- goto pflip_cleanup;
- }
+ work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+ amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
	/* we borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -250,7 +215,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
r = -EBUSY;
- goto vblank_cleanup;
+ goto pflip_cleanup;
}
amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
@@ -262,26 +227,23 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
/* update crtc fb */
crtc->primary->fb = fb;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- amdgpu_flip_work_func(&work->flip_work);
+ amdgpu_flip_work_func(&work->flip_work.work);
return 0;
-vblank_cleanup:
- drm_crtc_vblank_put(crtc);
-
pflip_cleanup:
- if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
- DRM_ERROR("failed to reserve new rbo in error path\n");
+ if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
+ DRM_ERROR("failed to reserve new abo in error path\n");
goto cleanup;
}
unpin:
- if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
- DRM_ERROR("failed to unpin new rbo in error path\n");
+ if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
+ DRM_ERROR("failed to unpin new abo in error path\n");
}
unreserve:
- amdgpu_bo_unreserve(new_rbo);
+ amdgpu_bo_unreserve(new_abo);
cleanup:
- amdgpu_bo_unref(&work->old_rbo);
+ amdgpu_bo_unref(&work->old_abo);
fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
fence_put(work->shared[i]);
@@ -335,7 +297,7 @@ int amdgpu_crtc_set_config(struct drm_mode_set *set)
return ret;
}
-static const char *encoder_names[38] = {
+static const char *encoder_names[41] = {
"NONE",
"INTERNAL_LVDS",
"INTERNAL_TMDS1",
@@ -374,6 +336,9 @@ static const char *encoder_names[38] = {
"TRAVIS",
"INTERNAL_VCE",
"INTERNAL_UNIPHY3",
+ "HDMI_ANX9805",
+ "INTERNAL_AMCLK",
+ "VIRTUAL",
};
static const char *hpd_names[6] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 9aa533cf4ad1..71ed27eb3dde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -53,13 +53,19 @@
* - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
* at the end of IBs.
* - 3.3.0 - Add VM support for UVD on supported hardware.
+ * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
+ * - 3.5.0 - Add support for new UVD_NO_OP register.
+ * - 3.6.0 - KMD now uses CONTEXT_CONTROL in the ring buffer.
+ * - 3.7.0 - Add support for VCE clock list packet
+ * - 3.8.0 - Add support for raster config init in the kernel
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 3
+#define KMS_DRIVER_MINOR 8
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
int amdgpu_gart_size = -1; /* auto */
+int amdgpu_moverate = -1; /* auto */
int amdgpu_benchmarking = 0;
int amdgpu_testing = 0;
int amdgpu_audio = -1;
@@ -84,11 +90,14 @@ int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_powerplay = -1;
int amdgpu_powercontainment = 1;
+int amdgpu_sclk_deep_sleep_en = 1;
unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;
unsigned amdgpu_cg_mask = 0xffffffff;
unsigned amdgpu_pg_mask = 0xffffffff;
char *amdgpu_disable_cu = NULL;
+char *amdgpu_virtual_display = NULL;
+unsigned amdgpu_pp_feature_mask = 0xffffffff;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -96,6 +105,9 @@ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
module_param_named(gartsize, amdgpu_gart_size, int, 0600);
+MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0 or 1=disabled)");
+module_param_named(moverate, amdgpu_moverate, int, 0600);
+
MODULE_PARM_DESC(benchmark, "Run benchmark");
module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
@@ -162,13 +174,17 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
-#ifdef CONFIG_DRM_AMD_POWERPLAY
MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(powerplay, amdgpu_powerplay, int, 0444);
MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
-#endif
+
+MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default)");
+module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
+
+MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)");
+module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444);
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
@@ -185,7 +201,84 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
+MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (a semicolon-separated list of PCI addresses, e.g. xxxx:xx:xx.x;xxxx:xx:xx.x)");
+module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
+
static const struct pci_device_id pciidlist[] = {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+ {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+ {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+ {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+ {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+ {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+ {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+ {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+ {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+ {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+ {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+ {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+ {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+ {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
@@ -341,7 +434,7 @@ static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev)
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
- remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
+ drm_fb_helper_remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
kfree(ap);
return 0;
@@ -383,32 +476,70 @@ amdgpu_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
+static void
+amdgpu_pci_shutdown(struct pci_dev *pdev)
+{
+	/* if we are running in a VM, make sure the device
+	 * is torn down properly on reboot/shutdown.
+	 * Unfortunately we can't detect certain
+	 * hypervisors, so just do this all the time.
+	 */
+ amdgpu_pci_remove(pdev);
+}
+
static int amdgpu_pmops_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_suspend_kms(drm_dev, true, true);
+ return amdgpu_device_suspend(drm_dev, true, true);
}
static int amdgpu_pmops_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_resume_kms(drm_dev, true, true);
+
+ /* GPU comes up enabled by the bios on resume */
+ if (amdgpu_device_is_px(drm_dev)) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return amdgpu_device_resume(drm_dev, true, true);
}
static int amdgpu_pmops_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_suspend_kms(drm_dev, false, true);
+ return amdgpu_device_suspend(drm_dev, false, true);
}
static int amdgpu_pmops_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ return amdgpu_device_resume(drm_dev, false, true);
+}
+
+static int amdgpu_pmops_poweroff(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ return amdgpu_device_suspend(drm_dev, true, true);
+}
+
+static int amdgpu_pmops_restore(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_resume_kms(drm_dev, false, true);
+ return amdgpu_device_resume(drm_dev, false, true);
}
static int amdgpu_pmops_runtime_suspend(struct device *dev)
@@ -426,7 +557,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
drm_kms_helper_poll_disable(drm_dev);
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
- ret = amdgpu_suspend_kms(drm_dev, false, false);
+ ret = amdgpu_device_suspend(drm_dev, false, false);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
@@ -459,7 +590,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- ret = amdgpu_resume_kms(drm_dev, false, false);
+ ret = amdgpu_device_resume(drm_dev, false, false);
drm_kms_helper_poll_enable(drm_dev);
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
@@ -513,8 +644,8 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
.resume = amdgpu_pmops_resume,
.freeze = amdgpu_pmops_freeze,
.thaw = amdgpu_pmops_thaw,
- .poweroff = amdgpu_pmops_freeze,
- .restore = amdgpu_pmops_resume,
+ .poweroff = amdgpu_pmops_poweroff,
+ .restore = amdgpu_pmops_restore,
.runtime_suspend = amdgpu_pmops_runtime_suspend,
.runtime_resume = amdgpu_pmops_runtime_resume,
.runtime_idle = amdgpu_pmops_runtime_idle,
@@ -596,6 +727,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.id_table = pciidlist,
.probe = amdgpu_pci_probe,
.remove = amdgpu_pci_remove,
+ .shutdown = amdgpu_pci_shutdown,
.driver.pm = &amdgpu_pm_ops,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 919146780a15..9fb8aa4d6bae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -25,7 +25,7 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
+#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
@@ -48,8 +48,35 @@ struct amdgpu_fbdev {
struct amdgpu_device *adev;
};
+static int
+amdgpufb_open(struct fb_info *info, int user)
+{
+ struct amdgpu_fbdev *rfbdev = info->par;
+ struct amdgpu_device *adev = rfbdev->adev;
+ int ret = pm_runtime_get_sync(adev->ddev->dev);
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ return ret;
+ }
+ return 0;
+}
+
+static int
+amdgpufb_release(struct fb_info *info, int user)
+{
+ struct amdgpu_fbdev *rfbdev = info->par;
+ struct amdgpu_device *adev = rfbdev->adev;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ return 0;
+}
+
static struct fb_ops amdgpufb_ops = {
.owner = THIS_MODULE,
+ .fb_open = amdgpufb_open,
+ .fb_release = amdgpufb_release,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
@@ -88,14 +115,14 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
{
- struct amdgpu_bo *rbo = gem_to_amdgpu_bo(gobj);
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
int ret;
- ret = amdgpu_bo_reserve(rbo, false);
+ ret = amdgpu_bo_reserve(abo, false);
if (likely(ret == 0)) {
- amdgpu_bo_kunmap(rbo);
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_kunmap(abo);
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
}
drm_gem_object_unreference_unlocked(gobj);
}
@@ -106,7 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
{
struct amdgpu_device *adev = rfbdev->adev;
struct drm_gem_object *gobj = NULL;
- struct amdgpu_bo *rbo = NULL;
+ struct amdgpu_bo *abo = NULL;
bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0;
int ret;
@@ -132,30 +159,30 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
aligned_size);
return -ENOMEM;
}
- rbo = gem_to_amdgpu_bo(gobj);
+ abo = gem_to_amdgpu_bo(gobj);
if (fb_tiled)
tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);
- ret = amdgpu_bo_reserve(rbo, false);
+ ret = amdgpu_bo_reserve(abo, false);
if (unlikely(ret != 0))
goto out_unref;
if (tiling_flags) {
- ret = amdgpu_bo_set_tiling_flags(rbo,
+ ret = amdgpu_bo_set_tiling_flags(abo,
tiling_flags);
if (ret)
dev_err(adev->dev, "FB failed to set tiling flags\n");
}
- ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
+ ret = amdgpu_bo_pin_restricted(abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
if (ret) {
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unreserve(abo);
goto out_unref;
}
- ret = amdgpu_bo_kmap(rbo, NULL);
- amdgpu_bo_unreserve(rbo);
+ ret = amdgpu_bo_kmap(abo, NULL);
+ amdgpu_bo_unreserve(abo);
if (ret) {
goto out_unref;
}
@@ -177,7 +204,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb = NULL;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
- struct amdgpu_bo *rbo = NULL;
+ struct amdgpu_bo *abo = NULL;
int ret;
unsigned long tmp;
@@ -196,7 +223,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
return ret;
}
- rbo = gem_to_amdgpu_bo(gobj);
+ abo = gem_to_amdgpu_bo(gobj);
/* okay we have an object now allocate the framebuffer */
info = drm_fb_helper_alloc_fbi(helper);
@@ -219,7 +246,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
/* setup helper */
rfbdev->helper.fb = fb;
- memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
+ memset_io(abo->kptr, 0x0, amdgpu_bo_size(abo));
strcpy(info->fix.id, "amdgpudrmfb");
@@ -228,11 +255,11 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &amdgpufb_ops;
- tmp = amdgpu_bo_gpu_offset(rbo) - adev->mc.vram_start;
+ tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start;
info->fix.smem_start = adev->mc.aper_base + tmp;
- info->fix.smem_len = amdgpu_bo_size(rbo);
- info->screen_base = rbo->kptr;
- info->screen_size = amdgpu_bo_size(rbo);
+ info->fix.smem_len = amdgpu_bo_size(abo);
+ info->screen_base = abo->kptr;
+ info->screen_size = amdgpu_bo_size(abo);
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
@@ -249,7 +276,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base);
- DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(rbo));
+ DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
DRM_INFO("fb depth is %d\n", fb->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
@@ -259,7 +286,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unref:
- if (rbo) {
+ if (abo) {
}
if (fb && ret) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 0b109aebfec6..3a2e42f4b897 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -454,6 +454,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
+ ring->fence_drv.fences = NULL;
ring->fence_drv.initialized = false;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 921bce2df0b0..21a1242fc13b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
* Unbinds the requested pages from the gart page table and
* replaces them with the dummy page (all asics).
*/
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages)
{
unsigned t;
@@ -238,7 +238,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
adev->gart.pages[p] = NULL;
#endif
page_base = adev->dummy_page.addr;
@@ -268,7 +268,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint32_t flags)
{
@@ -286,7 +286,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
adev->gart.pages[p] = pagelist[i];
#endif
if (adev->gart.ptr) {
@@ -331,7 +331,7 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
/* Allocate pages table */
adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
if (adev->gart.pages == NULL) {
@@ -357,7 +357,7 @@ void amdgpu_gart_fini(struct amdgpu_device *adev)
amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
}
adev->gart.ready = false;
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
vfree(adev->gart.pages);
adev->gart.pages = NULL;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
index 503d54098128..e73728d90388 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
@@ -31,14 +31,6 @@
#define AMDGPU_GWS_SHIFT PAGE_SHIFT
#define AMDGPU_OA_SHIFT PAGE_SHIFT
-#define AMDGPU_PL_GDS TTM_PL_PRIV0
-#define AMDGPU_PL_GWS TTM_PL_PRIV1
-#define AMDGPU_PL_OA TTM_PL_PRIV2
-
-#define AMDGPU_PL_FLAG_GDS TTM_PL_FLAG_PRIV0
-#define AMDGPU_PL_FLAG_GWS TTM_PL_FLAG_PRIV1
-#define AMDGPU_PL_FLAG_OA TTM_PL_FLAG_PRIV2
-
struct amdgpu_ring;
struct amdgpu_bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 88fbed2389c0..a7ea9a3b454e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -118,23 +118,23 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
*/
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
- struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = rbo->adev;
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = abo->adev;
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
int r;
- r = amdgpu_bo_reserve(rbo, false);
+ r = amdgpu_bo_reserve(abo, false);
if (r)
return r;
- bo_va = amdgpu_vm_bo_find(vm, rbo);
+ bo_va = amdgpu_vm_bo_find(vm, abo);
if (!bo_va) {
- bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
+ bo_va = amdgpu_vm_bo_add(adev, vm, abo);
} else {
++bo_va->ref_count;
}
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unreserve(abo);
return 0;
}
@@ -528,7 +528,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
goto error_unreserve;
if (operation == AMDGPU_VA_OP_MAP)
- r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
error_unreserve:
ttm_eu_backoff_reservation(&ticket, &list);
@@ -547,7 +547,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gobj;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
struct ttm_validate_buffer tv, tv_pd;
struct ww_acquire_ctx ticket;
@@ -587,10 +587,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL)
return -ENOENT;
- rbo = gem_to_amdgpu_bo(gobj);
+ abo = gem_to_amdgpu_bo(gobj);
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
- tv.bo = &rbo->tbo;
+ tv.bo = &abo->tbo;
tv.shared = true;
list_add(&tv.head, &list);
@@ -604,7 +604,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return r;
}
- bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
+ bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
if (!bo_va) {
ttm_eu_backoff_reservation(&ticket, &list);
drm_gem_object_unreference_unlocked(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
new file mode 100644
index 000000000000..f86c84427778
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+struct amdgpu_gtt_mgr {
+ struct drm_mm mm;
+ spinlock_t lock;
+ uint64_t available;
+};
+
+/**
+ * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
+ *
+ * @man: TTM memory type manager
+ * @p_size: maximum size of GTT
+ *
+ * Allocate and initialize the GTT manager.
+ */
+static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct amdgpu_gtt_mgr *mgr;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+
+ drm_mm_init(&mgr->mm, 0, p_size);
+ spin_lock_init(&mgr->lock);
+ mgr->available = p_size;
+ man->priv = mgr;
+ return 0;
+}
+
+/**
+ * amdgpu_gtt_mgr_fini - free and destroy GTT manager
+ *
+ * @man: TTM memory type manager
+ *
+ * Destroy and free the GTT manager; returns -EBUSY if ranges are still
+ * allocated inside it.
+ */
+static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+
+ spin_lock(&mgr->lock);
+ if (!drm_mm_clean(&mgr->mm)) {
+ spin_unlock(&mgr->lock);
+ return -EBUSY;
+ }
+
+ drm_mm_takedown(&mgr->mm);
+ spin_unlock(&mgr->lock);
+ kfree(mgr);
+ man->priv = NULL;
+ return 0;
+}
+
+/**
+ * amdgpu_gtt_mgr_alloc - allocate new ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Allocate the address space for a node.
+ */
+int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct drm_mm_node *node = mem->mm_node;
+ enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
+ enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+ unsigned long fpfn, lpfn;
+ int r;
+
+ if (node->start != AMDGPU_BO_INVALID_OFFSET)
+ return 0;
+
+ if (place)
+ fpfn = place->fpfn;
+ else
+ fpfn = 0;
+
+ if (place && place->lpfn)
+ lpfn = place->lpfn;
+ else
+ lpfn = man->size;
+
+ if (place && place->flags & TTM_PL_FLAG_TOPDOWN) {
+ sflags = DRM_MM_SEARCH_BELOW;
+ aflags = DRM_MM_CREATE_TOP;
+ }
+
+ spin_lock(&mgr->lock);
+ r = drm_mm_insert_node_in_range_generic(&mgr->mm, node, mem->num_pages,
+ mem->page_alignment, 0,
+ fpfn, lpfn, sflags, aflags);
+ spin_unlock(&mgr->lock);
+
+ if (!r) {
+ mem->start = node->start;
+ if (&tbo->mem == mem)
+ tbo->offset = (tbo->mem.start << PAGE_SHIFT) +
+ tbo->bdev->man[tbo->mem.mem_type].gpu_offset;
+ }
+
+ return r;
+}
+
+/**
+ * amdgpu_gtt_mgr_new - allocate a new node
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Dummy; allocate the node, but don't assign address space to it yet.
+ */
+static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct drm_mm_node *node;
+ int r;
+
+ spin_lock(&mgr->lock);
+ if (mgr->available < mem->num_pages) {
+ spin_unlock(&mgr->lock);
+ return 0;
+ }
+ mgr->available -= mem->num_pages;
+ spin_unlock(&mgr->lock);
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ node->start = AMDGPU_BO_INVALID_OFFSET;
+ mem->mm_node = node;
+
+ if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
+ r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
+ if (unlikely(r)) {
+ kfree(node);
+ mem->mm_node = NULL;
+ }
+ } else {
+ mem->start = node->start;
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_gtt_mgr_del - free ranges
+ *
+ * @man: TTM memory type manager
+ * @mem: TTM memory object
+ *
+ * Free the allocated GTT again.
+ */
+static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct drm_mm_node *node = mem->mm_node;
+
+ if (!node)
+ return;
+
+ spin_lock(&mgr->lock);
+ if (node->start != AMDGPU_BO_INVALID_OFFSET)
+ drm_mm_remove_node(node);
+ mgr->available += mem->num_pages;
+ spin_unlock(&mgr->lock);
+
+ kfree(node);
+ mem->mm_node = NULL;
+}
+
+/**
+ * amdgpu_gtt_mgr_debug - dump GTT table
+ *
+ * @man: TTM memory type manager
+ * @prefix: text prefix
+ *
+ * Dump the table content using printk.
+ */
+static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
+ const char *prefix)
+{
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+
+ spin_lock(&mgr->lock);
+ drm_mm_debug_table(&mgr->mm, prefix);
+ spin_unlock(&mgr->lock);
+}
+
+const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
+ amdgpu_gtt_mgr_init,
+ amdgpu_gtt_mgr_fini,
+ amdgpu_gtt_mgr_new,
+ amdgpu_gtt_mgr_del,
+ amdgpu_gtt_mgr_debug
+};
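The manager defers real address assignment: amdgpu_gtt_mgr_new() only reserves capacity and hands back a node at AMDGPU_BO_INVALID_OFFSET unless the placement forces a range, while amdgpu_gtt_mgr_alloc() fills in the address later, at bind time. A minimal sketch of the expected caller side; the bind-time hook shown here is an assumption based on this series, not code in this file:

	/* Sketch: register the manager for the TT domain in init_mem_type() */
	man->func = &amdgpu_gtt_mgr_func;

	/* ... later, when the BO is actually bound to the GART, assign the
	 * real GTT range; until then mem->start stays at
	 * AMDGPU_BO_INVALID_OFFSET and no address space is consumed. */
	if (mem->start == AMDGPU_BO_INVALID_OFFSET) {
		r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT],
					 bo, NULL, mem);
		if (r)
			return r;
	}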
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 31a676376d73..91d367399956 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -158,8 +158,8 @@ static const struct i2c_algorithm amdgpu_atombios_i2c_algo = {
};
struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
- struct amdgpu_i2c_bus_rec *rec,
- const char *name)
+ const struct amdgpu_i2c_bus_rec *rec,
+ const char *name)
{
struct amdgpu_i2c_chan *i2c;
int ret;
@@ -186,10 +186,8 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
"AMDGPU i2c hw bus %s", name);
i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
ret = i2c_add_adapter(&i2c->adapter);
- if (ret) {
- DRM_ERROR("Failed to register hw i2c %s\n", name);
+ if (ret)
goto out_free;
- }
} else {
/* set the amdgpu bit adapter */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
@@ -222,6 +220,7 @@ void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
{
if (!i2c)
return;
+ WARN_ON(i2c->has_aux);
i2c_del_adapter(&i2c->adapter);
kfree(i2c);
}
@@ -251,8 +250,8 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev)
/* Add additional buses */
void amdgpu_i2c_add(struct amdgpu_device *adev,
- struct amdgpu_i2c_bus_rec *rec,
- const char *name)
+ const struct amdgpu_i2c_bus_rec *rec,
+ const char *name)
{
struct drm_device *dev = adev->ddev;
int i;
@@ -268,7 +267,7 @@ void amdgpu_i2c_add(struct amdgpu_device *adev,
/* looks up bus based on id */
struct amdgpu_i2c_chan *
amdgpu_i2c_lookup(struct amdgpu_device *adev,
- struct amdgpu_i2c_bus_rec *i2c_bus)
+ const struct amdgpu_i2c_bus_rec *i2c_bus)
{
int i;
@@ -338,7 +337,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
/* ddc router switching */
void
-amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector)
+amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
{
u8 val;
@@ -367,7 +366,7 @@ amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector)
/* clock/data router switching */
void
-amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector)
+amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *amdgpu_connector)
{
u8 val;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
index d81e19b53973..63c2ff7499e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
@@ -25,20 +25,20 @@
#define __AMDGPU_I2C_H__
struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
- struct amdgpu_i2c_bus_rec *rec,
- const char *name);
+ const struct amdgpu_i2c_bus_rec *rec,
+ const char *name);
void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c);
void amdgpu_i2c_init(struct amdgpu_device *adev);
void amdgpu_i2c_fini(struct amdgpu_device *adev);
void amdgpu_i2c_add(struct amdgpu_device *adev,
- struct amdgpu_i2c_bus_rec *rec,
- const char *name);
+ const struct amdgpu_i2c_bus_rec *rec,
+ const char *name);
struct amdgpu_i2c_chan *
amdgpu_i2c_lookup(struct amdgpu_device *adev,
- struct amdgpu_i2c_bus_rec *i2c_bus);
+ const struct amdgpu_i2c_bus_rec *i2c_bus);
void
-amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector);
+amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *connector);
void
-amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector);
+amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *connector);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index a31d7ef3032c..6a6c86c9c169 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -124,7 +124,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
bool skip_preamble, need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
- uint64_t ctx;
+ uint64_t fence_ctx;
+ uint32_t status = 0, alloc_size;
unsigned i;
int r = 0;
@@ -135,14 +136,14 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* ring tests don't use a job */
if (job) {
vm = job->vm;
- ctx = job->ctx;
+ fence_ctx = job->fence_ctx;
} else {
vm = NULL;
- ctx = 0;
+ fence_ctx = 0;
}
if (!ring->ready) {
- dev_err(adev->dev, "couldn't schedule ib\n");
+ dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
return -EINVAL;
}
@@ -151,7 +152,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
- r = amdgpu_ring_alloc(ring, 256 * num_ibs);
+ alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
+ num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+
+ r = amdgpu_ring_alloc(ring, alloc_size);
if (r) {
dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
return r;
@@ -174,13 +178,22 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* always set cond_exec_polling to CONTINUE */
*ring->cond_exe_cpu_addr = 1;
- skip_preamble = ring->current_ctx == ctx;
- need_ctx_switch = ring->current_ctx != ctx;
+ skip_preamble = ring->current_ctx == fence_ctx;
+ need_ctx_switch = ring->current_ctx != fence_ctx;
+ if (job && ring->funcs->emit_cntxcntl) {
+ if (need_ctx_switch)
+ status |= AMDGPU_HAVE_CTX_SWITCH;
+ status |= job->preamble_status;
+ amdgpu_ring_emit_cntxcntl(ring, status);
+ }
+
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
/* drop preamble IBs if we don't have a context switch */
- if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
+ if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
+ skip_preamble &&
+ !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST))
continue;
amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
@@ -209,7 +222,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);
- ring->current_ctx = ctx;
+ ring->current_ctx = fence_ctx;
+ if (ring->funcs->emit_switch_buffer)
+ amdgpu_ring_emit_switch_buffer(ring);
amdgpu_ring_commit(ring);
return 0;
}
@@ -280,7 +295,7 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
unsigned i;
- int r;
+ int r, ret = 0;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -301,10 +316,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
} else {
/* still not good, but we can live with it */
DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
+ ret = r;
}
}
}
- return 0;
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 534fc04e80fd..3ab4c65ecc8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -40,32 +40,15 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
/* Allocate ring buffer */
if (adev->irq.ih.ring_obj == NULL) {
- r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0,
- NULL, NULL, &adev->irq.ih.ring_obj);
+ r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->irq.ih.ring_obj,
+ &adev->irq.ih.gpu_addr,
+ (void **)&adev->irq.ih.ring);
if (r) {
DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
return r;
}
- r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_pin(adev->irq.ih.ring_obj,
- AMDGPU_GEM_DOMAIN_GTT,
- &adev->irq.ih.gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
- DRM_ERROR("amdgpu: failed to pin ih ring buffer (%d).\n", r);
- return r;
- }
- r = amdgpu_bo_kmap(adev->irq.ih.ring_obj,
- (void **)&adev->irq.ih.ring);
- amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
- if (r) {
- DRM_ERROR("amdgpu: failed to map ih ring buffer (%d).\n", r);
- return r;
- }
}
return 0;
}
@@ -136,8 +119,6 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
*/
void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
{
- int r;
-
if (adev->irq.ih.use_bus_addr) {
if (adev->irq.ih.ring) {
/* add 8 bytes for the rptr/wptr shadows and
@@ -149,17 +130,9 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
adev->irq.ih.ring = NULL;
}
} else {
- if (adev->irq.ih.ring_obj) {
- r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
- if (likely(r == 0)) {
- amdgpu_bo_kunmap(adev->irq.ih.ring_obj);
- amdgpu_bo_unpin(adev->irq.ih.ring_obj);
- amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
- }
- amdgpu_bo_unref(&adev->irq.ih.ring_obj);
- adev->irq.ih.ring = NULL;
- adev->irq.ih.ring_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
+ &adev->irq.ih.gpu_addr,
+ (void **)&adev->irq.ih.ring);
amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 7ef09352e534..f016464035b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -70,6 +70,7 @@ struct amdgpu_irq {
/* gen irq stuff */
struct irq_domain *domain; /* GPU irq controller domain */
unsigned virq[AMDGPU_MAX_IRQ_SRC_ID];
+ uint32_t srbm_soft_reset;
};
void amdgpu_irq_preinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 6674d40eb3ab..8c5807994073 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -91,7 +91,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
amdgpu_ib_free(job->adev, &job->ibs[i], f);
}
-void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
@@ -124,7 +124,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
return r;
job->owner = owner;
- job->ctx = entity->fence_context;
+ job->fence_ctx = entity->fence_context;
*f = fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
amd_sched_entity_push_job(&job->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d942654a1de0..c2c7fb140338 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -292,14 +292,14 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
type = AMD_IP_BLOCK_TYPE_UVD;
ring_mask = adev->uvd.ring.ready ? 1 : 0;
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 8;
+ ib_size_alignment = 16;
break;
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
- for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++)
+ for (i = 0; i < adev->vce.num_rings; i++)
ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 8;
+ ib_size_alignment = 1;
break;
default:
return -EINVAL;
@@ -373,6 +373,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_INFO_NUM_BYTES_MOVED:
ui64 = atomic64_read(&adev->num_bytes_moved);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
+ case AMDGPU_INFO_NUM_EVICTIONS:
+ ui64 = atomic64_read(&adev->num_evictions);
+ return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
ui64 = atomic64_read(&adev->vram_usage);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
@@ -539,12 +542,16 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return r;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
- if (unlikely(!fpriv))
- return -ENOMEM;
+ if (unlikely(!fpriv)) {
+ r = -ENOMEM;
+ goto out_suspend;
+ }
r = amdgpu_vm_init(adev, &fpriv->vm);
- if (r)
- goto error_free;
+ if (r) {
+ kfree(fpriv);
+ goto out_suspend;
+ }
mutex_init(&fpriv->bo_list_lock);
idr_init(&fpriv->bo_list_handles);
@@ -553,12 +560,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
file_priv->driver_priv = fpriv;
+out_suspend:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
- return 0;
-
-error_free:
- kfree(fpriv);
return r;
}
@@ -597,6 +601,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
kfree(fpriv);
file_priv->driver_priv = NULL;
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
}
/**
@@ -611,6 +618,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
void amdgpu_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
+ pm_runtime_get_sync(dev->dev);
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 6b1d7d306564..7b0eff7d060b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -39,6 +39,8 @@
#include <drm/drm_plane_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/hrtimer.h>
+#include "amdgpu_irq.h"
struct amdgpu_bo;
struct amdgpu_device;
@@ -339,6 +341,8 @@ struct amdgpu_mode_info {
int num_dig; /* number of dig blocks */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
+ struct hrtimer vblank_timer;
+ enum amdgpu_interrupt_state vsync_timer_enabled;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -587,10 +591,10 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
void amdgpu_print_display_setup(struct drm_device *dev);
int amdgpu_modeset_create_props(struct amdgpu_device *adev);
int amdgpu_crtc_set_config(struct drm_mode_set *set);
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags);
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags, uint32_t target);
extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6f0873c75a25..aa074fac0c7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -38,20 +38,17 @@
#include "amdgpu_trace.h"
-int amdgpu_ttm_init(struct amdgpu_device *adev);
-void amdgpu_ttm_fini(struct amdgpu_device *adev);
static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
struct ttm_mem_reg *mem)
{
- u64 ret = 0;
- if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
- ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
- adev->mc.visible_vram_size ?
- adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
- mem->size;
- }
- return ret;
+ if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
+ return 0;
+
+ return ((mem->start << PAGE_SHIFT) + mem->size) >
+ adev->mc.visible_vram_size ?
+ adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
+ mem->size;
}
static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
@@ -99,6 +96,11 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
+ if (!list_empty(&bo->shadow_list)) {
+ mutex_lock(&bo->adev->shadow_list_lock);
+ list_del_init(&bo->shadow_list);
+ mutex_unlock(&bo->adev->shadow_list_lock);
+ }
kfree(bo->metadata);
kfree(bo);
}
@@ -112,90 +114,99 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
struct ttm_placement *placement,
- struct ttm_place *placements,
+ struct ttm_place *places,
u32 domain, u64 flags)
{
- u32 c = 0, i;
-
- placement->placement = placements;
- placement->busy_placement = placements;
+ u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+ unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+
if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
- adev->mc.visible_vram_size < adev->mc.real_vram_size) {
- placements[c].fpfn =
- adev->mc.visible_vram_size >> PAGE_SHIFT;
- placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
+ !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
+ adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+ places[c].fpfn = visible_pfn;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_WC |
+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
+ TTM_PL_FLAG_TOPDOWN;
+ c++;
}
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
- if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
- placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ places[c].lpfn = visible_pfn;
+ else
+ places[c].flags |= TTM_PL_FLAG_TOPDOWN;
+ c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
- if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_TT;
+ if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+ places[c].flags |= TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED;
- } else {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
- }
+ else
+ places[c].flags |= TTM_PL_FLAG_CACHED;
+ c++;
}
if (domain & AMDGPU_GEM_DOMAIN_CPU) {
- if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_SYSTEM;
+ if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+ places[c].flags |= TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED;
- } else {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
- }
+ else
+ places[c].flags |= TTM_PL_FLAG_CACHED;
+ c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GDS) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_UNCACHED |
- AMDGPU_PL_FLAG_GDS;
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
+ c++;
}
+
if (domain & AMDGPU_GEM_DOMAIN_GWS) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_UNCACHED |
- AMDGPU_PL_FLAG_GWS;
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
+ c++;
}
+
if (domain & AMDGPU_GEM_DOMAIN_OA) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_UNCACHED |
- AMDGPU_PL_FLAG_OA;
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
+ c++;
}
if (!c) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM;
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ c++;
}
+
placement->num_placement = c;
- placement->num_busy_placement = c;
+ placement->placement = places;
- for (i = 0; i < c; i++) {
- if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
- (placements[i].flags & TTM_PL_FLAG_VRAM) &&
- !placements[i].fpfn)
- placements[i].lpfn =
- adev->mc.visible_vram_size >> PAGE_SHIFT;
- else
- placements[i].lpfn = 0;
- }
+ placement->num_busy_placement = c;
+ placement->busy_placement = places;
}
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
- amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
- rbo->placements, domain, rbo->flags);
+ amdgpu_ttm_placement_init(abo->adev, &abo->placement,
+ abo->placements, domain, abo->flags);
}
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -211,6 +222,98 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
bo->placement.busy_placement = bo->placements;
}
+/**
+ * amdgpu_bo_create_kernel - create BO for kernel use
+ *
+ * @adev: amdgpu device object
+ * @size: size for the new BO
+ * @align: alignment for the new BO
+ * @domain: where to place it
+ * @bo_ptr: resulting BO
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Allocates and pins a BO for kernel internal use.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr)
+{
+ int r;
+
+ r = amdgpu_bo_create(adev, size, align, true, domain,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, NULL, bo_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(*bo_ptr, false);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
+ goto error_free;
+ }
+
+ r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
+ goto error_unreserve;
+ }
+
+ if (cpu_addr) {
+ r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
+ goto error_unreserve;
+ }
+ }
+
+ amdgpu_bo_unreserve(*bo_ptr);
+
+ return 0;
+
+error_unreserve:
+ amdgpu_bo_unreserve(*bo_ptr);
+
+error_free:
+ amdgpu_bo_unref(bo_ptr);
+
+ return r;
+}
+
+/**
+ * amdgpu_bo_free_kernel - free BO for kernel use
+ *
+ * @bo: amdgpu BO to free
+ *
+ * unmaps and unpin a BO for kernel internal use.
+ */
+void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
+ void **cpu_addr)
+{
+ if (*bo == NULL)
+ return;
+
+ if (likely(amdgpu_bo_reserve(*bo, false) == 0)) {
+ if (cpu_addr)
+ amdgpu_bo_kunmap(*bo);
+
+ amdgpu_bo_unpin(*bo);
+ amdgpu_bo_unreserve(*bo);
+ }
+ amdgpu_bo_unref(bo);
+
+ if (gpu_addr)
+ *gpu_addr = 0;
+
+ if (cpu_addr)
+ *cpu_addr = NULL;
+}
+
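The intended create/free pairing is used verbatim by the IH ring conversion further down in this diff; a minimal self-contained sketch:

	struct amdgpu_bo *bo = NULL;
	u64 gpu_addr;
	void *cpu_ptr;
	int r;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &bo, &gpu_addr, &cpu_ptr);
	if (r)
		return r;
	/* ... use cpu_ptr for CPU access, gpu_addr for the hardware ... */
	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);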
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
@@ -249,7 +352,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
return r;
}
bo->adev = adev;
- INIT_LIST_HEAD(&bo->list);
+ INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
@@ -277,11 +380,79 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
if (unlikely(r != 0)) {
return r;
}
+
+ if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
+ bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+ struct fence *fence;
+
+ if (adev->mman.buffer_funcs_ring == NULL ||
+ !adev->mman.buffer_funcs_ring->ready) {
+ r = -EBUSY;
+ goto fail_free;
+ }
+
+ r = amdgpu_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ goto fail_free;
+
+ amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r != 0))
+ goto fail_unreserve;
+
+ amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+ amdgpu_bo_fence(bo, fence, false);
+ amdgpu_bo_unreserve(bo);
+ fence_put(bo->tbo.moving);
+ bo->tbo.moving = fence_get(fence);
+ fence_put(fence);
+ }
*bo_ptr = bo;
trace_amdgpu_bo_create(bo);
return 0;
+
+fail_unreserve:
+ amdgpu_bo_unreserve(bo);
+fail_free:
+ amdgpu_bo_unref(&bo);
+ return r;
+}
+
+static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+ unsigned long size, int byte_align,
+ struct amdgpu_bo *bo)
+{
+ struct ttm_placement placement = {0};
+ struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ int r;
+
+ if (bo->shadow)
+ return 0;
+
+ bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
+ memset(&placements, 0,
+ (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
+
+ amdgpu_ttm_placement_init(adev, &placement,
+ placements, AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC);
+
+ r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+ NULL, &placement,
+ bo->tbo.resv,
+ &bo->shadow);
+ if (!r) {
+ bo->shadow->parent = amdgpu_bo_ref(bo);
+ mutex_lock(&adev->shadow_list_lock);
+ list_add_tail(&bo->shadow_list, &adev->shadow_list);
+ mutex_unlock(&adev->shadow_list_lock);
+ }
+
+ return r;
}
int amdgpu_bo_create(struct amdgpu_device *adev,
@@ -293,6 +464,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
{
struct ttm_placement placement = {0};
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ int r;
memset(&placements, 0,
(AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
@@ -300,9 +472,83 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
amdgpu_ttm_placement_init(adev, &placement,
placements, domain, flags);
- return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
- domain, flags, sg, &placement,
- resv, bo_ptr);
+ r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+ domain, flags, sg, &placement,
+ resv, bo_ptr);
+ if (r)
+ return r;
+
+ if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
+ r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
+ if (r)
+ amdgpu_bo_unref(bo_ptr);
+ }
+
+ return r;
+}
+
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct reservation_object *resv,
+ struct fence **fence,
+ bool direct)
+
+{
+ struct amdgpu_bo *shadow = bo->shadow;
+ uint64_t bo_addr, shadow_addr;
+ int r;
+
+ if (!shadow)
+ return -EINVAL;
+
+ bo_addr = amdgpu_bo_gpu_offset(bo);
+ shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+ r = reservation_object_reserve_shared(bo->tbo.resv);
+ if (r)
+ goto err;
+
+ r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
+ amdgpu_bo_size(bo), resv, fence,
+ direct);
+ if (!r)
+ amdgpu_bo_fence(bo, *fence, true);
+
+err:
+ return r;
+}
+
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct reservation_object *resv,
+ struct fence **fence,
+ bool direct)
+
+{
+ struct amdgpu_bo *shadow = bo->shadow;
+ uint64_t bo_addr, shadow_addr;
+ int r;
+
+ if (!shadow)
+ return -EINVAL;
+
+ bo_addr = amdgpu_bo_gpu_offset(bo);
+ shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+ r = reservation_object_reserve_shared(bo->tbo.resv);
+ if (r)
+ goto err;
+
+ r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
+ amdgpu_bo_size(bo), resv, fence,
+ direct);
+ if (!r)
+ amdgpu_bo_fence(bo, *fence, true);
+
+err:
+ return r;
}
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -380,16 +626,17 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
if (bo->pin_count) {
+ uint32_t mem_type = bo->tbo.mem.mem_type;
+
+ if (domain != amdgpu_mem_type_to_domain(mem_type))
+ return -EINVAL;
+
bo->pin_count++;
if (gpu_addr)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
if (max_offset != 0) {
- u64 domain_start;
- if (domain == AMDGPU_GEM_DOMAIN_VRAM)
- domain_start = bo->adev->mc.vram_start;
- else
- domain_start = bo->adev->mc.gtt_start;
+ u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
WARN_ON_ONCE(max_offset <
(amdgpu_bo_gpu_offset(bo) - domain_start));
}
@@ -401,7 +648,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
/* force to pin into visible video ram */
if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
- (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
+ (!max_offset || max_offset >
+ bo->adev->mc.visible_vram_size)) {
if (WARN_ON_ONCE(min_offset >
bo->adev->mc.visible_vram_size))
return -EINVAL;
@@ -420,19 +668,28 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (likely(r == 0)) {
- bo->pin_count = 1;
- if (gpu_addr != NULL)
- *gpu_addr = amdgpu_bo_gpu_offset(bo);
- if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- bo->adev->vram_pin_size += amdgpu_bo_size(bo);
- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
- } else
- bo->adev->gart_pin_size += amdgpu_bo_size(bo);
- } else {
+ if (unlikely(r)) {
dev_err(bo->adev->dev, "%p pin failed\n", bo);
+ goto error;
+ }
+ r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+ if (unlikely(r)) {
+ dev_err(bo->adev->dev, "%p bind failed\n", bo);
+ goto error;
}
+
+ bo->pin_count = 1;
+ if (gpu_addr != NULL)
+ *gpu_addr = amdgpu_bo_gpu_offset(bo);
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+ } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+ bo->adev->gart_pin_size += amdgpu_bo_size(bo);
+ }
+
+error:
return r;
}
@@ -457,16 +714,20 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (likely(r == 0)) {
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
- bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
- } else
- bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
- } else {
+ if (unlikely(r)) {
dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+ goto error;
}
+
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+ } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
+ }
+
+error:
return r;
}
@@ -588,23 +849,23 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return;
- rbo = container_of(bo, struct amdgpu_bo, tbo);
- amdgpu_vm_bo_invalidate(rbo->adev, rbo);
+ abo = container_of(bo, struct amdgpu_bo, tbo);
+ amdgpu_vm_bo_invalidate(abo->adev, abo);
/* update statistics */
if (!new_mem)
return;
/* move_notify is called before move happens */
- amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
+ amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
- trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
+ trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
@@ -637,7 +898,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
for (i = 0; i < abo->placement.num_placement; i++) {
/* Force into visible VRAM */
if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
- (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
+ (!abo->placements[i].lpfn ||
+ abo->placements[i].lpfn > lpfn))
abo->placements[i].lpfn = lpfn;
}
r = ttm_bo_validate(bo, &abo->placement, false, false);
@@ -674,3 +936,24 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
else
reservation_object_add_excl_fence(resv, fence);
}
+
+/**
+ * amdgpu_bo_gpu_offset - return GPU offset of bo
+ * @bo: amdgpu object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: object should either be pinned or reserved when calling this
+ * function; the WARN_ON_ONCE checks below catch violations for debugging.
+ */
+u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
+{
+ WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+ WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
+ !amdgpu_ttm_is_bound(bo->tbo.ttm));
+ WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
+ !bo->pin_count);
+ WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
+
+ return bo->tbo.offset;
+}
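Since the helper now warns on unpinned, unreserved or unbound objects, callers without a pin are expected to hold the reservation; a minimal illustrative pattern:

	u64 addr;
	int r;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;
	addr = amdgpu_bo_gpu_offset(bo);	/* safe: bo is reserved */
	amdgpu_bo_unreserve(bo);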
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index bdb01d932548..8255034d73eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -31,6 +31,8 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
+#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
+
/**
* amdgpu_mem_type_to_domain - return domain corresponding to mem_type
* @mem_type: ttm memory type
@@ -85,21 +87,6 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
ttm_bo_unreserve(&bo->tbo);
}
-/**
- * amdgpu_bo_gpu_offset - return GPU offset of bo
- * @bo: amdgpu object for which we query the offset
- *
- * Returns current GPU offset of the object.
- *
- * Note: object should either be pinned or reserved when calling this
- * function, it might be useful to add check for this for debugging.
- */
-static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
-{
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
- return bo->tbo.offset;
-}
-
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
return bo->tbo.num_pages << PAGE_SHIFT;
@@ -139,6 +126,12 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
struct ttm_placement *placement,
struct reservation_object *resv,
struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr);
+void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
+ void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
@@ -165,6 +158,19 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
bool shared);
+u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct reservation_object *resv,
+ struct fence **fence, bool direct);
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct reservation_object *resv,
+ struct fence **fence,
+ bool direct);
+
/*
* sub allocation
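
The new create/free pair declared above collapses the manual create/reserve/pin/kmap sequence that the amdgpu_ring.c and amdgpu_uvd.c hunks below remove. A sketch of the pairing (hypothetical caller, adev assumed valid):

	struct amdgpu_bo *bo = NULL;
	u64 gpu_addr;
	void *cpu_ptr;
	int r;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &bo,
				    &gpu_addr, &cpu_ptr);
	if (r)
		return r;

	/* cpu_ptr is kmapped, gpu_addr is pinned and stable */
	memset(cpu_ptr, 0, PAGE_SIZE);

	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);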
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
index d15314957732..8e67c1210d7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "atom.h"
#include "atombios_encoders.h"
+#include "amdgpu_pll.h"
#include <asm/div64.h>
#include <linux/gcd.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index ff63b88b0ffa..accc908bdc88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -305,7 +305,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
char *table = NULL;
- int size, i;
+ int size;
if (adev->pp_enabled)
size = amdgpu_dpm_get_pp_table(adev, &table);
@@ -315,10 +315,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
if (size >= PAGE_SIZE)
size = PAGE_SIZE - 1;
- for (i = 0; i < size; i++) {
- sprintf(buf + i, "%02x", table[i]);
- }
- sprintf(buf + i, "\n");
+ memcpy(buf, table, size);
return size;
}
@@ -1106,54 +1103,46 @@ force:
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
- if (adev->pp_enabled)
+ if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
+ /* enable/disable UVD */
+ mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_uvd(adev, !enable);
- else {
- if (adev->pm.funcs->powergate_uvd) {
+ mutex_unlock(&adev->pm.mutex);
+ } else {
+ if (enable) {
mutex_lock(&adev->pm.mutex);
- /* enable/disable UVD */
- amdgpu_dpm_powergate_uvd(adev, !enable);
+ adev->pm.dpm.uvd_active = true;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
mutex_unlock(&adev->pm.mutex);
} else {
- if (enable) {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.uvd_active = true;
- adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
- mutex_unlock(&adev->pm.mutex);
- } else {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.uvd_active = false;
- mutex_unlock(&adev->pm.mutex);
- }
- amdgpu_pm_compute_clocks(adev);
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm.uvd_active = false;
+ mutex_unlock(&adev->pm.mutex);
}
-
+ amdgpu_pm_compute_clocks(adev);
}
}
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
- if (adev->pp_enabled)
+ if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
+ /* enable/disable VCE */
+ mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_vce(adev, !enable);
- else {
- if (adev->pm.funcs->powergate_vce) {
+ mutex_unlock(&adev->pm.mutex);
+ } else {
+ if (enable) {
mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_powergate_vce(adev, !enable);
+ adev->pm.dpm.vce_active = true;
+ /* XXX select vce level based on ring/task */
+ adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
} else {
- if (enable) {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.vce_active = true;
- /* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
- mutex_unlock(&adev->pm.mutex);
- } else {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.vce_active = false;
- mutex_unlock(&adev->pm.mutex);
- }
- amdgpu_pm_compute_clocks(adev);
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm.vce_active = false;
+ mutex_unlock(&adev->pm.mutex);
}
+ amdgpu_pm_compute_clocks(adev);
}
}
@@ -1333,6 +1322,64 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
*/
#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
+{
+ int32_t value;
+
+ /* sanity check PP is enabled */
+ if (!(adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->read_sensor))
+ return -EINVAL;
+
+ /* GPU Clocks */
+ seq_printf(m, "GFX Clocks and Power:\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, &value))
+ seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value))
+ seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, &value))
+ seq_printf(m, "\t%u mV (VDDGFX)\n", value);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, &value))
+ seq_printf(m, "\t%u mV (VDDNB)\n", value);
+ seq_printf(m, "\n");
+
+ /* GPU Temp */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, &value))
+ seq_printf(m, "GPU Temperature: %u C\n", value/1000);
+
+ /* GPU Load */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value))
+ seq_printf(m, "GPU Load: %u %%\n", value);
+ seq_printf(m, "\n");
+
+ /* UVD clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, &value)) {
+ if (!value) {
+ seq_printf(m, "UVD: Disabled\n");
+ } else {
+ seq_printf(m, "UVD: Enabled\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, &value))
+ seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, &value))
+ seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+ }
+ }
+ seq_printf(m, "\n");
+
+ /* VCE clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, &value)) {
+ if (!value) {
+ seq_printf(m, "VCE: Disabled\n");
+ } else {
+ seq_printf(m, "VCE: Enabled\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, &value))
+ seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+ }
+ }
+
+ return 0;
+}
+
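
With powerplay enabled, the debugfs file (typically /sys/kernel/debug/dri/<n>/amdgpu_pm_info) would then render roughly as below; all values are invented for illustration and follow the seq_printf formats above:

	GFX Clocks and Power:
		300 MHz (MCLK)
		600 MHz (SCLK)
		950 mV (VDDGFX)

	GPU Temperature: 42 C
	GPU Load: 15 %

	UVD: Disabled

	VCE: Disabled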
static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1348,11 +1395,11 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
seq_printf(m, "PX asic powered off\n");
} else if (adev->pp_enabled) {
- amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
+ return amdgpu_debugfs_pm_info_pp(m, adev);
} else {
mutex_lock(&adev->pm.mutex);
if (adev->pm.funcs->debugfs_print_current_performance_level)
- amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
+ adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
else
seq_printf(m, "Debugfs support not implemented for this asic\n");
mutex_unlock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index c5738a22b690..7532ff822aa7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -30,6 +30,7 @@
#include "amdgpu_pm.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_powerplay.h"
+#include "si_dpm.h"
#include "cik_dpm.h"
#include "vi_dpm.h"
@@ -41,7 +42,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
amd_pp = &(adev->powerplay);
if (adev->pp_enabled) {
-#ifdef CONFIG_DRM_AMD_POWERPLAY
struct amd_pp_init *pp_init;
pp_init = kzalloc(sizeof(struct amd_pp_init), GFP_KERNEL);
@@ -52,15 +52,21 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
pp_init->chip_family = adev->family;
pp_init->chip_id = adev->asic_type;
pp_init->device = amdgpu_cgs_create_device(adev);
- pp_init->powercontainment_enabled = amdgpu_powercontainment;
-
ret = amd_powerplay_init(pp_init, amd_pp);
kfree(pp_init);
-#endif
} else {
amd_pp->pp_handle = (void *)adev;
switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ amd_pp->ip_funcs = &si_dpm_ip_funcs;
+ break;
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
@@ -72,15 +78,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
amd_pp->ip_funcs = &kv_dpm_ip_funcs;
break;
#endif
- case CHIP_TOPAZ:
- amd_pp->ip_funcs = &iceland_dpm_ip_funcs;
- break;
- case CHIP_TONGA:
- amd_pp->ip_funcs = &tonga_dpm_ip_funcs;
- break;
- case CHIP_FIJI:
- amd_pp->ip_funcs = &fiji_dpm_ip_funcs;
- break;
case CHIP_CARRIZO:
case CHIP_STONEY:
amd_pp->ip_funcs = &cz_dpm_ip_funcs;
@@ -98,19 +95,17 @@ static int amdgpu_pp_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret = 0;
-#ifdef CONFIG_DRM_AMD_POWERPLAY
switch (adev->asic_type) {
case CHIP_POLARIS11:
case CHIP_POLARIS10:
- adev->pp_enabled = true;
- break;
case CHIP_TONGA:
case CHIP_FIJI:
- adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
+ case CHIP_TOPAZ:
+ adev->pp_enabled = true;
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
- adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
+ adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
break;
 /* These chips don't have powerplay implementations */
case CHIP_BONAIRE:
@@ -118,14 +113,10 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_KABINI:
case CHIP_MULLINS:
case CHIP_KAVERI:
- case CHIP_TOPAZ:
default:
adev->pp_enabled = false;
break;
}
-#else
- adev->pp_enabled = false;
-#endif
ret = amdgpu_powerplay_init(adev);
if (ret)
@@ -147,12 +138,11 @@ static int amdgpu_pp_late_init(void *handle)
ret = adev->powerplay.ip_funcs->late_init(
adev->powerplay.pp_handle);
-#ifdef CONFIG_DRM_AMD_POWERPLAY
if (adev->pp_enabled && adev->pm.dpm_enabled) {
amdgpu_pm_sysfs_init(adev);
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
}
-#endif
+
return ret;
}
@@ -165,10 +155,8 @@ static int amdgpu_pp_sw_init(void *handle)
ret = adev->powerplay.ip_funcs->sw_init(
adev->powerplay.pp_handle);
-#ifdef CONFIG_DRM_AMD_POWERPLAY
if (adev->pp_enabled)
adev->pm.dpm_enabled = true;
-#endif
return ret;
}
@@ -219,7 +207,6 @@ static int amdgpu_pp_hw_fini(void *handle)
static void amdgpu_pp_late_fini(void *handle)
{
-#ifdef CONFIG_DRM_AMD_POWERPLAY
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pp_enabled) {
@@ -230,7 +217,6 @@ static void amdgpu_pp_late_fini(void *handle)
if (adev->powerplay.ip_funcs->late_fini)
adev->powerplay.ip_funcs->late_fini(
adev->powerplay.pp_handle);
-#endif
}
static int amdgpu_pp_suspend(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 85aeb0a804bb..e1fa8731d1e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -222,33 +222,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
- r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0,
- NULL, NULL, &ring->ring_obj);
+ r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &ring->ring_obj,
+ &ring->gpu_addr,
+ (void **)&ring->ring);
if (r) {
dev_err(adev->dev, "(%d) ring create failed\n", r);
return r;
}
- r = amdgpu_bo_reserve(ring->ring_obj, false);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
- &ring->gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(ring->ring_obj);
- dev_err(adev->dev, "(%d) ring pin failed\n", r);
- return r;
- }
- r = amdgpu_bo_kmap(ring->ring_obj,
- (void **)&ring->ring);
-
memset((void *)ring->ring, 0, ring->ring_size);
-
- amdgpu_bo_unreserve(ring->ring_obj);
- if (r) {
- dev_err(adev->dev, "(%d) ring map failed\n", r);
- return r;
- }
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->max_dw = max_dw;
@@ -269,29 +252,20 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
- int r;
- struct amdgpu_bo *ring_obj;
-
- ring_obj = ring->ring_obj;
ring->ready = false;
- ring->ring = NULL;
- ring->ring_obj = NULL;
amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_wb_free(ring->adev, ring->rptr_offs);
amdgpu_wb_free(ring->adev, ring->wptr_offs);
- if (ring_obj) {
- r = amdgpu_bo_reserve(ring_obj, false);
- if (likely(r == 0)) {
- amdgpu_bo_kunmap(ring_obj);
- amdgpu_bo_unpin(ring_obj);
- amdgpu_bo_unreserve(ring_obj);
- }
- amdgpu_bo_unref(&ring_obj);
- }
+ amdgpu_bo_free_kernel(&ring->ring_obj,
+ &ring->gpu_addr,
+ (void **)&ring->ring);
+
amdgpu_debugfs_ring_fini(ring);
+
+ ring->adev->rings[ring->idx] = NULL;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 05a53f4fc334..b827c75e95de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -111,7 +111,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(gtt_obj[i]);
r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
- size, NULL, &fence);
+ size, NULL, &fence, false);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -156,7 +156,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(vram_obj);
r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
- size, NULL, &fence);
+ size, NULL, &fence, false);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 0d8d65eb46cd..067e5e683bb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -247,7 +247,7 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
TP_ARGS(mapping)
);
-TRACE_EVENT(amdgpu_vm_set_page,
+TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags),
TP_ARGS(pe, addr, count, incr, flags),
@@ -271,6 +271,24 @@ TRACE_EVENT(amdgpu_vm_set_page,
__entry->flags, __entry->count)
);
+TRACE_EVENT(amdgpu_vm_copy_ptes,
+ TP_PROTO(uint64_t pe, uint64_t src, unsigned count),
+ TP_ARGS(pe, src, count),
+ TP_STRUCT__entry(
+ __field(u64, pe)
+ __field(u64, src)
+ __field(u32, count)
+ ),
+
+ TP_fast_assign(
+ __entry->pe = pe;
+ __entry->src = src;
+ __entry->count = count;
+ ),
+ TP_printk("pe=%010Lx, src=%010Lx, count=%u",
+ __entry->pe, __entry->src, __entry->count)
+);
+
TRACE_EVENT(amdgpu_vm_flush,
TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
TP_ARGS(pd_addr, ring, id),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b7742e62972a..887483b8b818 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -34,6 +34,7 @@
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
+#include <ttm/ttm_memory.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
@@ -74,7 +75,7 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
ttm_mem_global_release(ref->object);
}
-static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
struct drm_global_reference *global_ref;
struct amdgpu_ring *ring;
@@ -88,10 +89,10 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
global_ref->init = &amdgpu_ttm_mem_global_init;
global_ref->release = &amdgpu_ttm_mem_global_release;
r = drm_global_item_ref(global_ref);
- if (r != 0) {
+ if (r) {
DRM_ERROR("Failed setting up TTM memory accounting "
"subsystem.\n");
- return r;
+ goto error_mem;
}
adev->mman.bo_global_ref.mem_glob =
@@ -102,26 +103,30 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
r = drm_global_item_ref(global_ref);
- if (r != 0) {
+ if (r) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&adev->mman.mem_global_ref);
- return r;
+ goto error_bo;
}
ring = adev->mman.buffer_funcs_ring;
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
rq, amdgpu_sched_jobs);
- if (r != 0) {
+ if (r) {
DRM_ERROR("Failed setting up TTM BO move run queue.\n");
- drm_global_item_unref(&adev->mman.mem_global_ref);
- drm_global_item_unref(&adev->mman.bo_global_ref.ref);
- return r;
+ goto error_entity;
}
adev->mman.mem_global_referenced = true;
return 0;
+
+error_entity:
+ drm_global_item_unref(&adev->mman.bo_global_ref.ref);
+error_bo:
+ drm_global_item_unref(&adev->mman.mem_global_ref);
+error_mem:
+ return r;
}
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
@@ -155,7 +160,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
- man->func = &ttm_bo_manager_func;
+ man->func = &amdgpu_gtt_mgr_func;
man->gpu_offset = adev->mc.gtt_start;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
@@ -190,12 +195,13 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
static struct ttm_place placements = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
};
+ unsigned i;
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
@@ -204,28 +210,44 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
placement->num_busy_placement = 1;
return;
}
- rbo = container_of(bo, struct amdgpu_bo, tbo);
+ abo = container_of(bo, struct amdgpu_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (rbo->adev->mman.buffer_funcs_ring->ready == false)
- amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
- else
- amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
+ if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ } else {
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+ for (i = 0; i < abo->placement.num_placement; ++i) {
+ if (!(abo->placements[i].flags &
+ TTM_PL_FLAG_TT))
+ continue;
+
+ if (abo->placements[i].lpfn)
+ continue;
+
+ /* set an upper limit so that GTT address
+ * space for the BO is allocated right away
+ * instead of being deferred.
+ */
+ abo->placements[i].lpfn =
+ abo->adev->mc.gtt_size >> PAGE_SHIFT;
+ }
+ }
break;
case TTM_PL_TT:
default:
- amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
}
- *placement = rbo->placement;
+ *placement = abo->placement;
}
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
+ struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
if (amdgpu_ttm_tt_get_usermm(bo->ttm))
return -EPERM;
- return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+ return drm_vma_node_verify_access(&abo->gem_base.vma_node,
+ filp->private_data);
}
static void amdgpu_move_null(struct ttm_buffer_object *bo,
@@ -251,26 +273,30 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
adev = amdgpu_get_adev(bo->bdev);
ring = adev->mman.buffer_funcs_ring;
- old_start = old_mem->start << PAGE_SHIFT;
- new_start = new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
- case TTM_PL_VRAM:
- old_start += adev->mc.vram_start;
- break;
case TTM_PL_TT:
- old_start += adev->mc.gtt_start;
+ r = amdgpu_ttm_bind(bo, old_mem);
+ if (r)
+ return r;
+
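+ /* fall through */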
+ case TTM_PL_VRAM:
+ old_start = (u64)old_mem->start << PAGE_SHIFT;
+ old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
return -EINVAL;
}
switch (new_mem->mem_type) {
- case TTM_PL_VRAM:
- new_start += adev->mc.vram_start;
- break;
case TTM_PL_TT:
- new_start += adev->mc.gtt_start;
+ r = amdgpu_ttm_bind(bo, new_mem);
+ if (r)
+ return r;
+
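+ /* fall through */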
+ case TTM_PL_VRAM:
+ new_start = (u64)new_mem->start << PAGE_SHIFT;
+ new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
@@ -285,7 +311,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
r = amdgpu_copy_buffer(ring, old_start, new_start,
new_mem->num_pages * PAGE_SIZE, /* bytes */
- bo->resv, &fence);
+ bo->resv, &fence, false);
if (r)
return r;
@@ -314,7 +340,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
- placements.lpfn = 0;
+ placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
@@ -335,7 +361,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -361,14 +387,14 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
- placements.lpfn = 0;
+ placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -435,8 +461,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, interruptible,
- no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
if (r) {
return r;
}
@@ -524,6 +549,7 @@ struct amdgpu_ttm_tt {
spinlock_t guptasklock;
struct list_head guptasks;
atomic_t mmu_invalidations;
+ struct list_head list;
};
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
@@ -641,7 +667,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
struct amdgpu_ttm_tt *gtt = (void*)ttm;
- uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
int r;
if (gtt->userptr) {
@@ -651,7 +676,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
return r;
}
}
- gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
if (!ttm->num_pages) {
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
@@ -662,14 +686,71 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
bo_mem->mem_type == AMDGPU_PL_OA)
return -EINVAL;
+ return 0;
+}
+
+bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+ return gtt && !list_empty(&gtt->list);
+}
+
+int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
+{
+ struct ttm_tt *ttm = bo->ttm;
+ struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+ uint32_t flags;
+ int r;
+
+ if (!ttm || amdgpu_ttm_is_bound(ttm))
+ return 0;
+
+ r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo,
+ NULL, bo_mem);
+ if (r) {
+ DRM_ERROR("Failed to allocate GTT address space (%d)\n", r);
+ return r;
+ }
+
+ flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
+ gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
ttm->pages, gtt->ttm.dma_address, flags);
if (r) {
- DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
- ttm->num_pages, (unsigned)gtt->offset);
+ DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ ttm->num_pages, gtt->offset);
return r;
}
+ spin_lock(&gtt->adev->gtt_list_lock);
+ list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+ spin_unlock(&gtt->adev->gtt_list_lock);
+ return 0;
+}
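
Binding is now decoupled from TT placement: a BO can hold a GTT node without a GART mapping until someone needs a real GPU address. The amdgpu_vm.c hunks below use exactly this bind-on-demand pattern; a sketch:

	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
	if (r)
		return r;

	addr = amdgpu_bo_gpu_offset(bo);	/* now bound and WARN-free */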
+
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
+{
+ struct amdgpu_ttm_tt *gtt, *tmp;
+ struct ttm_mem_reg bo_mem;
+ uint32_t flags;
+ int r;
+
+ bo_mem.mem_type = TTM_PL_TT;
+ spin_lock(&adev->gtt_list_lock);
+ list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
+ flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
+ r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+ gtt->ttm.ttm.pages, gtt->ttm.dma_address,
+ flags);
+ if (r) {
+ spin_unlock(&adev->gtt_list_lock);
+ DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ gtt->ttm.ttm.num_pages, gtt->offset);
+ return r;
+ }
+ }
+ spin_unlock(&adev->gtt_list_lock);
return 0;
}
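
The gtt_list bookkeeping above exists so mappings can be re-created after a GPU reset wipes the GART table; the expected call site is outside this diff, but would look roughly like:

	/* after the GART table itself has been restored */
	r = amdgpu_ttm_recover_gart(adev);
	if (r)
		DRM_ERROR("GART recovery failed (%d)\n", r);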
@@ -677,12 +758,19 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ if (gtt->userptr)
+ amdgpu_ttm_tt_unpin_userptr(ttm);
+
+ if (!amdgpu_ttm_is_bound(ttm))
+ return 0;
+
/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
if (gtt->adev->gart.ready)
amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
- if (gtt->userptr)
- amdgpu_ttm_tt_unpin_userptr(ttm);
+ spin_lock(&gtt->adev->gtt_list_lock);
+ list_del_init(&gtt->list);
+ spin_unlock(&gtt->adev->gtt_list_lock);
return 0;
}
@@ -720,6 +808,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
kfree(gtt);
return NULL;
}
+ INIT_LIST_HEAD(&gtt->list);
return &gtt->ttm.ttm;
}
@@ -950,6 +1039,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
struct list_head *res = lru->lru[tbo->mem.mem_type];
lru->lru[tbo->mem.mem_type] = &tbo->lru;
+ while ((++lru)->lru[tbo->mem.mem_type] == res)
+ lru->lru[tbo->mem.mem_type] = &tbo->lru;
return res;
}
@@ -960,6 +1051,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
struct list_head *res = lru->swap_lru;
lru->swap_lru = &tbo->swap;
+ while ((++lru)->swap_lru == res)
+ lru->swap_lru = &tbo->swap;
return res;
}
@@ -987,10 +1080,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
unsigned i, j;
int r;
- r = amdgpu_ttm_global_init(adev);
- if (r) {
- return r;
- }
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev,
adev->mman.bo_global_ref.ref.object,
@@ -1011,6 +1100,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
}
+ for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+ adev->mman.guard.lru[j] = NULL;
+ adev->mman.guard.swap_lru = NULL;
+
adev->mman.initialized = true;
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
adev->mc.real_vram_size >> PAGE_SHIFT);
@@ -1151,7 +1244,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
- struct fence **fence)
+ struct fence **fence, bool direct_submit)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -1195,8 +1288,79 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
+ if (direct_submit) {
+ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
+ NULL, NULL, fence);
+ if (r)
+ DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ else
+ job->fence = fence_get(*fence);
+ amdgpu_job_free(job);
+ } else {
+ r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+ if (r)
+ goto error_free;
+ }
+
+ return r;
+
+error_free:
+ amdgpu_job_free(job);
+ return r;
+}
+
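
direct_submit selects between the scheduler entity and immediate execution on the ring; the direct path is for contexts where the scheduler is stopped (e.g. during reset). A sketch of both call shapes (src, dst, size and resv assumed from the caller):

	struct fence *f = NULL;

	/* normal path: synced against resv, runs via the scheduler */
	r = amdgpu_copy_buffer(ring, src, dst, size, resv, &f, false);

	/* direct path: the IB goes straight onto the ring */
	r = amdgpu_copy_buffer(ring, src, dst, size, NULL, &f, true);
	if (!r) {
		fence_wait(f, false);
		fence_put(f);
	}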
+int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+ uint32_t src_data,
+ struct reservation_object *resv,
+ struct fence **fence)
+{
+ struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_job *job;
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+ uint32_t max_bytes, byte_count;
+ uint64_t dst_offset;
+ unsigned int num_loops, num_dw;
+ unsigned int i;
+ int r;
+
+ byte_count = bo->tbo.num_pages << PAGE_SHIFT;
+ max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
+ num_loops = DIV_ROUND_UP(byte_count, max_bytes);
+ num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
+
+ /* for IB padding */
+ while (num_dw & 0x7)
+ num_dw++;
+
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+ if (r)
+ return r;
+
+ if (resv) {
+ r = amdgpu_sync_resv(adev, &job->sync, resv,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
+ if (r) {
+ DRM_ERROR("sync failed (%d).\n", r);
+ goto error_free;
+ }
+ }
+
+ dst_offset = bo->tbo.mem.start << PAGE_SHIFT;
+ for (i = 0; i < num_loops; i++) {
+ uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+
+ amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
+ dst_offset, cur_size_in_bytes);
+
+ dst_offset += cur_size_in_bytes;
+ byte_count -= cur_size_in_bytes;
+ }
+
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
r = amdgpu_job_submit(job, ring, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+ AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r)
goto error_free;
@@ -1387,3 +1551,8 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
#endif
}
+
+u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev)
+{
+ return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
new file mode 100644
index 000000000000..9812c805326c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_TTM_H__
+#define __AMDGPU_TTM_H__
+
+#include "gpu_scheduler.h"
+
+#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
+#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
+#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
+
+#define AMDGPU_PL_FLAG_GDS (TTM_PL_FLAG_PRIV << 0)
+#define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1)
+#define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2)
+
+#define AMDGPU_TTM_LRU_SIZE 20
+
+struct amdgpu_mman_lru {
+ struct list_head *lru[TTM_NUM_MEM_TYPES];
+ struct list_head *swap_lru;
+};
+
+struct amdgpu_mman {
+ struct ttm_bo_global_ref bo_global_ref;
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_device bdev;
+ bool mem_global_referenced;
+ bool initialized;
+
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *vram;
+ struct dentry *gtt;
+#endif
+
+ /* buffer handling */
+ const struct amdgpu_buffer_funcs *buffer_funcs;
+ struct amdgpu_ring *buffer_funcs_ring;
+ /* Scheduler entity for buffer moves */
+ struct amd_sched_entity entity;
+
+ /* custom LRU management */
+ struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
+ /* guard for log2_size array, don't add anything in between */
+ struct amdgpu_mman_lru guard;
+};
+
+extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
+
+int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem);
+
+int amdgpu_copy_buffer(struct amdgpu_ring *ring,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ uint32_t byte_count,
+ struct reservation_object *resv,
+ struct fence **fence, bool direct_submit);
+int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+ uint32_t src_data,
+ struct reservation_object *resv,
+ struct fence **fence);
+
+int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
+bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
+int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 5cc95f1a7dab..cb3d252f3c78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -247,40 +247,32 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
- err = -ENOMEM;
goto failed;
}
err = amdgpu_bo_reserve(*bo, false);
if (err) {
- amdgpu_bo_unref(bo);
dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
- goto failed;
+ goto failed_reserve;
}
err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
if (err) {
- amdgpu_bo_unreserve(*bo);
- amdgpu_bo_unref(bo);
dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
- goto failed;
+ goto failed_pin;
}
err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
- amdgpu_bo_unpin(*bo);
- amdgpu_bo_unreserve(*bo);
- amdgpu_bo_unref(bo);
- goto failed;
+ goto failed_kmap;
}
amdgpu_bo_unreserve(*bo);
- fw_offset = 0;
for (i = 0; i < AMDGPU_UCODE_ID_MAXIMUM; i++) {
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
@@ -290,10 +282,16 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
}
+ return 0;
+failed_kmap:
+ amdgpu_bo_unpin(*bo);
+failed_pin:
+ amdgpu_bo_unreserve(*bo);
+failed_reserve:
+ amdgpu_bo_unref(bo);
failed:
- if (err)
- adev->firmware.smu_load = false;
+ adev->firmware.smu_load = false;
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b11f4e8868d7..e3281cacc586 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -201,39 +201,14 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+ AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
- r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, &adev->uvd.vcpu_bo);
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
+ &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
}
- r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
- if (r) {
- amdgpu_bo_unref(&adev->uvd.vcpu_bo);
- dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
- return r;
- }
-
- r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->uvd.gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
- amdgpu_bo_unref(&adev->uvd.vcpu_bo);
- dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
- return r;
- }
-
- r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
- if (r) {
- dev_err(adev->dev, "(%d) UVD map failed\n", r);
- return r;
- }
-
- amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-
ring = &adev->uvd.ring;
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
@@ -274,22 +249,13 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
- int r;
-
kfree(adev->uvd.saved_bo);
amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
- if (adev->uvd.vcpu_bo) {
- r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
- if (!r) {
- amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
- amdgpu_bo_unpin(adev->uvd.vcpu_bo);
- amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
- }
-
- amdgpu_bo_unref(&adev->uvd.vcpu_bo);
- }
+ amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
+ &adev->uvd.gpu_addr,
+ (void **)&adev->uvd.cpu_addr);
amdgpu_ring_fini(&adev->uvd.ring);
@@ -323,7 +289,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (!adev->uvd.saved_bo)
return -ENOMEM;
- memcpy(adev->uvd.saved_bo, ptr, size);
+ memcpy_fromio(adev->uvd.saved_bo, ptr, size);
return 0;
}
@@ -340,7 +306,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
ptr = adev->uvd.cpu_addr;
if (adev->uvd.saved_bo != NULL) {
- memcpy(ptr, adev->uvd.saved_bo, size);
+ memcpy_toio(ptr, adev->uvd.saved_bo, size);
kfree(adev->uvd.saved_bo);
adev->uvd.saved_bo = NULL;
} else {
@@ -349,11 +315,11 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
- (adev->uvd.fw->size) - offset);
+ memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr += le32_to_cpu(hdr->ucode_size_bytes);
- memset(ptr, 0, size);
+ memset_io(ptr, 0, size);
}
return 0;
@@ -385,12 +351,12 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
}
}
-static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
+static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
int i;
- for (i = 0; i < rbo->placement.num_placement; ++i) {
- rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
- rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+ for (i = 0; i < abo->placement.num_placement; ++i) {
+ abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
+ abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
}
}
@@ -843,6 +809,7 @@ static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
return r;
break;
case mmUVD_ENGINE_CNTL:
+ case mmUVD_NO_OP:
break;
default:
DRM_ERROR("Invalid reg 0x%X!\n", reg);
@@ -915,6 +882,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return -EINVAL;
}
+ r = amdgpu_cs_sysvm_access_required(parser);
+ if (r)
+ return r;
+
ctx.parser = parser;
ctx.buf_sizes = buf_sizes;
ctx.ib_idx = ib_idx;
@@ -981,8 +952,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->ptr[3] = addr >> 32;
ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
ib->ptr[5] = 0;
- for (i = 6; i < 16; ++i)
- ib->ptr[i] = PACKET2(0);
+ for (i = 6; i < 16; i += 2) {
+ ib->ptr[i] = PACKET0(mmUVD_NO_OP, 0);
+ ib->ptr[i+1] = 0;
+ }
ib->length_dw = 16;
if (direct) {
@@ -1114,15 +1087,9 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, uvd.idle_work.work);
- unsigned i, fences, handles = 0;
+ unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
- fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
-
- for (i = 0; i < adev->uvd.max_handles; ++i)
- if (atomic_read(&adev->uvd.handles[i]))
- ++handles;
-
- if (fences == 0 && handles == 0) {
+ if (fences == 0) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, false);
} else {
@@ -1187,7 +1154,8 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
}
-error:
fence_put(fence);
+
+error:
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 05865ce35351..7fe8fd884f06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -210,6 +210,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
*/
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
+ unsigned i;
+
if (adev->vce.vcpu_bo == NULL)
return 0;
@@ -217,8 +219,8 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
amdgpu_bo_unref(&adev->vce.vcpu_bo);
- amdgpu_ring_fini(&adev->vce.ring[0]);
- amdgpu_ring_fini(&adev->vce.ring[1]);
+ for (i = 0; i < adev->vce.num_rings; i++)
+ amdgpu_ring_fini(&adev->vce.ring[i]);
release_firmware(adev->vce.fw);
mutex_destroy(&adev->vce.idle_mutex);
@@ -282,8 +284,8 @@ int amdgpu_vce_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy(cpu_addr, (adev->vce.fw->data) + offset,
- (adev->vce.fw->size) - offset);
+ memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
+ adev->vce.fw->size - offset);
amdgpu_bo_kunmap(adev->vce.vcpu_bo);
@@ -303,9 +305,12 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, vce.idle_work.work);
+ unsigned i, count = 0;
+
+ for (i = 0; i < adev->vce.num_rings; i++)
+ count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
- if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
- (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
+ if (count == 0) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_vce(adev, false);
} else {
@@ -634,7 +639,11 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t allocated = 0;
uint32_t tmp, handle = 0;
uint32_t *size = &tmp;
- int i, r = 0, idx = 0;
+ int i, r, idx = 0;
+
+ r = amdgpu_cs_sysvm_access_required(p);
+ if (r)
+ return r;
while (idx < ib->length_dw) {
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
@@ -687,6 +696,21 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
case 0x04000008: /* rdo */
case 0x04000009: /* vui */
case 0x05000002: /* auxiliary buffer */
+ case 0x05000009: /* clock table */
+ break;
+
+ case 0x0500000c: /* hw config */
+ switch (p->adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_KAVERI:
+ case CHIP_MULLINS:
+#endif
+ case CHIP_CARRIZO:
+ break;
+ default:
+ r = -EINVAL;
+ goto out;
+ }
break;
case 0x03000001: /* encode */
@@ -799,6 +823,18 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
amdgpu_ring_write(ring, VCE_CMD_END);
}
+unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 4; /* amdgpu_vce_ring_emit_ib */
+}
+
+unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
+}
+
/**
* amdgpu_vce_ring_test_ring - test if VCE ring is working
*
@@ -850,8 +886,8 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct fence *fence = NULL;
long r;
- /* skip vce ring1 ib test for now, since it's not reliable */
- if (ring == &ring->adev->vce.ring[1])
+ /* skip vce ring1/2 ib test for now, since it's not reliable */
+ if (ring != &ring->adev->vce.ring[0])
return 0;
r = amdgpu_vce_get_create_msg(ring, 1, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 63f83d0d985c..12729d2852df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -42,5 +42,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout);
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
+unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring);
+unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 88d68cb6e89d..2c37a374917f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -19,22 +19,39 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ * Author: Monk.liu@amd.com
*/
+#ifndef AMDGPU_VIRT_H
+#define AMDGPU_VIRT_H
-#ifndef _POLARIS10_CLOCK_POWER_GATING_H_
-#define _POLARIS10_CLOCK_POWER_GATING_H_
+#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */
+#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */
+#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
+#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is passed through to the VM */
+/* GPU virtualization */
+struct amdgpu_virtualization {
+ uint32_t virtual_caps;
+};
-#include "polaris10_hwmgr.h"
-#include "pp_asicblocks.h"
+#define amdgpu_sriov_enabled(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
-int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
-int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
-int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
- const uint32_t *msg_id);
-int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
+#define amdgpu_sriov_vf(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF)
-#endif /* _POLARIS10_CLOCK_POWER_GATING_H_ */
+#define amdgpu_sriov_bios(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)
+
+#define amdgpu_passthrough(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE)
+
+static inline bool is_virtual_machine(void)
+{
+#ifdef CONFIG_X86
+ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+ return false;
+#endif
+}
+
+#endif
\ No newline at end of file
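
A sketch of how these caps are meant to be populated and consumed during early device init (hypothetical helper; the real detection code is outside this diff):

	static void example_detect_virtualization(struct amdgpu_device *adev)
	{
		/* inside a VM but owning the whole GPU */
		if (is_virtual_machine())
			adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;

		if (amdgpu_sriov_vf(adev))
			DRM_INFO("running as an SR-IOV virtual function\n");
	}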
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8e642fc48df4..06f24322e7c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -51,19 +51,22 @@
* SI supports 16.
*/
-/* Special value that no flush is necessary */
-#define AMDGPU_VM_NO_FLUSH (~0ll)
-
/* Local structure. Encapsulate some VM table update parameters to reduce
* the number of function parameters
*/
-struct amdgpu_vm_update_params {
+struct amdgpu_pte_update_params {
+ /* amdgpu device we do this update for */
+ struct amdgpu_device *adev;
/* address where to copy page table entries from */
uint64_t src;
- /* DMA addresses to use for mapping */
- dma_addr_t *pages_addr;
/* indirect buffer to fill with commands */
struct amdgpu_ib *ib;
+ /* Function which actually does the update */
+ void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
+ uint64_t addr, unsigned count, uint32_t incr,
+ uint32_t flags);
+ /* indicate update pt or its shadow */
+ bool shadow;
};
/**
@@ -467,10 +470,9 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_update_pages - helper to call the right asic function
+ * amdgpu_vm_do_set_ptes - helper to call the right asic function
*
- * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
+ * @params: see amdgpu_pte_update_params definition
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -480,32 +482,46 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* Traces the parameters and calls the right asic functions
* to setup the page table using the DMA.
*/
-static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
- struct amdgpu_vm_update_params
- *vm_update_params,
+static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
+ uint64_t pe, uint64_t addr,
+ unsigned count, uint32_t incr,
+ uint32_t flags)
+{
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+
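+ /* for a handful of entries a plain write beats a set_pte_pde burst */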
+ if (count < 3) {
+ amdgpu_vm_write_pte(params->adev, params->ib, pe,
+ addr | flags, count, incr);
+
+ } else {
+ amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
+ count, incr, flags);
+ }
+}
+
+/**
+ * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
+ *
+ * @params: see amdgpu_pte_update_params definition
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the DMA function to copy the PTEs.
+ */
+static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
uint32_t flags)
{
- trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
-
- if (vm_update_params->src) {
- amdgpu_vm_copy_pte(adev, vm_update_params->ib,
- pe, (vm_update_params->src + (addr >> 12) * 8), count);
+ uint64_t src = (params->src + (addr >> 12) * 8);
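+ /* PTEs are 8 bytes each; (addr >> 12) indexes the GART-resident source table */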
- } else if (vm_update_params->pages_addr) {
- amdgpu_vm_write_pte(adev, vm_update_params->ib,
- vm_update_params->pages_addr,
- pe, addr, count, incr, flags);
- } else if (count < 3) {
- amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
- count, incr, flags);
+ trace_amdgpu_vm_copy_ptes(pe, src, count);
- } else {
- amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
- count, incr, flags);
- }
+ amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
}
/**
@@ -523,12 +539,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
struct fence *fence = NULL;
struct amdgpu_job *job;
- struct amdgpu_vm_update_params vm_update_params;
+ struct amdgpu_pte_update_params params;
unsigned entries;
uint64_t addr;
int r;
- memset(&vm_update_params, 0, sizeof(vm_update_params));
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
r = reservation_object_reserve_shared(bo->tbo.resv);
@@ -539,6 +554,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error;
+ r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+ if (r)
+ goto error;
+
addr = amdgpu_bo_gpu_offset(bo);
entries = amdgpu_bo_size(bo) / 8;
@@ -546,9 +565,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error;
- vm_update_params.ib = &job->ibs[0];
- amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
- 0, 0);
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.ib = &job->ibs[0];
+ amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > 64);
@@ -577,55 +597,46 @@ error:
* Look up the physical address of the page that the pte resolves
* to and return the pointer for the page table entry.
*/
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
+static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
uint64_t result;
- if (pages_addr) {
- /* page table offset */
- result = pages_addr[addr >> PAGE_SHIFT];
-
- /* in case cpu page size != gpu page size*/
- result |= addr & (~PAGE_MASK);
+ /* page table offset */
+ result = pages_addr[addr >> PAGE_SHIFT];
- } else {
- /* No mapping required */
- result = addr;
- }
+ /* in case cpu page size != gpu page size*/
+ result |= addr & (~PAGE_MASK);
result &= 0xFFFFFFFFFFFFF000ULL;
return result;
}
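
A worked example of the page-size mismatch this helper handles (all numbers invented):

	/*
	 * 64 KiB CPU pages, 4 KiB GPU pages, addr = 0x12345678:
	 *   pages_addr[addr >> 16]    -> 0xABCD0000  (the backing CPU page)
	 *   addr & ~PAGE_MASK         -> 0x5678      (offset inside that page)
	 *   OR, then mask low 12 bits -> 0xABCD5000  (the right 4 KiB GPU page)
	 * With 4 KiB CPU pages the OR and the final mask cancel out.
	 */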
-/**
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ bool shadow)
{
struct amdgpu_ring *ring;
- struct amdgpu_bo *pd = vm->page_directory;
- uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
+ struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
+ vm->page_directory;
+ uint64_t pd_addr;
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_job *job;
- struct amdgpu_vm_update_params vm_update_params;
+ struct amdgpu_pte_update_params params;
struct fence *fence = NULL;
int r;
- memset(&vm_update_params, 0, sizeof(vm_update_params));
+ if (!pd)
+ return 0;
+
+ r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
+ if (r)
+ return r;
+
+ pd_addr = amdgpu_bo_gpu_offset(pd);
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
/* padding, etc. */
@@ -638,7 +649,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
if (r)
return r;
- vm_update_params.ib = &job->ibs[0];
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.ib = &job->ibs[0];
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -648,20 +661,34 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
if (bo == NULL)
continue;
+ if (bo->shadow) {
+ struct amdgpu_bo *shadow = bo->shadow;
+
+ r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+ if (r)
+ return r;
+ }
+
pt = amdgpu_bo_gpu_offset(bo);
- if (vm->page_tables[pt_idx].addr == pt)
- continue;
- vm->page_tables[pt_idx].addr = pt;
+ if (!shadow) {
+ if (vm->page_tables[pt_idx].addr == pt)
+ continue;
+ vm->page_tables[pt_idx].addr = pt;
+ } else {
+ if (vm->page_tables[pt_idx].shadow_addr == pt)
+ continue;
+ vm->page_tables[pt_idx].shadow_addr = pt;
+ }
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
- ((last_pt + incr * count) != pt)) {
+ ((last_pt + incr * count) != pt) ||
+ (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
if (count) {
- amdgpu_vm_update_pages(adev, &vm_update_params,
- last_pde, last_pt,
- count, incr,
- AMDGPU_PTE_VALID);
+ amdgpu_vm_do_set_ptes(&params, last_pde,
+ last_pt, count, incr,
+ AMDGPU_PTE_VALID);
}
count = 1;
@@ -673,15 +700,14 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
}
if (count)
- amdgpu_vm_update_pages(adev, &vm_update_params,
- last_pde, last_pt,
- count, incr, AMDGPU_PTE_VALID);
+ amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
+ count, incr, AMDGPU_PTE_VALID);
- if (vm_update_params.ib->length_dw != 0) {
- amdgpu_ring_pad_ib(ring, vm_update_params.ib);
+ if (params.ib->length_dw != 0) {
+ amdgpu_ring_pad_ib(ring, params.ib);
amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
- WARN_ON(vm_update_params.ib->length_dw > ndw);
+ WARN_ON(params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &fence);
if (r)
@@ -703,92 +729,33 @@ error_free:
return r;
}
-/**
- * amdgpu_vm_frag_ptes - add fragment information to PTEs
+/*
+ * amdgpu_vm_update_page_directory - make sure that page directory is valid
*
* @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
- * @pe_start: first PTE to handle
- * @pe_end: last PTE to handle
- * @addr: addr those PTEs should point to
- * @flags: hw mapping flags
+ * @vm: requested vm
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
*/
-static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
- struct amdgpu_vm_update_params
- *vm_update_params,
- uint64_t pe_start, uint64_t pe_end,
- uint64_t addr, uint32_t flags)
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
{
- /**
- * The MC L1 TLB supports variable sized pages, based on a fragment
- * field in the PTE. When this field is set to a non-zero value, page
- * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
- * flags are considered valid for all PTEs within the fragment range
- * and corresponding mappings are assumed to be physically contiguous.
- *
- * The L1 TLB can store a single PTE for the whole fragment,
- * significantly increasing the space available for translation
- * caching. This leads to large improvements in throughput when the
- * TLB is under pressure.
- *
- * The L2 TLB distributes small and large fragments into two
- * asymmetric partitions. The large fragment cache is significantly
- * larger. Thus, we try to use large fragments wherever possible.
- * Userspace can support this by aligning virtual base address and
- * allocation size to the fragment size.
- */
-
- /* SI and newer are optimized for 64KB */
- uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
- uint64_t frag_align = 0x80;
-
- uint64_t frag_start = ALIGN(pe_start, frag_align);
- uint64_t frag_end = pe_end & ~(frag_align - 1);
-
- unsigned count;
-
- /* Abort early if there isn't anything to do */
- if (pe_start == pe_end)
- return;
-
- /* system pages are non continuously */
- if (vm_update_params->src || vm_update_params->pages_addr ||
- !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
-
- count = (pe_end - pe_start) / 8;
- amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
- addr, count, AMDGPU_GPU_PAGE_SIZE,
- flags);
- return;
- }
-
- /* handle the 4K area at the beginning */
- if (pe_start != frag_start) {
- count = (frag_start - pe_start) / 8;
- amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
- count, AMDGPU_GPU_PAGE_SIZE, flags);
- addr += AMDGPU_GPU_PAGE_SIZE * count;
- }
-
- /* handle the area in the middle */
- count = (frag_end - frag_start) / 8;
- amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
- AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
+ int r;
- /* handle the 4K area at the end */
- if (frag_end != pe_end) {
- addr += AMDGPU_GPU_PAGE_SIZE * count;
- count = (pe_end - frag_end) / 8;
- amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
- count, AMDGPU_GPU_PAGE_SIZE, flags);
- }
+ r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
+ if (r)
+ return r;
+ return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
}
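The directory walk above coalesces PDE writes only while both the PDE slots and the page tables they point to advance contiguously, and caps a run at AMDGPU_VM_MAX_UPDATE_SIZE entries. A standalone sketch of the coalescing test; the two constants are assumptions for illustration, not taken from this patch:

	#include <stdbool.h>
	#include <stdint.h>

	#define EX_PTE_COUNT	512	/* assumed AMDGPU_VM_PTE_COUNT */
	#define EX_MAX_UPDATE	0x3FFFF	/* assumed AMDGPU_VM_MAX_UPDATE_SIZE */

	static bool pde_run_continues(uint64_t last_pde, uint64_t last_pt,
				      unsigned count, uint64_t pde, uint64_t pt)
	{
		const uint64_t incr = EX_PTE_COUNT * 8;	/* one PT in bytes */

		return (last_pde + 8 * count) == pde &&		/* PDEs contiguous */
		       (last_pt + incr * count) == pt &&	/* PTs contiguous */
		       count != EX_MAX_UPDATE;			/* run not yet full */
	}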
/**
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
- * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
+ * @params: see amdgpu_pte_update_params definition
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
@@ -797,16 +764,14 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
*
* Update the page tables in the range @start - @end.
*/
-static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
- struct amdgpu_vm_update_params
- *vm_update_params,
+static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
struct amdgpu_vm *vm,
uint64_t start, uint64_t end,
uint64_t dst, uint32_t flags)
{
const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
- uint64_t cur_pe_start, cur_pe_end, cur_dst;
+ uint64_t cur_pe_start, cur_nptes, cur_dst;
uint64_t addr; /* next GPU address to be updated */
uint64_t pt_idx;
struct amdgpu_bo *pt;
@@ -817,7 +782,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
addr = start;
pt_idx = addr >> amdgpu_vm_block_size;
pt = vm->page_tables[pt_idx].entry.robj;
-
+ if (params->shadow) {
+ if (!pt->shadow)
+ return;
+ pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ }
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
@@ -825,7 +794,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
cur_pe_start = amdgpu_bo_gpu_offset(pt);
cur_pe_start += (addr & mask) * 8;
- cur_pe_end = cur_pe_start + 8 * nptes;
+ cur_nptes = nptes;
cur_dst = dst;
/* for next ptb*/
@@ -836,6 +805,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
while (addr < end) {
pt_idx = addr >> amdgpu_vm_block_size;
pt = vm->page_tables[pt_idx].entry.robj;
+ if (params->shadow) {
+ if (!pt->shadow)
+ return;
+ pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ }
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -845,19 +819,19 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
next_pe_start = amdgpu_bo_gpu_offset(pt);
next_pe_start += (addr & mask) * 8;
- if (cur_pe_end == next_pe_start) {
+ if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
+ ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
/* The next ptb is consecutive to the current ptb.
- * Don't call amdgpu_vm_frag_ptes now.
+ * Don't call the update function now;
* both will be updated together in a later call.
*/
- cur_pe_end += 8 * nptes;
+ cur_nptes += nptes;
} else {
- amdgpu_vm_frag_ptes(adev, vm_update_params,
- cur_pe_start, cur_pe_end,
- cur_dst, flags);
+ params->func(params, cur_pe_start, cur_dst, cur_nptes,
+ AMDGPU_GPU_PAGE_SIZE, flags);
cur_pe_start = next_pe_start;
- cur_pe_end = next_pe_start + 8 * nptes;
+ cur_nptes = nptes;
cur_dst = dst;
}
@@ -866,8 +840,75 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
dst += nptes * AMDGPU_GPU_PAGE_SIZE;
}
- amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start,
- cur_pe_end, cur_dst, flags);
+ params->func(params, cur_pe_start, cur_dst, cur_nptes,
+ AMDGPU_GPU_PAGE_SIZE, flags);
+}
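The params->func indirection introduced here lets one traversal serve both backends: amdgpu_vm_do_set_ptes writes PTEs inline, while amdgpu_vm_do_copy_ptes copies them from a staged buffer. A sketch of the dispatch shape only; the real struct is defined earlier in amdgpu_vm.c:

	/* Illustrative shape of amdgpu_pte_update_params; not the real layout. */
	struct pte_update_params_sketch {
		struct amdgpu_device *adev;
		struct amdgpu_ib *ib;
		uint64_t src;		/* GPU address of staged PTEs, if copying */
		bool shadow;		/* updating the shadow page tables? */
		void (*func)(struct pte_update_params_sketch *params,
			     uint64_t pe, uint64_t addr, unsigned count,
			     uint32_t incr, uint32_t flags);
	};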
+
+/*
+ * amdgpu_vm_frag_ptes - add fragment information to PTEs
+ *
+ * @params: see amdgpu_pte_update_params definition
+ * @vm: requested vm
+ * @start: first PTE to handle
+ * @end: last PTE to handle
+ * @dst: addr those PTEs should point to
+ * @flags: hw mapping flags
+ */
+static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
+ struct amdgpu_vm *vm,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint32_t flags)
+{
+ /*
+ * The MC L1 TLB supports variable sized pages, based on a fragment
+ * field in the PTE. When this field is set to a non-zero value, page
+ * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
+ * flags are considered valid for all PTEs within the fragment range
+ * and corresponding mappings are assumed to be physically contiguous.
+ *
+ * The L1 TLB can store a single PTE for the whole fragment,
+ * significantly increasing the space available for translation
+ * caching. This leads to large improvements in throughput when the
+ * TLB is under pressure.
+ *
+ * The L2 TLB distributes small and large fragments into two
+ * asymmetric partitions. The large fragment cache is significantly
+ * larger. Thus, we try to use large fragments wherever possible.
+ * Userspace can support this by aligning virtual base address and
+ * allocation size to the fragment size.
+ */
+
+ /* SI and newer are optimized for 64KB */
+ uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
+ uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
+
+ uint64_t frag_start = ALIGN(start, frag_align);
+ uint64_t frag_end = end & ~(frag_align - 1);
+
+ /* system pages are not physically contiguous */
+ if (params->src || !(flags & AMDGPU_PTE_VALID) ||
+ (frag_start >= frag_end)) {
+
+ amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
+ return;
+ }
+
+ /* handle the 4K area at the beginning */
+ if (start != frag_start) {
+ amdgpu_vm_update_ptes(params, vm, start, frag_start,
+ dst, flags);
+ dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
+ }
+
+ /* handle the area in the middle */
+ amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
+ flags | frag_flags);
+
+ /* handle the 4K area at the end */
+ if (frag_end != end) {
+ dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
+ amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
+ }
}
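To make the head/body/tail split concrete: assuming AMDGPU_LOG2_PAGES_PER_FRAG == 4 (64KB fragments of 4KB pages, an assumption of this example), a mapping covering PTEs 5..70 splits as in this standalone sketch:

	#include <stdint.h>
	#include <stdio.h>

	#define EX_FRAG_ALIGN	(1ULL << 4)	/* 16 pages = 64KB */
	#define EX_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		uint64_t start = 5, end = 70;
		uint64_t frag_start = EX_ALIGN(start, EX_FRAG_ALIGN);	/* 16 */
		uint64_t frag_end = end & ~(EX_FRAG_ALIGN - 1);		/* 64 */

		/* head/tail use 4KB PTEs, only the body gets fragment flags */
		printf("head [%llu, %llu)\n", (unsigned long long)start,
		       (unsigned long long)frag_start);
		printf("body [%llu, %llu)\n", (unsigned long long)frag_start,
		       (unsigned long long)frag_end);
		printf("tail [%llu, %llu)\n", (unsigned long long)frag_end,
		       (unsigned long long)end);
		return 0;
	}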
/**
@@ -900,14 +941,19 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned nptes, ncmds, ndw;
struct amdgpu_job *job;
- struct amdgpu_vm_update_params vm_update_params;
+ struct amdgpu_pte_update_params params;
struct fence *f = NULL;
int r;
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
- memset(&vm_update_params, 0, sizeof(vm_update_params));
- vm_update_params.src = src;
- vm_update_params.pages_addr = pages_addr;
+
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.src = src;
/* sync to everything on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
@@ -924,30 +970,53 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* padding, etc. */
ndw = 64;
- if (vm_update_params.src) {
+ if (src) {
/* only copy commands needed */
ndw += ncmds * 7;
- } else if (vm_update_params.pages_addr) {
- /* header for write data commands */
- ndw += ncmds * 4;
+ params.func = amdgpu_vm_do_copy_ptes;
+
+ } else if (pages_addr) {
+ /* copy commands needed */
+ ndw += ncmds * 7;
- /* body of write data command */
+ /* and also PTEs */
ndw += nptes * 2;
+ params.func = amdgpu_vm_do_copy_ptes;
+
} else {
/* set page commands needed */
ndw += ncmds * 10;
/* two extra commands for begin/end of fragment */
ndw += 2 * 10;
+
+ params.func = amdgpu_vm_do_set_ptes;
}
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
if (r)
return r;
- vm_update_params.ib = &job->ibs[0];
+ params.ib = &job->ibs[0];
+
+ if (!src && pages_addr) {
+ uint64_t *pte;
+ unsigned i;
+
+ /* Put the PTEs at the end of the IB. */
+ i = ndw - nptes * 2;
+ pte = (uint64_t *)&(job->ibs->ptr[i]);
+ params.src = job->ibs->gpu_addr + i * 4;
+
+ for (i = 0; i < nptes; ++i) {
+ pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
+ AMDGPU_GPU_PAGE_SIZE);
+ pte[i] |= flags;
+ }
+ addr = 0;
+ }
r = amdgpu_sync_fence(adev, &job->sync, exclusive);
if (r)
@@ -962,11 +1031,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
- last + 1, addr, flags);
+ params.shadow = true;
+ amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
+ params.shadow = false;
+ amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
- amdgpu_ring_pad_ib(ring, vm_update_params.ib);
- WARN_ON(vm_update_params.ib->length_dw > ndw);
+ amdgpu_ring_pad_ib(ring, params.ib);
+ WARN_ON(params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &f);
if (r)
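The staging block above resolves GART addresses on the CPU, parks the finished PTEs at the tail of the very IB that will consume them, and points params.src at their GPU address, so the copy backend can treat system pages as a linear source. Standalone sketch of the packing step; the helper name and parameters are hypothetical:

	#include <stdint.h>

	/* Hypothetical: stage pre-resolved PTEs at the IB tail and return the
	 * GPU address that params.src would be set to. 2 dwords per PTE. */
	static uint64_t stage_ptes_in_ib(uint32_t *ib_cpu, uint64_t ib_gpu_addr,
					 unsigned ndw, const uint64_t *resolved,
					 unsigned nptes, uint64_t flags)
	{
		unsigned i = ndw - nptes * 2;		/* tail slot index */
		uint64_t *pte = (uint64_t *)&ib_cpu[i];
		unsigned n;

		for (n = 0; n < nptes; ++n)
			pte[n] = resolved[n] | flags;	/* GART addr | hw flags */

		return ib_gpu_addr + i * 4;		/* dwords are 4 bytes */
	}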
@@ -1062,28 +1133,32 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @bo_va: requested BO and VM object
- * @mem: ttm mem
+ * @clear: if true clear the entries
*
* Fill in the page table entries for @bo_va.
* Returns 0 for success, -EINVAL for failure.
- *
- * Object have to be reserved and mutex must be locked!
*/
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
- struct ttm_mem_reg *mem)
+ bool clear)
{
struct amdgpu_vm *vm = bo_va->vm;
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
uint32_t gtt_flags, flags;
+ struct ttm_mem_reg *mem;
struct fence *exclusive;
uint64_t addr;
int r;
- if (mem) {
+ if (clear) {
+ mem = NULL;
+ addr = 0;
+ exclusive = NULL;
+ } else {
struct ttm_dma_tt *ttm;
+ mem = &bo_va->bo->tbo.mem;
addr = (u64)mem->start << PAGE_SHIFT;
switch (mem->mem_type) {
case TTM_PL_TT:
@@ -1101,13 +1176,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
}
exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
- } else {
- addr = 0;
- exclusive = NULL;
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
- gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
+ gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
+ adev == bo_va->bo->adev) ? flags : 0;
spin_lock(&vm->status_lock);
if (!list_empty(&bo_va->vm_status))
@@ -1134,7 +1207,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
spin_lock(&vm->status_lock);
list_splice_init(&bo_va->invalids, &bo_va->valids);
list_del_init(&bo_va->vm_status);
- if (!mem)
+ if (clear)
list_add(&bo_va->vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
@@ -1197,7 +1270,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
struct amdgpu_bo_va, vm_status);
spin_unlock(&vm->status_lock);
- r = amdgpu_vm_bo_update(adev, bo_va, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, true);
if (r)
return r;
@@ -1342,7 +1415,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_SHADOW,
NULL, resv, &pt);
if (r)
goto error_free;
@@ -1354,10 +1428,20 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
r = amdgpu_vm_clear_bo(adev, vm, pt);
if (r) {
+ amdgpu_bo_unref(&pt->shadow);
amdgpu_bo_unref(&pt);
goto error_free;
}
+ if (pt->shadow) {
+ r = amdgpu_vm_clear_bo(adev, vm, pt->shadow);
+ if (r) {
+ amdgpu_bo_unref(&pt->shadow);
+ amdgpu_bo_unref(&pt);
+ goto error_free;
+ }
+ }
+
entry->robj = pt;
entry->priority = 0;
entry->tv.bo = &entry->robj->tbo;
@@ -1535,13 +1619,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
r = amd_sched_entity_init(&ring->sched, &vm->entity,
rq, amdgpu_sched_jobs);
if (r)
- return r;
+ goto err;
vm->page_directory_fence = NULL;
r = amdgpu_bo_create(adev, pd_size, align, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_SHADOW,
NULL, NULL, &vm->page_directory);
if (r)
goto error_free_sched_entity;
@@ -1551,20 +1636,34 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
goto error_free_page_directory;
r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
- amdgpu_bo_unreserve(vm->page_directory);
if (r)
- goto error_free_page_directory;
+ goto error_unreserve;
+
+ if (vm->page_directory->shadow) {
+ r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory->shadow);
+ if (r)
+ goto error_unreserve;
+ }
+
vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
+ amdgpu_bo_unreserve(vm->page_directory);
return 0;
+error_unreserve:
+ amdgpu_bo_unreserve(vm->page_directory);
+
error_free_page_directory:
+ amdgpu_bo_unref(&vm->page_directory->shadow);
amdgpu_bo_unref(&vm->page_directory);
vm->page_directory = NULL;
error_free_sched_entity:
amd_sched_entity_fini(&ring->sched, &vm->entity);
+err:
+ drm_free_large(vm->page_tables);
+
return r;
}
@@ -1597,10 +1696,18 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
kfree(mapping);
}
- for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
- amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
+ for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
+ struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+
+ if (!pt)
+ continue;
+
+ amdgpu_bo_unref(&pt->shadow);
+ amdgpu_bo_unref(&pt);
+ }
drm_free_large(vm->page_tables);
+ amdgpu_bo_unref(&vm->page_directory->shadow);
amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 49a39b1a0a96..f7d236f95e74 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -497,7 +497,13 @@ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
* SetPixelClock provides the dividers
*/
args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
- args.v6.ucPpll = ATOM_EXT_PLL1;
+ if (adev->asic_type == CHIP_TAHITI ||
+ adev->asic_type == CHIP_PITCAIRN ||
+ adev->asic_type == CHIP_VERDE ||
+ adev->asic_type == CHIP_OLAND)
+ args.v6.ucPpll = ATOM_PPLL0;
+ else
+ args.v6.ucPpll = ATOM_EXT_PLL1;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 7f85c2c1d681..f81068ba4cc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -88,7 +88,6 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
/* timeout */
if (args.v2.ucReplyStatus == 1) {
- DRM_DEBUG_KMS("dp_aux_ch timeout\n");
r = -ETIMEDOUT;
goto done;
}
@@ -339,22 +338,21 @@ int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector)
{
struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
u8 msg[DP_DPCD_SIZE];
- int ret, i;
+ int ret;
- for (i = 0; i < 7; i++) {
- ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, msg,
- DP_DPCD_SIZE);
- if (ret == DP_DPCD_SIZE) {
- memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+ ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV,
+ msg, DP_DPCD_SIZE);
+ if (ret == DP_DPCD_SIZE) {
+ memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
- DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
- dig_connector->dpcd);
+ DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+ dig_connector->dpcd);
- amdgpu_atombios_dp_probe_oui(amdgpu_connector);
+ amdgpu_atombios_dp_probe_oui(amdgpu_connector);
- return 0;
- }
+ return 0;
}
+
dig_connector->dpcd[0] = 0;
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index bc56c8a181e6..b374653bd6cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -27,6 +27,7 @@
#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_atombios.h"
+#include "atombios_i2c.h"
#define TARGET_HW_I2C_CLOCK 50
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index e2f0e5d58d5c..1d8c375a3561 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -5396,7 +5396,7 @@ static void ci_dpm_disable(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
- ci_dpm_powergate_uvd(adev, false);
+ ci_dpm_powergate_uvd(adev, true);
if (!amdgpu_ci_is_smc_running(adev))
return;
@@ -5779,6 +5779,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
break;
case CHIP_KAVERI:
case CHIP_KABINI:
+ case CHIP_MULLINS:
default: BUG();
}
@@ -5873,7 +5874,10 @@ static int ci_dpm_init(struct amdgpu_device *adev)
pi->pcie_dpm_key_disabled = 0;
pi->thermal_sclk_dpm_enabled = 0;
- pi->caps_sclk_ds = true;
+ if (amdgpu_sclk_deep_sleep_en)
+ pi->caps_sclk_ds = true;
+ else
+ pi->caps_sclk_ds = false;
pi->mclk_strobe_mode_threshold = 40000;
pi->mclk_stutter_mode_threshold = 40000;
@@ -6032,7 +6036,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
pi->caps_dynamic_ac_timing = true;
- pi->uvd_power_gated = false;
+ pi->uvd_power_gated = true;
/* make sure dc limits are valid */
if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
@@ -6175,8 +6179,6 @@ static int ci_dpm_late_init(void *handle)
if (ret)
return ret;
- ci_dpm_powergate_uvd(adev, true);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 4efc901f658c..a845b6a93b79 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -67,6 +67,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_powerplay.h"
+#include "dce_virtual.h"
/*
* Indirect registers accessor
@@ -962,12 +963,6 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
-{
- /* CIK does not support SR-IOV */
- return 0;
-}
-
static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS, false},
{mmGB_ADDR_CONFIG, false},
@@ -1640,6 +1635,12 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
+static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
+{
+ if (is_virtual_machine()) /* passthrough mode */
+ adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1708,6 +1709,74 @@ static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1776,6 +1845,74 @@ static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1844,6 +1981,74 @@ static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1912,6 +2117,74 @@ static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1980,32 +2253,128 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
int cik_set_ip_blocks(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- adev->ip_blocks = bonaire_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
- break;
- case CHIP_HAWAII:
- adev->ip_blocks = hawaii_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
- break;
- case CHIP_KAVERI:
- adev->ip_blocks = kaveri_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
- break;
- case CHIP_KABINI:
- adev->ip_blocks = kabini_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
- break;
- case CHIP_MULLINS:
- adev->ip_blocks = mullins_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
+ if (adev->enable_virtual_display) {
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ adev->ip_blocks = bonaire_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd);
+ break;
+ case CHIP_HAWAII:
+ adev->ip_blocks = hawaii_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd);
+ break;
+ case CHIP_KAVERI:
+ adev->ip_blocks = kaveri_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd);
+ break;
+ case CHIP_KABINI:
+ adev->ip_blocks = kabini_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd);
+ break;
+ case CHIP_MULLINS:
+ adev->ip_blocks = mullins_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+ } else {
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ adev->ip_blocks = bonaire_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
+ break;
+ case CHIP_HAWAII:
+ adev->ip_blocks = hawaii_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
+ break;
+ case CHIP_KAVERI:
+ adev->ip_blocks = kaveri_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
+ break;
+ case CHIP_KABINI:
+ adev->ip_blocks = kabini_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
+ break;
+ case CHIP_MULLINS:
+ adev->ip_blocks = mullins_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
}
return 0;
@@ -2015,13 +2384,13 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
.read_bios_from_rom = &cik_read_bios_from_rom,
+ .detect_hw_virtualization = cik_detect_hw_virtualization,
.read_register = &cik_read_register,
.reset = &cik_asic_reset,
.set_vga_state = &cik_vga_set_state,
.get_xclk = &cik_get_xclk,
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
- .get_virtual_caps = &cik_get_virtual_caps,
};
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ee6466912497..cb952acc7133 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -52,6 +52,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static int cik_sdma_soft_reset(void *handle);
MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
@@ -694,24 +695,16 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0x1FFFF8)
- bytes = 0x1FFFF8;
-
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
- SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = bytes;
- ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(src);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
+ unsigned bytes = count * 8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
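The clamp loop could go because the VM layer now bounds every call (the AMDGPU_VM_MAX_UPDATE_SIZE checks earlier in this series), leaving a single 7-dword COPY LINEAR packet whose byte count must stay below the old 0x1FFFF8 limit. Illustrative layout, field meanings taken from the code above:

	/* Sketch of the 7-dword packet emitted by cik_sdma_vm_copy_pte(). */
	struct cik_sdma_copy_linear_pkt {
		uint32_t header;	/* SDMA_PACKET(COPY, LINEAR, 0) */
		uint32_t bytes;		/* count * 8 */
		uint32_t endian;	/* 0: no src/dst endian swap */
		uint32_t src_lo;	/* lower_32_bits(src) */
		uint32_t src_hi;	/* upper_32_bits(src) */
		uint32_t dst_lo;	/* lower_32_bits(pe) */
		uint32_t dst_hi;	/* upper_32_bits(pe) */
	};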
/**
@@ -719,39 +712,27 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: access flags
*
* Update PTEs by writing them manually using sDMA (CIK).
*/
-static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
- const dma_addr_t *pages_addr, uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr)
{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
- SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- value = amdgpu_vm_map_gart(pages_addr, addr);
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
+ unsigned ndw = count * 2;
+
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2) {
+ ib->ptr[ib->length_dw++] = lower_32_bits(value);
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ value += incr;
}
}
@@ -767,40 +748,21 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
*
* Update the page tables using sDMA (CIK).
*/
-static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
- uint64_t pe,
+static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & AMDGPU_PTE_VALID)
- value = addr;
- else
- value = 0;
-
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
- }
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = count; /* number of entries */
}
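Likewise, GENERATE_PTE_PDE now goes out as one 10-dword packet with the entry count supplied directly. Illustrative layout, mirroring the dword order above:

	/* Sketch of the 10-dword packet emitted by cik_sdma_vm_set_pte_pde(). */
	struct cik_sdma_pte_pde_pkt {
		uint32_t header;	/* SDMA_PACKET(GENERATE_PTE_PDE, 0, 0) */
		uint32_t pe_lo;		/* lower_32_bits(pe), dst addr */
		uint32_t pe_hi;		/* upper_32_bits(pe) */
		uint32_t mask_lo;	/* flags mask */
		uint32_t mask_hi;	/* 0 */
		uint32_t value_lo;	/* lower_32_bits(addr) */
		uint32_t value_hi;	/* upper_32_bits(addr) */
		uint32_t incr;		/* increment per entry */
		uint32_t incr_hi;	/* 0 */
		uint32_t count;		/* number of entries */
	};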
/**
@@ -886,6 +848,22 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
+static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 7 + 4; /* cik_sdma_ring_emit_ib */
+}
+
+static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 6 + /* cik_sdma_ring_emit_hdp_flush */
+ 3 + /* cik_sdma_ring_emit_hdp_invalidate */
+ 6 + /* cik_sdma_ring_emit_pipeline_sync */
+ 12 + /* cik_sdma_ring_emit_vm_flush */
+ 9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
+}
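These two callbacks let common code budget ring space per submission instead of hard-coding SDMA packet sizes. A hedged sketch of the kind of arithmetic a caller could do; the helper itself is hypothetical:

	/* Hypothetical: worst-case dword budget for one job on this ring. */
	static unsigned example_frame_dw(struct amdgpu_ring *ring,
					 unsigned num_ibs)
	{
		return ring->funcs->get_dma_frame_size(ring) +
		       num_ibs * ring->funcs->get_emit_ib_size(ring);
	}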
+
static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
bool enable)
{
@@ -1037,6 +1015,8 @@ static int cik_sdma_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cik_sdma_soft_reset(handle);
+
return cik_sdma_hw_init(adev);
}
@@ -1259,6 +1239,8 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.test_ib = cik_sdma_ring_test_ib,
.insert_nop = cik_sdma_ring_insert_nop,
.pad_ib = cik_sdma_ring_pad_ib,
+ .get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
+ .get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
};
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index c4f6f00d62bc..8659852aea9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -562,4 +562,40 @@ enum {
MTYPE_NONCACHED = 3
};
+/* mmPA_SC_RASTER_CONFIG mask */
+#define RB_MAP_PKR0(x) ((x) << 0)
+#define RB_MAP_PKR0_MASK (0x3 << 0)
+#define RB_MAP_PKR1(x) ((x) << 2)
+#define RB_MAP_PKR1_MASK (0x3 << 2)
+#define RB_XSEL2(x) ((x) << 4)
+#define RB_XSEL2_MASK (0x3 << 4)
+#define RB_XSEL (1 << 6)
+#define RB_YSEL (1 << 7)
+#define PKR_MAP(x) ((x) << 8)
+#define PKR_MAP_MASK (0x3 << 8)
+#define PKR_XSEL(x) ((x) << 10)
+#define PKR_XSEL_MASK (0x3 << 10)
+#define PKR_YSEL(x) ((x) << 12)
+#define PKR_YSEL_MASK (0x3 << 12)
+#define SC_MAP(x) ((x) << 16)
+#define SC_MAP_MASK (0x3 << 16)
+#define SC_XSEL(x) ((x) << 18)
+#define SC_XSEL_MASK (0x3 << 18)
+#define SC_YSEL(x) ((x) << 20)
+#define SC_YSEL_MASK (0x3 << 20)
+#define SE_MAP(x) ((x) << 24)
+#define SE_MAP_MASK (0x3 << 24)
+#define SE_XSEL(x) ((x) << 26)
+#define SE_XSEL_MASK (0x3 << 26)
+#define SE_YSEL(x) ((x) << 28)
+#define SE_YSEL_MASK (0x3 << 28)
+
+/* mmPA_SC_RASTER_CONFIG_1 mask */
+#define SE_PAIR_MAP(x) ((x) << 0)
+#define SE_PAIR_MAP_MASK (0x3 << 0)
+#define SE_PAIR_XSEL(x) ((x) << 2)
+#define SE_PAIR_XSEL_MASK (0x3 << 2)
+#define SE_PAIR_YSEL(x) ((x) << 4)
+#define SE_PAIR_YSEL_MASK (0x3 << 4)
+
#endif
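Each new macro packs a small field into mmPA_SC_RASTER_CONFIG (2-bit fields, except the single-bit RB_XSEL/RB_YSEL). A hedged composition example; the field values are made up for illustration:

	/* Illustrative only: compose a raster config from the new macros. */
	u32 raster_config = RB_MAP_PKR0(2) | PKR_MAP(1) | PKR_XSEL(1) |
			    SC_MAP(1) | SC_XSEL(2) |
			    SE_MAP(1) | SE_XSEL(1) | SE_YSEL(1);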
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 2a11413ed54a..f80a0834e889 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -44,6 +44,7 @@
static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
+static void cz_dpm_fini(struct amdgpu_device *adev);
static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
{
@@ -350,6 +351,8 @@ static int cz_parse_power_table(struct amdgpu_device *adev)
ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
if (ps == NULL) {
+ for (j = 0; j < i; j++)
+ kfree(adev->pm.dpm.ps[j].ps_priv);
kfree(adev->pm.dpm.ps);
return -ENOMEM;
}
@@ -409,11 +412,11 @@ static int cz_dpm_init(struct amdgpu_device *adev)
ret = amdgpu_get_platform_caps(adev);
if (ret)
- return ret;
+ goto err;
ret = amdgpu_parse_extended_power_table(adev);
if (ret)
- return ret;
+ goto err;
pi->sram_end = SMC_RAM_END;
@@ -435,7 +438,11 @@ static int cz_dpm_init(struct amdgpu_device *adev)
pi->caps_td_ramping = true;
pi->caps_tcp_ramping = true;
}
- pi->caps_sclk_ds = true;
+ if (amdgpu_sclk_deep_sleep_en)
+ pi->caps_sclk_ds = true;
+ else
+ pi->caps_sclk_ds = false;
+
pi->voting_clients = 0x00c00033;
pi->auto_thermal_throttling_enabled = true;
pi->bapm_enabled = false;
@@ -463,23 +470,26 @@ static int cz_dpm_init(struct amdgpu_device *adev)
ret = cz_parse_sys_info_table(adev);
if (ret)
- return ret;
+ goto err;
cz_patch_voltage_values(adev);
cz_construct_boot_state(adev);
ret = cz_parse_power_table(adev);
if (ret)
- return ret;
+ goto err;
ret = cz_process_firmware_header(adev);
if (ret)
- return ret;
+ goto err;
pi->dpm_enabled = true;
pi->uvd_dynamic_pg = false;
return 0;
+err:
+ cz_dpm_fini(adev);
+ return ret;
}
static void cz_dpm_fini(struct amdgpu_device *adev)
@@ -668,17 +678,12 @@ static void cz_reset_ap_mask(struct amdgpu_device *adev)
struct cz_power_info *pi = cz_get_pi(adev);
pi->active_process_mask = 0;
-
}
static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
void **table)
{
- int ret = 0;
-
- ret = cz_smu_download_pptable(adev, table);
-
- return ret;
+ return cz_smu_download_pptable(adev, table);
}
static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)
@@ -818,9 +823,9 @@ static void cz_init_sclk_limit(struct amdgpu_device *adev)
pi->sclk_dpm.hard_min_clk = 0;
cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
level = cz_get_argument(adev);
- if (level < table->count)
+ if (level < table->count) {
clock = table->entries[level].clk;
- else {
+ } else {
DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n");
clock = table->entries[table->count - 1].clk;
}
@@ -846,9 +851,9 @@ static void cz_init_uvd_limit(struct amdgpu_device *adev)
pi->uvd_dpm.hard_min_clk = 0;
cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
level = cz_get_argument(adev);
- if (level < table->count)
+ if (level < table->count) {
clock = table->entries[level].vclk;
- else {
+ } else {
DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
clock = table->entries[table->count - 1].vclk;
}
@@ -874,9 +879,9 @@ static void cz_init_vce_limit(struct amdgpu_device *adev)
pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;
cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
level = cz_get_argument(adev);
- if (level < table->count)
+ if (level < table->count) {
clock = table->entries[level].ecclk;
- else {
+ } else {
/* future BIOS would fix this error */
DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
clock = table->entries[table->count - 1].ecclk;
@@ -903,9 +908,9 @@ static void cz_init_acp_limit(struct amdgpu_device *adev)
pi->acp_dpm.hard_min_clk = 0;
cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
level = cz_get_argument(adev);
- if (level < table->count)
+ if (level < table->count) {
clock = table->entries[level].clk;
- else {
+ } else {
DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
clock = table->entries[table->count - 1].clk;
}
@@ -930,7 +935,6 @@ static void cz_init_sclk_threshold(struct amdgpu_device *adev)
struct cz_power_info *pi = cz_get_pi(adev);
pi->low_sclk_interrupt_threshold = 0;
-
}
static void cz_dpm_setup_asic(struct amdgpu_device *adev)
@@ -1203,7 +1207,7 @@ static int cz_enable_didt(struct amdgpu_device *adev, bool enable)
int ret;
if (pi->caps_sq_ramping || pi->caps_db_ramping ||
- pi->caps_td_ramping || pi->caps_tcp_ramping) {
+ pi->caps_td_ramping || pi->caps_tcp_ramping) {
if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) {
ret = cz_disable_cgpg(adev);
if (ret) {
@@ -1277,7 +1281,7 @@ static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
ps->force_high = false;
ps->need_dfs_bypass = true;
pi->video_start = new_rps->dclk || new_rps->vclk ||
- new_rps->evclk || new_rps->ecclk;
+ new_rps->evclk || new_rps->ecclk;
if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
@@ -1335,7 +1339,6 @@ static int cz_dpm_enable(struct amdgpu_device *adev)
}
cz_reset_acp_boot_level(adev);
-
cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
return 0;
@@ -1665,7 +1668,6 @@ static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
struct amdgpu_ps *ps = &pi->requested_rps;
cz_update_current_ps(adev, ps);
-
}
static int cz_dpm_force_highest(struct amdgpu_device *adev)
@@ -2108,29 +2110,58 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
/* disable clockgating so we can properly shut down the block */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n");
+ return;
+ }
+
/* shutdown the UVD block */
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
- /* XXX: check for errors */
+
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating failed to set powergating state\n");
+ return;
+ }
}
cz_update_uvd_dpm(adev, gate);
- if (pi->caps_uvd_pg)
+ if (pi->caps_uvd_pg) {
/* power off the UVD block */
- cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
+ ret = cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating failed to send SMU PowerOFF message\n");
+ return;
+ }
+ }
} else {
if (pi->caps_uvd_pg) {
/* power on the UVD block */
if (pi->uvd_dynamic_pg)
- cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
+ ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
else
- cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
+ ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
+
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating Failed to send SMU PowerON message\n");
+ return;
+ }
+
/* re-init the UVD block */
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
+
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating Failed to set powergating state\n");
+ return;
+ }
+
/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
- /* XXX: check for errors */
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating Failed to set clockgating state\n");
+ return;
+ }
}
cz_update_uvd_dpm(adev, gate);
}
@@ -2168,7 +2199,6 @@ static int cz_update_vce_dpm(struct amdgpu_device *adev)
/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
if (pi->caps_stable_power_state) {
pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk;
-
} else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */
/* leave it as set by user */
/*pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;*/
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
index ac7fee7b7eca..aed7033c0973 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -29,6 +29,8 @@
#include "cz_smumgr.h"
#include "smu_ucode_xfer_cz.h"
#include "amdgpu_ucode.h"
+#include "cz_dpm.h"
+#include "vi_dpm.h"
#include "smu/smu_8_0_d.h"
#include "smu/smu_8_0_sh_mask.h"
@@ -48,7 +50,7 @@ static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
return priv;
}
-int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
+static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
{
int i;
u32 content = 0, tmp;
@@ -99,13 +101,6 @@ int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
return 0;
}
-int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
- u16 msg, u32 parameter)
-{
- WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
- return cz_send_msg_to_smc_async(adev, msg);
-}
-
int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
u16 msg, u32 parameter)
{
@@ -140,7 +135,7 @@ int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
return 0;
}
-int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
u32 value, u32 limit)
{
int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index c1b04e9aab57..613ebb7ed50f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -221,7 +221,7 @@ static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
*/
static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
- unsigned i = 0;
+ unsigned i = 100;
if (crtc >= adev->mode_info.num_crtc)
return;
@@ -233,14 +233,16 @@ static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
* wait for another frame.
*/
while (dce_v10_0_is_in_vblank(adev, crtc)) {
- if (i++ % 100 == 0) {
+ if (i++ == 100) {
+ i = 0;
if (!dce_v10_0_is_counter_moving(adev, crtc))
break;
}
}
while (!dce_v10_0_is_in_vblank(adev, crtc)) {
- if (i++ % 100 == 0) {
+ if (i++ == 100) {
+ i = 0;
if (!dce_v10_0_is_counter_moving(adev, crtc))
break;
}
@@ -425,16 +427,6 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* don't try to enable hpd on eDP or LVDS avoid breaking the
- * aux dp channel on imac and help (but not completely fix)
- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
- * also avoid interrupt storms during dpms.
- */
- continue;
- }
-
switch (amdgpu_connector->hpd.hpd) {
case AMDGPU_HPD_1:
idx = 0;
@@ -458,6 +450,19 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
continue;
}
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS avoid breaking the
+ * aux dp channel on imac and help (but not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * also avoid interrupt storms during dpms.
+ */
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ continue;
+ }
+
tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
@@ -646,8 +651,8 @@ static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev,
if (save->crtc_enabled[i]) {
tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
+ if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) {
+ tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0);
WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
@@ -712,6 +717,45 @@ static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
+static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev)
+{
+ int num_crtc = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ num_crtc = 6;
+ break;
+ default:
+ num_crtc = 0;
+ }
+ return num_crtc;
+}
+
+void dce_v10_0_disable_dce(struct amdgpu_device *adev)
+{
+ /* Disable VGA rendering and any enabled CRTCs, if the ASIC has a DCE engine */
+ if (amdgpu_atombios_has_dce_engine_info(adev)) {
+ u32 tmp;
+ int crtc_enabled, i;
+
+ dce_v10_0_set_vga_render_state(adev, false);
+
+ /* Disable CRTCs */
+ for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) {
+ crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+ CRTC_CONTROL, CRTC_MASTER_EN);
+ if (crtc_enabled) {
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+ tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+ WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ }
+}
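dce_v10_0_disable_dce() is exported (see the dce_v10_0.h hunk below) so the virtual-display path can shut off the real display engine it replaces. A hedged sketch of such a caller; the function shown here is hypothetical:

	/* Hypothetical caller: virtual DCE init turning off the real engine. */
	static int dce_virtual_hw_init_sketch(struct amdgpu_device *adev)
	{
		if (adev->asic_type == CHIP_FIJI || adev->asic_type == CHIP_TONGA)
			dce_v10_0_disable_dce(adev);
		return 0;
	}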
+
static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -2063,7 +2107,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_framebuffer *amdgpu_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
uint64_t fb_location, tiling_flags;
uint32_t fb_format, fb_pitch_pixels;
u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
@@ -2071,6 +2115,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
+ char *format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2090,23 +2135,23 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
* just update base pointers
*/
obj = amdgpu_fb->obj;
- rbo = gem_to_amdgpu_bo(obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(rbo);
+ fb_location = amdgpu_bo_gpu_offset(abo);
} else {
- r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
- amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ amdgpu_bo_unreserve(abo);
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
@@ -2182,8 +2227,9 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ format_name = drm_get_format_name(target_fb->pixel_format);
+ DRM_ERROR("Unsupported screen format %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
@@ -2275,17 +2321,17 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
- /* set pageflip to happen only at start of vblank interval (front porch) */
- WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
- rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
}
/* Bytes per pixel may have changed */
@@ -2698,7 +2744,7 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
.gamma_set = dce_v10_0_crtc_gamma_set,
.set_config = amdgpu_crtc_set_config,
.destroy = dce_v10_0_crtc_destroy,
- .page_flip = amdgpu_crtc_page_flip,
+ .page_flip_target = amdgpu_crtc_page_flip_target,
};
static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2765,16 +2811,16 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
if (crtc->primary->fb) {
int r;
struct amdgpu_framebuffer *amdgpu_fb;
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r))
- DRM_ERROR("failed to reserve rbo before unpin\n");
+ DRM_ERROR("failed to reserve abo before unpin\n");
else {
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
}
}
/* disable the GRPH */
@@ -2962,10 +3008,11 @@ static int dce_v10_0_early_init(void *handle)
dce_v10_0_set_display_funcs(adev);
dce_v10_0_set_irq_funcs(adev);
+ adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);
+
switch (adev->asic_type) {
case CHIP_FIJI:
case CHIP_TONGA:
- adev->mode_info.num_crtc = 6; /* XXX 7??? */
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 7;
break;
@@ -3141,11 +3188,26 @@ static int dce_v10_0_wait_for_idle(void *handle)
return 0;
}
+static int dce_v10_0_check_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (dce_v10_0_is_display_hung(adev))
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true;
+ else
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false;
+
+ return 0;
+}
+
static int dce_v10_0_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0, tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang)
+ return 0;
+
if (dce_v10_0_is_display_hung(adev))
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
@@ -3512,6 +3574,7 @@ const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.resume = dce_v10_0_resume,
.is_idle = dce_v10_0_is_idle,
.wait_for_idle = dce_v10_0_wait_for_idle,
+ .check_soft_reset = dce_v10_0_check_soft_reset,
.soft_reset = dce_v10_0_soft_reset,
.set_clockgating_state = dce_v10_0_set_clockgating_state,
.set_powergating_state = dce_v10_0_set_powergating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
index 1bfa48ddd8a6..e3dc04d293e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
@@ -26,4 +26,6 @@
extern const struct amd_ip_funcs dce_v10_0_ip_funcs;
+void dce_v10_0_disable_dce(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index d4bf133908b1..f264b8f17ad1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -443,16 +443,6 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* don't try to enable hpd on eDP or LVDS avoid breaking the
- * aux dp channel on imac and help (but not completely fix)
- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
- * also avoid interrupt storms during dpms.
- */
- continue;
- }
-
switch (amdgpu_connector->hpd.hpd) {
case AMDGPU_HPD_1:
idx = 0;
@@ -476,6 +466,19 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
continue;
}
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS to avoid breaking the
+ * aux dp channel on imac and to help (but not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * also avoid interrupt storms during dpms.
+ */
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ continue;
+ }
+
tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
@@ -673,6 +676,53 @@ static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
+static int dce_v11_0_get_num_crtc(struct amdgpu_device *adev)
+{
+ int num_crtc = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ num_crtc = 3;
+ break;
+ case CHIP_STONEY:
+ num_crtc = 2;
+ break;
+ case CHIP_POLARIS10:
+ num_crtc = 6;
+ break;
+ case CHIP_POLARIS11:
+ num_crtc = 5;
+ break;
+ default:
+ num_crtc = 0;
+ }
+ return num_crtc;
+}
+
+void dce_v11_0_disable_dce(struct amdgpu_device *adev)
+{
+ /* Disable VGA render and enabled CRTCs, if the ASIC has a DCE engine */
+ if (amdgpu_atombios_has_dce_engine_info(adev)) {
+ u32 tmp;
+ int crtc_enabled, i;
+
+ dce_v11_0_set_vga_render_state(adev, false);
+
+ /* Disable CRTCs */
+ for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) {
+ crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+ CRTC_CONTROL, CRTC_MASTER_EN);
+ if (crtc_enabled) {
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+ tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+ WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ }
+}
+
static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -2038,7 +2088,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_framebuffer *amdgpu_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
uint64_t fb_location, tiling_flags;
uint32_t fb_format, fb_pitch_pixels;
u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
@@ -2046,6 +2096,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
+ char *format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2065,23 +2116,23 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
* just update base pointers
*/
obj = amdgpu_fb->obj;
- rbo = gem_to_amdgpu_bo(obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(rbo);
+ fb_location = amdgpu_bo_gpu_offset(abo);
} else {
- r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
- amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ amdgpu_bo_unreserve(abo);
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
@@ -2157,8 +2208,9 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ format_name = drm_get_format_name(target_fb->pixel_format);
+ DRM_ERROR("Unsupported screen format %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
@@ -2250,17 +2302,17 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
- /* set pageflip to happen only at start of vblank interval (front porch) */
- WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
- rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
}
/* Bytes per pixel may have changed */
@@ -2708,7 +2760,7 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
.gamma_set = dce_v11_0_crtc_gamma_set,
.set_config = amdgpu_crtc_set_config,
.destroy = dce_v11_0_crtc_destroy,
- .page_flip = amdgpu_crtc_page_flip,
+ .page_flip_target = amdgpu_crtc_page_flip_target,
};
static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2775,16 +2827,16 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
if (crtc->primary->fb) {
int r;
struct amdgpu_framebuffer *amdgpu_fb;
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r))
- DRM_ERROR("failed to reserve rbo before unpin\n");
+ DRM_ERROR("failed to reserve abo before unpin\n");
else {
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
}
}
/* disable the GRPH */
@@ -2999,24 +3051,22 @@ static int dce_v11_0_early_init(void *handle)
dce_v11_0_set_display_funcs(adev);
dce_v11_0_set_irq_funcs(adev);
+ adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
+
switch (adev->asic_type) {
case CHIP_CARRIZO:
- adev->mode_info.num_crtc = 3;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
break;
case CHIP_STONEY:
- adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
break;
case CHIP_POLARIS10:
- adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_POLARIS11:
- adev->mode_info.num_crtc = 5;
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
@@ -3109,6 +3159,7 @@ static int dce_v11_0_sw_fini(void *handle)
dce_v11_0_afmt_fini(adev);
+ drm_mode_config_cleanup(adev->ddev);
adev->mode_info.mode_config_initialized = false;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
index 84e4618f5253..1f58a65ba2ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
@@ -26,4 +26,6 @@
extern const struct amd_ip_funcs dce_v11_0_ip_funcs;
+void dce_v11_0_disable_dce(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
new file mode 100644
index 000000000000..b948d6cb1399
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -0,0 +1,3176 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_i2c.h"
+#include "atom.h"
+#include "amdgpu_atombios.h"
+#include "atombios_crtc.h"
+#include "atombios_encoders.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+#include "si/si_reg.h"
+#include "si/sid.h"
+
+static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
+static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+
+static const u32 crtc_offsets[6] =
+{
+ SI_CRTC0_REGISTER_OFFSET,
+ SI_CRTC1_REGISTER_OFFSET,
+ SI_CRTC2_REGISTER_OFFSET,
+ SI_CRTC3_REGISTER_OFFSET,
+ SI_CRTC4_REGISTER_OFFSET,
+ SI_CRTC5_REGISTER_OFFSET
+};
+
+static const uint32_t dig_offsets[] = {
+ SI_CRTC0_REGISTER_OFFSET,
+ SI_CRTC1_REGISTER_OFFSET,
+ SI_CRTC2_REGISTER_OFFSET,
+ SI_CRTC3_REGISTER_OFFSET,
+ SI_CRTC4_REGISTER_OFFSET,
+ SI_CRTC5_REGISTER_OFFSET,
+ (0x13830 - 0x7030) >> 2,
+};
+
+static const struct {
+ uint32_t reg;
+ uint32_t vblank;
+ uint32_t vline;
+ uint32_t hpd;
+} interrupt_status_offsets[6] = { {
+ .reg = DISP_INTERRUPT_STATUS,
+ .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
+}, {
+ .reg = DISP_INTERRUPT_STATUS_CONTINUE,
+ .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
+}, {
+ .reg = DISP_INTERRUPT_STATUS_CONTINUE2,
+ .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
+}, {
+ .reg = DISP_INTERRUPT_STATUS_CONTINUE3,
+ .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
+}, {
+ .reg = DISP_INTERRUPT_STATUS_CONTINUE4,
+ .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
+}, {
+ .reg = DISP_INTERRUPT_STATUS_CONTINUE5,
+ .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
+} };
+
+static const uint32_t hpd_int_control_offsets[6] = {
+ DC_HPD1_INT_CONTROL,
+ DC_HPD2_INT_CONTROL,
+ DC_HPD3_INT_CONTROL,
+ DC_HPD4_INT_CONTROL,
+ DC_HPD5_INT_CONTROL,
+ DC_HPD6_INT_CONTROL,
+};
+
+static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
+ u32 block_offset, u32 reg)
+{
+ DRM_INFO("xxxx: dce_v6_0_audio_endpt_rreg ----no impl!!!!\n");
+ return 0;
+}
+
+static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
+ u32 block_offset, u32 reg, u32 v)
+{
+ DRM_INFO("xxxx: dce_v6_0_audio_endpt_wreg ----no impl!!!!\n");
+}
+
+static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
+{
+ if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ return true;
+ else
+ return false;
+}
+
+static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
+{
+ u32 pos1, pos2;
+
+ pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ if (pos1 != pos2)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * dce_v6_0_vblank_wait - vblank wait asic callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
+{
+ unsigned i = 100;
+
+ if (crtc >= adev->mode_info.num_crtc)
+ return;
+
+ if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+ return;
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (dce_v6_0_is_in_vblank(adev, crtc)) {
+ if (i++ == 100) {
+ i = 0;
+ if (!dce_v6_0_is_counter_moving(adev, crtc))
+ break;
+ }
+ }
+
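+ /* now wait for the start of the next vblank interval */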
+ while (!dce_v6_0_is_in_vblank(adev, crtc)) {
+ if (i++ == 100) {
+ i = 0;
+ if (!dce_v6_0_is_counter_moving(adev, crtc))
+ break;
+ }
+ }
+}
+
+static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+ if (crtc >= adev->mode_info.num_crtc)
+ return 0;
+ else
+ return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ /* Enable pflip interrupts */
+ for (i = 0; i < adev->mode_info.num_crtc; i++)
+ amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ /* Disable pflip interrupts */
+ for (i = 0; i < adev->mode_info.num_crtc; i++)
+ amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
+/**
+ * dce_v6_0_page_flip - pageflip callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc_id: crtc to flip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ * @async: flip at hsync (async) rather than at vsync
+ *
+ * Triggers the actual pageflip (evergreen+) by programming the new
+ * primary surface base address; the double buffered update then
+ * takes place at the next vertical (or, for async, horizontal) blank.
+ */
+static void dce_v6_0_page_flip(struct amdgpu_device *adev,
+ int crtc_id, u64 crtc_base, bool async)
+{
+ struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+ /* flip at hsync for async, default is vsync */
+ WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
+ EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
+ /* update the scanout addresses */
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ upper_32_bits(crtc_base));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* post the write */
+ RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
+}
+
+static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+{
+ if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+ return -EINVAL;
+ *vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + crtc_offsets[crtc]);
+ *position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ return 0;
+}
+
+/**
+ * dce_v6_0_hpd_sense - hpd sense callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
+static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
+ enum amdgpu_hpd_id hpd)
+{
+ bool connected = false;
+
+ switch (hpd) {
+ case AMDGPU_HPD_1:
+ if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case AMDGPU_HPD_2:
+ if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case AMDGPU_HPD_3:
+ if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case AMDGPU_HPD_4:
+ if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case AMDGPU_HPD_5:
+ if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case AMDGPU_HPD_6:
+ if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+
+ return connected;
+}
+
+/**
+ * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
+static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
+ enum amdgpu_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = dce_v6_0_hpd_sense(adev, hpd);
+
+ switch (hpd) {
+ case AMDGPU_HPD_1:
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_2:
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_3:
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_4:
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_5:
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_6:
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * dce_v6_0_hpd_init - hpd setup callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+ u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+ DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+ switch (amdgpu_connector->hpd.hpd) {
+ case AMDGPU_HPD_1:
+ WREG32(DC_HPD1_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_2:
+ WREG32(DC_HPD2_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_3:
+ WREG32(DC_HPD3_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_4:
+ WREG32(DC_HPD4_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_5:
+ WREG32(DC_HPD5_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_6:
+ WREG32(DC_HPD6_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS to avoid breaking the
+ * aux dp channel on imac and to help (but not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * also avoid interrupt storms during dpms.
+ */
+ u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+ switch (amdgpu_connector->hpd.hpd) {
+ case AMDGPU_HPD_1:
+ dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_2:
+ dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_3:
+ dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_4:
+ dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_5:
+ dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_6:
+ dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
+ break;
+ default:
+ continue;
+ }
+
+ dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
+ WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ continue;
+ }
+
+ dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
+ amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+ }
+}
+
+/**
+ * dce_v6_0_hpd_fini - hpd tear down callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+ switch (amdgpu_connector->hpd.hpd) {
+ case AMDGPU_HPD_1:
+ WREG32(DC_HPD1_CONTROL, 0);
+ break;
+ case AMDGPU_HPD_2:
+ WREG32(DC_HPD2_CONTROL, 0);
+ break;
+ case AMDGPU_HPD_3:
+ WREG32(DC_HPD3_CONTROL, 0);
+ break;
+ case AMDGPU_HPD_4:
+ WREG32(DC_HPD4_CONTROL, 0);
+ break;
+ case AMDGPU_HPD_5:
+ WREG32(DC_HPD5_CONTROL, 0);
+ break;
+ case AMDGPU_HPD_6:
+ WREG32(DC_HPD6_CONTROL, 0);
+ break;
+ default:
+ break;
+ }
+ amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+ }
+}
+
+static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+ return SI_DC_GPIO_HPD_A;
+}
+
+static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
+{
+ DRM_INFO("xxxx: dce_v6_0_is_display_hung ----no imp!!!!!\n");
+
+ return true;
+}
+
+static u32 evergreen_get_vblank_counter(struct amdgpu_device *adev, int crtc)
+{
+ if (crtc >= adev->mode_info.num_crtc)
+ return 0;
+ else
+ return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ u32 crtc_enabled, tmp, frame_count;
+ int i, j;
+
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+
+ /* disable VGA render */
+ WREG32(VGA_RENDER_CONTROL, 0);
+
+ /* blank the display controllers */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+ if (crtc_enabled) {
+ save->crtc_enabled[i] = true;
+ tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+
+ if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+ dce_v6_0_vblank_wait(adev, i);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ /* wait for the next frame */
+ frame_count = evergreen_get_vblank_counter(adev, i);
+ for (j = 0; j < adev->usec_timeout; j++) {
+ if (evergreen_get_vblank_counter(adev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+ } else {
+ save->crtc_enabled[i] = false;
+ }
+ }
+}
+
+static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ u32 tmp;
+ int i, j;
+
+ /* update crtc base addresses */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(adev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(adev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)adev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)adev->mc.vram_start);
+ }
+
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
+
+ /* unlock regs and wait for update */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x7) != 3) {
+ tmp &= ~0x7;
+ tmp |= 0x3;
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+ tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (tmp & 1) {
+ tmp &= ~1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ for (j = 0; j < adev->usec_timeout; j++) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+ break;
+ udelay(1);
+ }
+ }
+ }
+
+ /* Unlock vga access */
+ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+ WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
+ bool render)
+{
+ if (!render)
+ WREG32(R_000300_VGA_RENDER_CONTROL,
+ RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
+}
+
+static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+ int bpc = 0;
+ u32 tmp = 0;
+ enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
+
+ if (connector) {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ bpc = amdgpu_connector_get_monitor_bpc(connector);
+ dither = amdgpu_connector->dither;
+ }
+
+ /* LVDS FMT is set up by atom */
+ if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return;
+
+ if (bpc == 0)
+ return;
+
+ switch (bpc) {
+ case 6:
+ if (dither == AMDGPU_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+ FMT_SPATIAL_DITHER_EN);
+ else
+ tmp |= FMT_TRUNCATE_EN;
+ break;
+ case 8:
+ if (dither == AMDGPU_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+ FMT_RGB_RANDOM_ENABLE |
+ FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+ else
+ tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+ break;
+ case 10:
+ default:
+ /* not needed */
+ break;
+ }
+
+ WREG32(FMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+}
+
+/**
+ * si_get_number_of_dram_channels - get the number of dram channels
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up the number of video ram channels (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the number of dram channels
+ */
+static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+ switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
+ case 0:
+ default:
+ return 1;
+ case 1:
+ return 2;
+ case 2:
+ return 4;
+ case 3:
+ return 8;
+ case 4:
+ return 3;
+ case 5:
+ return 6;
+ case 6:
+ return 10;
+ case 7:
+ return 12;
+ case 8:
+ return 16;
+ }
+}
+
+struct dce6_wm_params {
+ u32 dram_channels; /* number of dram channels */
+ u32 yclk; /* bandwidth per dram data pin in kHz */
+ u32 sclk; /* engine clock in kHz */
+ u32 disp_clk; /* display clock in kHz */
+ u32 src_width; /* viewport width */
+ u32 active_time; /* active display time in ns */
+ u32 blank_time; /* blank time in ns */
+ bool interlaced; /* mode is interlaced */
+ fixed20_12 vsc; /* vertical scale ratio */
+ u32 num_heads; /* number of active crtcs */
+ u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+ u32 lb_size; /* line buffer allocated to pipe */
+ u32 vtaps; /* vertical scaler taps */
+};
+
+/**
+ * dce_v6_0_dram_bandwidth - get the dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the raw dram bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
+{
+ /* Calculate raw DRAM Bandwidth */
+ fixed20_12 dram_efficiency; /* 0.7 */
+ fixed20_12 yclk, dram_channels, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ yclk.full = dfixed_const(wm->yclk);
+ yclk.full = dfixed_div(yclk, a);
+ dram_channels.full = dfixed_const(wm->dram_channels * 4);
+ a.full = dfixed_const(10);
+ dram_efficiency.full = dfixed_const(7);
+ dram_efficiency.full = dfixed_div(dram_efficiency, a);
+ bandwidth.full = dfixed_mul(dram_channels, yclk);
+ bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
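+ /* i.e. bandwidth (MB/s) = yclk (MHz) * channels * 4 bytes * 0.7
+ * efficiency; e.g. 1000 MHz over 2 channels gives 5600 MB/s
+ * (illustrative numbers, not from a real board)
+ */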
+
+ return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dram bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth for display in MBytes/s
+ */
+static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+ /* Calculate DRAM Bandwidth and the part allocated to display. */
+ fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+ fixed20_12 yclk, dram_channels, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ yclk.full = dfixed_const(wm->yclk);
+ yclk.full = dfixed_div(yclk, a);
+ dram_channels.full = dfixed_const(wm->dram_channels * 4);
+ a.full = dfixed_const(10);
+ disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
+ disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+ bandwidth.full = dfixed_mul(dram_channels, yclk);
+ bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
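+ /* i.e. display share (MB/s) = yclk (MHz) * channels * 4 bytes * 0.3 */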
+
+ return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_data_return_bandwidth - get the data return bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the data return bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the data return bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
+{
+ /* Calculate the display Data return Bandwidth */
+ fixed20_12 return_efficiency; /* 0.8 */
+ fixed20_12 sclk, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ sclk.full = dfixed_const(wm->sclk);
+ sclk.full = dfixed_div(sclk, a);
+ a.full = dfixed_const(10);
+ return_efficiency.full = dfixed_const(8);
+ return_efficiency.full = dfixed_div(return_efficiency, a);
+ a.full = dfixed_const(32);
+ bandwidth.full = dfixed_mul(a, sclk);
+ bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
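+ /* i.e. bandwidth (MB/s) = sclk (MHz) * 32 bytes/clock * 0.8 efficiency */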
+
+ return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dmif bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dmif bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
+{
+ /* Calculate the DMIF Request Bandwidth */
+ fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+ fixed20_12 disp_clk, bandwidth;
+ fixed20_12 a, b;
+
+ a.full = dfixed_const(1000);
+ disp_clk.full = dfixed_const(wm->disp_clk);
+ disp_clk.full = dfixed_div(disp_clk, a);
+ a.full = dfixed_const(32);
+ b.full = dfixed_mul(a, disp_clk);
+
+ a.full = dfixed_const(10);
+ disp_clk_request_efficiency.full = dfixed_const(8);
+ disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+
+ bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
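+ /* i.e. bandwidth (MB/s) = disp_clk (MHz) * 32 bytes/request * 0.8 efficiency */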
+
+ return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_available_bandwidth - get the min available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the min available bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the min available bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
+{
+ /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
+ u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
+ u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
+ u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
+
+ return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+/**
+ * dce_v6_0_average_bandwidth - get the average available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the average available bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the average available bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
+{
+ /* Calculate the display mode Average Bandwidth
+ * DisplayMode should contain the source and destination dimensions,
+ * timing, etc.
+ */
+ fixed20_12 bpp;
+ fixed20_12 line_time;
+ fixed20_12 src_width;
+ fixed20_12 bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+ line_time.full = dfixed_div(line_time, a);
+ bpp.full = dfixed_const(wm->bytes_per_pixel);
+ src_width.full = dfixed_const(wm->src_width);
+ bandwidth.full = dfixed_mul(src_width, bpp);
+ bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+ bandwidth.full = dfixed_div(bandwidth, line_time);
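+ /* i.e. bandwidth (MB/s) = src_width * bytes/pixel * vsc / line_time (us) */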
+
+ return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_latency_watermark - get the latency watermark
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the latency watermark (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the latency watermark in ns
+ */
+static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
+{
+ /* First calculate the latency in ns */
+ u32 mc_latency = 2000; /* 2000 ns. */
+ u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
+ u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+ u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+ u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+ u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+ (wm->num_heads * cursor_line_pair_return_time);
+ u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+ u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+ u32 tmp, dmif_size = 12288;
+ fixed20_12 a, b, c;
+
+ if (wm->num_heads == 0)
+ return 0;
+
+ a.full = dfixed_const(2);
+ b.full = dfixed_const(1);
+ if ((wm->vsc.full > a.full) ||
+ ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+ (wm->vtaps >= 5) ||
+ ((wm->vsc.full >= a.full) && wm->interlaced))
+ max_src_lines_per_dst_line = 4;
+ else
+ max_src_lines_per_dst_line = 2;
+
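+ /* the line buffer fill rate computed below is the min of: the
+ * per-head share of the available bandwidth, a dmif-size based
+ * bound, and what the display clock can consume (disp_clk * Bpp)
+ */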
+ a.full = dfixed_const(available_bandwidth);
+ b.full = dfixed_const(wm->num_heads);
+ a.full = dfixed_div(a, b);
+
+ b.full = dfixed_const(mc_latency + 512);
+ c.full = dfixed_const(wm->disp_clk);
+ b.full = dfixed_div(b, c);
+
+ c.full = dfixed_const(dmif_size);
+ b.full = dfixed_div(c, b);
+
+ tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+
+ b.full = dfixed_const(1000);
+ c.full = dfixed_const(wm->disp_clk);
+ b.full = dfixed_div(c, b);
+ c.full = dfixed_const(wm->bytes_per_pixel);
+ b.full = dfixed_mul(b, c);
+
+ lb_fill_bw = min(tmp, dfixed_trunc(b));
+
+ a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+ b.full = dfixed_const(1000);
+ c.full = dfixed_const(lb_fill_bw);
+ b.full = dfixed_div(c, b);
+ a.full = dfixed_div(a, b);
+ line_fill_time = dfixed_trunc(a);
+
+ if (line_fill_time < wm->active_time)
+ return latency;
+ else
+ return latency + (line_fill_time - wm->active_time);
+}
+
+/**
+ * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
+ * average and available dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * dram bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+ if (dce_v6_0_average_bandwidth(wm) <=
+ (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
+ return true;
+ else
+ return false;
+}
+
+/**
+ * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
+ * average and available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * available bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
+{
+ if (dce_v6_0_average_bandwidth(wm) <=
+ (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
+ return true;
+ else
+ return false;
+}
+
+/**
+ * dce_v6_0_check_latency_hiding - check latency hiding
+ *
+ * @wm: watermark calculation data
+ *
+ * Check latency hiding (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
+{
+ u32 lb_partitions = wm->lb_size / wm->src_width;
+ u32 line_time = wm->active_time + wm->blank_time;
+ u32 latency_tolerant_lines;
+ u32 latency_hiding;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1);
+ if (wm->vsc.full > a.full)
+ latency_tolerant_lines = 1;
+ else {
+ if (lb_partitions <= (wm->vtaps + 1))
+ latency_tolerant_lines = 1;
+ else
+ latency_tolerant_lines = 2;
+ }
+
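+ /* time available to hide latency: the lines the line buffer can
+ * hold ahead of scanout plus the blank time
+ */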
+ latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+ if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * dce_v6_0_program_watermarks - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @lb_size: line buffer size
+ * @num_heads: number of display controllers in use
+ *
+ * Calculate and program the display watermarks for the
+ * selected display controller (CIK).
+ */
+static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
+ struct amdgpu_crtc *amdgpu_crtc,
+ u32 lb_size, u32 num_heads)
+{
+ struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
+ struct dce6_wm_params wm_low, wm_high;
+ u32 dram_channels;
+ u32 pixel_period;
+ u32 line_time = 0;
+ u32 latency_watermark_a = 0, latency_watermark_b = 0;
+ u32 priority_a_mark = 0, priority_b_mark = 0;
+ u32 priority_a_cnt = PRIORITY_OFF;
+ u32 priority_b_cnt = PRIORITY_OFF;
+ u32 tmp, arb_control3;
+ fixed20_12 a, b, c;
+
+ if (amdgpu_crtc->base.enabled && num_heads && mode) {
+ pixel_period = 1000000 / (u32)mode->clock;
+ line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ priority_a_cnt = 0;
+ priority_b_cnt = 0;
+
+ dram_channels = si_get_number_of_dram_channels(adev);
+
+ /* watermark for high clocks */
+ if (adev->pm.dpm_enabled) {
+ wm_high.yclk =
+ amdgpu_dpm_get_mclk(adev, false) * 10;
+ wm_high.sclk =
+ amdgpu_dpm_get_sclk(adev, false) * 10;
+ } else {
+ wm_high.yclk = adev->pm.current_mclk * 10;
+ wm_high.sclk = adev->pm.current_sclk * 10;
+ }
+
+ wm_high.disp_clk = mode->clock;
+ wm_high.src_width = mode->crtc_hdisplay;
+ wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.blank_time = line_time - wm_high.active_time;
+ wm_high.interlaced = false;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ wm_high.interlaced = true;
+ wm_high.vsc = amdgpu_crtc->vsc;
+ wm_high.vtaps = 1;
+ if (amdgpu_crtc->rmx_type != RMX_OFF)
+ wm_high.vtaps = 2;
+ wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm_high.lb_size = lb_size;
+ wm_high.dram_channels = dram_channels;
+ wm_high.num_heads = num_heads;
+
+ if (adev->pm.dpm_enabled) {
+ /* watermark for low clocks */
+ wm_low.yclk =
+ amdgpu_dpm_get_mclk(adev, true) * 10;
+ wm_low.sclk =
+ amdgpu_dpm_get_sclk(adev, true) * 10;
+ } else {
+ wm_low.yclk = adev->pm.current_mclk * 10;
+ wm_low.sclk = adev->pm.current_sclk * 10;
+ }
+
+ wm_low.disp_clk = mode->clock;
+ wm_low.src_width = mode->crtc_hdisplay;
+ wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.blank_time = line_time - wm_low.active_time;
+ wm_low.interlaced = false;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ wm_low.interlaced = true;
+ wm_low.vsc = amdgpu_crtc->vsc;
+ wm_low.vtaps = 1;
+ if (amdgpu_crtc->rmx_type != RMX_OFF)
+ wm_low.vtaps = 2;
+ wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm_low.lb_size = lb_size;
+ wm_low.dram_channels = dram_channels;
+ wm_low.num_heads = num_heads;
+
+ /* set for high clocks */
+ latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
+ /* set for low clocks */
+ latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
+
+ /* possibly force display priority to high */
+ /* should really do this at mode validation time... */
+ if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+ !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+ !dce_v6_0_check_latency_hiding(&wm_high) ||
+ (adev->mode_info.disp_priority == 2)) {
+ DRM_DEBUG_KMS("force priority to high\n");
+ priority_a_cnt |= PRIORITY_ALWAYS_ON;
+ priority_b_cnt |= PRIORITY_ALWAYS_ON;
+ }
+ if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+ !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+ !dce_v6_0_check_latency_hiding(&wm_low) ||
+ (adev->mode_info.disp_priority == 2)) {
+ DRM_DEBUG_KMS("force priority to high\n");
+ priority_a_cnt |= PRIORITY_ALWAYS_ON;
+ priority_b_cnt |= PRIORITY_ALWAYS_ON;
+ }
+
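+ /* a priority mark is the latency watermark expressed in pixels:
+ * watermark (ns) * pixel clock (MHz) / 1000, scaled by the
+ * horizontal scale ratio and programmed in units of 16 pixels
+ */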
+ a.full = dfixed_const(1000);
+ b.full = dfixed_const(mode->clock);
+ b.full = dfixed_div(b, a);
+ c.full = dfixed_const(latency_watermark_a);
+ c.full = dfixed_mul(c, b);
+ c.full = dfixed_mul(c, amdgpu_crtc->hsc);
+ c.full = dfixed_div(c, a);
+ a.full = dfixed_const(16);
+ c.full = dfixed_div(c, a);
+ priority_a_mark = dfixed_trunc(c);
+ priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+ a.full = dfixed_const(1000);
+ b.full = dfixed_const(mode->clock);
+ b.full = dfixed_div(b, a);
+ c.full = dfixed_const(latency_watermark_b);
+ c.full = dfixed_mul(c, b);
+ c.full = dfixed_mul(c, amdgpu_crtc->hsc);
+ c.full = dfixed_div(c, a);
+ a.full = dfixed_const(16);
+ c.full = dfixed_div(c, a);
+ priority_b_mark = dfixed_trunc(c);
+ priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+ }
+
+ /* select wm A */
+ arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+ tmp = arb_control3;
+ tmp &= ~LATENCY_WATERMARK_MASK(3);
+ tmp |= LATENCY_WATERMARK_MASK(1);
+ WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
+ (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+ LATENCY_HIGH_WATERMARK(line_time)));
+ /* select wm B */
+ tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+ tmp &= ~LATENCY_WATERMARK_MASK(3);
+ tmp |= LATENCY_WATERMARK_MASK(2);
+ WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
+ (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+ LATENCY_HIGH_WATERMARK(line_time)));
+ /* restore original selection */
+ WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
+
+ /* write the priority marks */
+ WREG32(PRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
+ WREG32(PRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
+
+ /* save values for DPM */
+ amdgpu_crtc->line_time = line_time;
+ amdgpu_crtc->wm_high = latency_watermark_a;
+}
+
+/* watermark setup */
+static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
+ struct amdgpu_crtc *amdgpu_crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *other_mode)
+{
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
+ /*
+ * Line Buffer Setup
+ * There are 3 line buffers, each one shared by 2 display controllers.
+ * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+ * the display controllers. The partitioning is done via one of four
+ * preset allocations specified in bits 21:20 (only the two used
+ * here are listed):
+ * 0 - half lb
+ * 2 - whole lb, other crtc must be disabled
+ */
+ /* this can get tricky if we have two large displays on a paired group
+ * of crtcs. Ideally for multiple large displays we'd assign them to
+ * non-linked crtcs for maximum line buffer allocation.
+ */
+ if (amdgpu_crtc->base.enabled && mode) {
+ if (other_mode) {
+ tmp = 0; /* 1/2 */
+ buffer_alloc = 1;
+ } else {
+ tmp = 2; /* whole */
+ buffer_alloc = 2;
+ }
+ } else {
+ tmp = 0;
+ buffer_alloc = 0;
+ }
+
+ WREG32(DC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
+ DC_LB_MEMORY_CONFIG(tmp));
+
+ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ break;
+ udelay(1);
+ }
+
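+ /* return the allocation as a line buffer size for the watermark
+ * code: half a lb is 4096*2, a whole lb 8192*2
+ */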
+ if (amdgpu_crtc->base.enabled && mode) {
+ switch (tmp) {
+ case 0:
+ default:
+ return 4096 * 2;
+ case 2:
+ return 8192 * 2;
+ }
+ }
+
+ /* controller not enabled, so no lb used */
+ return 0;
+}
+
+
+/**
+ * dce_v6_0_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line
+ * buffer allocation (CIK).
+ */
+static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
+{
+ struct drm_display_mode *mode0 = NULL;
+ struct drm_display_mode *mode1 = NULL;
+ u32 num_heads = 0, lb_size;
+ int i;
+
+ if (!adev->mode_info.mode_config_initialized)
+ return;
+
+ amdgpu_update_display_priority(adev);
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->mode_info.crtcs[i]->base.enabled)
+ num_heads++;
+ }
+ for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
+ mode0 = &adev->mode_info.crtcs[i]->base.mode;
+ mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
+ lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
+ dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
+ lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
+ dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
+ }
+}
+/*
+static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
+{
+ int i;
+ u32 offset, tmp;
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ offset = adev->mode_info.audio.pin[i].offset;
+ tmp = RREG32_AUDIO_ENDPT(offset,
+ AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+ if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
+ adev->mode_info.audio.pin[i].connected = false;
+ else
+ adev->mode_info.audio.pin[i].connected = true;
+ }
+
+}
+
+static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
+{
+ int i;
+
+ dce_v6_0_audio_get_connected_pins(adev);
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ if (adev->mode_info.audio.pin[i].connected)
+ return &adev->mode_info.audio.pin[i];
+ }
+ DRM_ERROR("No connected audio pins found!\n");
+ return NULL;
+}
+
+static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
+{
+ struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 offset;
+
+ if (!dig || !dig->afmt || !dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->offset;
+
+ WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
+ AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+
+}
+
+static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ DRM_INFO("xxxx: dce_v6_0_audio_write_latency_fields---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
+{
+ DRM_INFO("xxxx: dce_v6_0_audio_write_speaker_allocation---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
+{
+ DRM_INFO("xxxx: dce_v6_0_audio_write_sad_regs---no imp!!!!!\n");
+
+}
+*/
+static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
+ struct amdgpu_audio_pin *pin,
+ bool enable)
+{
+ DRM_INFO("xxxx: dce_v6_0_audio_enable---no imp!!!!!\n");
+}
+
+static const u32 pin_offsets[7] =
+{
+ (0x1780 - 0x1780),
+ (0x1786 - 0x1780),
+ (0x178c - 0x1780),
+ (0x1792 - 0x1780),
+ (0x1798 - 0x1780),
+ (0x179d - 0x1780),
+ (0x17a4 - 0x1780),
+};
+
+static int dce_v6_0_audio_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
+{
+}
+
+/*
+static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+ DRM_INFO("xxxx: dce_v6_0_afmt_update_ACR---no imp!!!!!\n");
+}
+*/
+/*
+ * build a HDMI Video Info Frame
+ */
+/*
+static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
+ void *buffer, size_t size)
+{
+ DRM_INFO("xxxx: dce_v6_0_afmt_update_avi_infoframe---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+ DRM_INFO("xxxx: dce_v6_0_audio_set_dto---no imp!!!!!\n");
+}
+*/
+/*
+ * update the info frames with the data from the current display mode
+ */
+static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ DRM_INFO("xxxx: dce_v6_0_afmt_setmode ----no impl !!!!!!!!\n");
+}
+
+static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (enable && dig->afmt->enabled)
+ return;
+ if (!enable && !dig->afmt->enabled)
+ return;
+
+ if (!enable && dig->afmt->pin) {
+ dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
+ dig->afmt->pin = NULL;
+ }
+
+ dig->afmt->enabled = enable;
+
+ DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
+ enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
+}
+
+static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < adev->mode_info.num_dig; i++)
+ adev->mode_info.afmt[i] = NULL;
+
+ /* DCE6 has audio blocks tied to DIG encoders */
+ for (i = 0; i < adev->mode_info.num_dig; i++) {
+ adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
+ if (adev->mode_info.afmt[i]) {
+ adev->mode_info.afmt[i]->offset = dig_offsets[i];
+ adev->mode_info.afmt[i]->id = i;
+ } else {
+ for (j = 0; j < i; j++) {
+ kfree(adev->mode_info.afmt[j]);
+ adev->mode_info.afmt[j] = NULL;
+ }
+ DRM_ERROR("Out of memory allocating afmt table\n");
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->mode_info.num_dig; i++) {
+ kfree(adev->mode_info.afmt[i]);
+ adev->mode_info.afmt[i] = NULL;
+ }
+}
+
+static const u32 vga_control_regs[6] =
+{
+ AVIVO_D1VGA_CONTROL,
+ AVIVO_D2VGA_CONTROL,
+ EVERGREEN_D3VGA_CONTROL,
+ EVERGREEN_D4VGA_CONTROL,
+ EVERGREEN_D5VGA_CONTROL,
+ EVERGREEN_D6VGA_CONTROL,
+};
+
+static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ u32 vga_control;
+
+ vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
+ WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
+}
+
+static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
+}
+
+static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_framebuffer *amdgpu_fb;
+ struct drm_framebuffer *target_fb;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *abo;
+ uint64_t fb_location, tiling_flags;
+ uint32_t fb_format, fb_pitch_pixels, pipe_config;
+ u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
+ u32 viewport_w, viewport_h;
+ int r;
+ bool bypass_lut = false;
+
+ /* no fb bound */
+ if (!atomic && !crtc->primary->fb) {
+ DRM_DEBUG_KMS("No FB bound\n");
+ return 0;
+ }
+
+ if (atomic) {
+ amdgpu_fb = to_amdgpu_framebuffer(fb);
+ target_fb = fb;
+ } else {
+ amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ target_fb = crtc->primary->fb;
+ }
+
+ /* If atomic, assume fb object is pinned & idle & fenced and
+ * just update base pointers
+ */
+ obj = amdgpu_fb->obj;
+ abo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(abo, false);
+ if (unlikely(r != 0))
+ return r;
+
+ if (atomic) {
+ fb_location = amdgpu_bo_gpu_offset(abo);
+ } else {
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(abo);
+ return -EINVAL;
+ }
+ }
+
+ amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ amdgpu_bo_unreserve(abo);
+
+ switch (target_fb->pixel_format) {
+ case DRM_FORMAT_C8:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+ break;
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_ARGB4444:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+ break;
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_BGRA5551:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+ break;
+ case DRM_FORMAT_RGB565:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+ /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+ bypass_lut = true;
+ break;
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_BGRA1010102:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+ /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+ bypass_lut = true;
+ break;
+ default: {
+ /* drm_get_format_name() allocates; free as in dce_v11_0 above */
+ char *format_name = drm_get_format_name(target_fb->pixel_format);
+
+ DRM_ERROR("Unsupported screen format %s\n", format_name);
+ kfree(format_name);
+ return -EINVAL;
+ }
+ }
+
+ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
+ unsigned bankw, bankh, mtaspect, tile_split, num_banks;
+
+ bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+ bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+ mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+ tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
+ num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
+ fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
+ fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
+ fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
+ fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
+ } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
+ fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+ }
+
+ pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
+ fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config);
+
+ dce_v6_0_vga_enable(crtc, false);
+
+ /* Make sure surface address is updated at vertical blank rather than
+ * horizontal blank
+ */
+ WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+ WREG32(EVERGREEN_GRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+ WREG32(EVERGREEN_GRPH_SWAP_CONTROL + amdgpu_crtc->crtc_offset, fb_swap);
+
+	/*
+	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the
+	 * LUT for > 8 bpc scanout to avoid truncating fb indices to the 8 MSBs
+	 * and retain full precision throughout the pipeline.
+	 */
+ WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
+ (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
+ ~EVERGREEN_LUT_10BIT_BYPASS_EN);
+
+ if (bypass_lut)
+ DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
+ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_X_START + amdgpu_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
+ WREG32(EVERGREEN_GRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
+
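+	/* pitches[0] is in bytes; the GRPH_PITCH register takes the pitch in pixels */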
+ fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ WREG32(EVERGREEN_GRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
+
+ dce_v6_0_grph_enable(crtc, true);
+
+ WREG32(EVERGREEN_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
+ target_fb->height);
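+	/* align the viewport start: x to a multiple of 4, y to a multiple of 2 */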
+ x &= ~3;
+ y &= ~1;
+ WREG32(EVERGREEN_VIEWPORT_START + amdgpu_crtc->crtc_offset,
+ (x << 16) | y);
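+	/* the viewport height must be even; round up */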
+ viewport_w = crtc->mode.hdisplay;
+ viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+
+ WREG32(EVERGREEN_VIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
+ (viewport_w << 16) | viewport_h);
+
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
+
+ if (!atomic && fb && fb != crtc->primary->fb) {
+ amdgpu_fb = to_amdgpu_framebuffer(fb);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
+ if (unlikely(r != 0))
+ return r;
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
+ }
+
+ /* Bytes per pixel may have changed */
+ dce_v6_0_bandwidth_update(adev);
+
+ return 0;
+}
+
+static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset,
+ EVERGREEN_INTERLEAVE_EN);
+ else
+ WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
+}
+
+static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ int i;
+
+ DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
+
+ WREG32(NI_INPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+ (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+ NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+ WREG32(NI_PRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
+ NI_GRPH_PRESCALE_BYPASS);
+ WREG32(NI_PRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
+ NI_OVL_PRESCALE_BYPASS);
+ WREG32(NI_INPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+ (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
+ NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+
+ WREG32(EVERGREEN_DC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
+
+ WREG32(EVERGREEN_DC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
+
+ WREG32(EVERGREEN_DC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
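+	/* write all 256 entries, packed as 10 bits per channel: R[29:20] G[19:10] B[9:0] */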
+ for (i = 0; i < 256; i++) {
+ WREG32(EVERGREEN_DC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
+ (amdgpu_crtc->lut_r[i] << 20) |
+ (amdgpu_crtc->lut_g[i] << 10) |
+ (amdgpu_crtc->lut_b[i] << 0));
+ }
+
+ WREG32(NI_DEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+ (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
+ WREG32(NI_GAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
+ (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
+ NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
+ WREG32(NI_REGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+ (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
+ NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
+ WREG32(NI_OUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+ (NI_OUTPUT_CSC_GRPH_MODE(0) |
+ NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+ /* XXX match this to the depth of the crtc fmt block, move to modeset? */
+ WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
+}
+
+static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
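+	/* map the transmitter (UNIPHY block + link) to a DIG encoder index */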
+ switch (amdgpu_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ return dig->linkb ? 1 : 0;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ return dig->linkb ? 3 : 2;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ return dig->linkb ? 5 : 4;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+ return 6;
+ default:
+ DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
+ return 0;
+ }
+}
+
+/**
+ * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders. For non-DP
+ * monitors a dedicated PPLL must be used. If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself. If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ */
+static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ u32 pll_in_use;
+ int pll;
+
+ if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
+ if (adev->clock.dp_extclk)
+ /* skip PPLL programming if using ext clock */
+ return ATOM_PPLL_INVALID;
+ else
+ return ATOM_PPLL0;
+ } else {
+ /* use the same PPLL for all monitors with the same clock */
+ pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
+ }
+
+	/* PPLL1 and PPLL2 */
+ pll_in_use = amdgpu_pll_get_use_mask(crtc);
+ if (!(pll_in_use & (1 << ATOM_PPLL2)))
+ return ATOM_PPLL2;
+ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
+}
+
+static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
+{
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ uint32_t cur_lock;
+
+ cur_lock = RREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset);
+ if (lock)
+ cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+ else
+ cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
+ WREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
+}
+
+static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+
+ WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
+ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+ EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+}
+
+static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ upper_32_bits(amdgpu_crtc->cursor_addr));
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ lower_32_bits(amdgpu_crtc->cursor_addr));
+
+ WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
+ EVERGREEN_CURSOR_EN |
+ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+ EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+}
+
+static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
+ int x, int y)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+	int xorigin = 0, yorigin = 0;
+	int w = amdgpu_crtc->cursor_width;
+
+	/* avivo cursors are offset into the total surface */
+ x += crtc->x;
+ y += crtc->y;
+ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
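+	/* for negative positions, clamp to 0 and fold the offset into the hot spot origin */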
+ if (x < 0) {
+ xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+ x = 0;
+ }
+ if (y < 0) {
+ yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+ y = 0;
+ }
+
+ WREG32(EVERGREEN_CUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
+ WREG32(EVERGREEN_CUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(EVERGREEN_CUR_SIZE + amdgpu_crtc->crtc_offset,
+ ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
+
+ amdgpu_crtc->cursor_x = x;
+ amdgpu_crtc->cursor_y = y;
+ return 0;
+}
+
+static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
+{
+ int ret;
+
+ dce_v6_0_lock_cursor(crtc, true);
+ ret = dce_v6_0_cursor_move_locked(crtc, x, y);
+ dce_v6_0_lock_cursor(crtc, false);
+
+ return ret;
+}
+
+static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height,
+ int32_t hot_x,
+ int32_t hot_y)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *aobj;
+ int ret;
+
+ if (!handle) {
+ /* turn off cursor */
+ dce_v6_0_hide_cursor(crtc);
+ obj = NULL;
+ goto unpin;
+ }
+
+ if ((width > amdgpu_crtc->max_cursor_width) ||
+ (height > amdgpu_crtc->max_cursor_height)) {
+ DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
+ return -ENOENT;
+ }
+
+ aobj = gem_to_amdgpu_bo(obj);
+ ret = amdgpu_bo_reserve(aobj, false);
+ if (ret != 0) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ amdgpu_bo_unreserve(aobj);
+ if (ret) {
+ DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+
+ amdgpu_crtc->cursor_width = width;
+ amdgpu_crtc->cursor_height = height;
+
+ dce_v6_0_lock_cursor(crtc, true);
+
+ if (hot_x != amdgpu_crtc->cursor_hot_x ||
+ hot_y != amdgpu_crtc->cursor_hot_y) {
+ int x, y;
+
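+		/* shift the position so the cursor stays put on screen despite the new hot spot */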
+ x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
+ y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
+
+ dce_v6_0_cursor_move_locked(crtc, x, y);
+
+ amdgpu_crtc->cursor_hot_x = hot_x;
+ amdgpu_crtc->cursor_hot_y = hot_y;
+ }
+
+ dce_v6_0_show_cursor(crtc);
+ dce_v6_0_lock_cursor(crtc, false);
+
+unpin:
+ if (amdgpu_crtc->cursor_bo) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ ret = amdgpu_bo_reserve(aobj, false);
+ if (likely(ret == 0)) {
+ amdgpu_bo_unpin(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
+ drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ }
+
+ amdgpu_crtc->cursor_bo = obj;
+ return 0;
+}
+
+static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->cursor_bo) {
+ dce_v6_0_lock_cursor(crtc, true);
+
+ dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
+ amdgpu_crtc->cursor_y);
+
+ dce_v6_0_show_cursor(crtc);
+ dce_v6_0_lock_cursor(crtc, false);
+ }
+}
+
+static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ int i;
+
+ /* userspace palettes are always correct as is */
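+	/* scale the 16-bit userspace values down to the 10-bit hardware LUT */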
+ for (i = 0; i < size; i++) {
+ amdgpu_crtc->lut_r[i] = red[i] >> 6;
+ amdgpu_crtc->lut_g[i] = green[i] >> 6;
+ amdgpu_crtc->lut_b[i] = blue[i] >> 6;
+ }
+ dce_v6_0_crtc_load_lut(crtc);
+
+ return 0;
+}
+
+static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(amdgpu_crtc);
+}
+
+static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
+ .cursor_set2 = dce_v6_0_crtc_cursor_set2,
+ .cursor_move = dce_v6_0_crtc_cursor_move,
+ .gamma_set = dce_v6_0_crtc_gamma_set,
+ .set_config = amdgpu_crtc_set_config,
+ .destroy = dce_v6_0_crtc_destroy,
+ .page_flip_target = amdgpu_crtc_page_flip_target,
+};
+
+static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ amdgpu_crtc->enabled = true;
+ amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
+ amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
+ /* Make sure VBLANK and PFLIP interrupts are still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
+ amdgpu_irq_update(adev, &adev->pageflip_irq, type);
+ drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+ dce_v6_0_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+ if (amdgpu_crtc->enabled)
+ amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
+ amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
+ amdgpu_crtc->enabled = false;
+ break;
+ }
+ /* adjust pm to dpms */
+ amdgpu_pm_compute_clocks(adev);
+}
+
+static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
+{
+ /* disable crtc pair power gating before programming */
+ amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
+ amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
+ dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
+{
+ dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
+}
+
+static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_atom_ss ss;
+ int i;
+
+ dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->primary->fb) {
+ int r;
+ struct amdgpu_framebuffer *amdgpu_fb;
+ struct amdgpu_bo *abo;
+
+ amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
+ if (unlikely(r))
+ DRM_ERROR("failed to reserve abo before unpin\n");
+ else {
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
+ }
+ }
+ /* disable the GRPH */
+ dce_v6_0_grph_enable(crtc, false);
+
+ amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->mode_info.crtcs[i] &&
+ adev->mode_info.crtcs[i]->enabled &&
+ i != amdgpu_crtc->crtc_id &&
+ amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
+			/* another crtc is using this pll; don't turn
+			 * off the pll
+			 */
+ goto done;
+ }
+ }
+
+ switch (amdgpu_crtc->pll_id) {
+ case ATOM_PPLL1:
+ case ATOM_PPLL2:
+ /* disable the ppll */
+ amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
+ 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+ break;
+ default:
+ break;
+ }
+done:
+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+ amdgpu_crtc->adjusted_clock = 0;
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+}
+
+static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (!amdgpu_crtc->adjusted_clock)
+ return -EINVAL;
+
+ amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
+ amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
+ dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+ amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
+ amdgpu_atombios_crtc_scaler_setup(crtc);
+ dce_v6_0_cursor_reset(crtc);
+	/* update the hw mode for dpm */
+ amdgpu_crtc->hw_mode = *adjusted_mode;
+
+ return 0;
+}
+
+static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ amdgpu_crtc->encoder = encoder;
+ amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
+ break;
+ }
+ }
+ if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+ return false;
+ }
+ if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+ return false;
+ if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
+ return false;
+ /* pick pll */
+ amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
+ /* if we can't get a PPLL for a non-DP encoder, fail */
+ if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
+ !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
+ return false;
+
+ return true;
+}
+
+static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
+ .dpms = dce_v6_0_crtc_dpms,
+ .mode_fixup = dce_v6_0_crtc_mode_fixup,
+ .mode_set = dce_v6_0_crtc_mode_set,
+ .mode_set_base = dce_v6_0_crtc_set_base,
+ .mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
+ .prepare = dce_v6_0_crtc_prepare,
+ .commit = dce_v6_0_crtc_commit,
+ .load_lut = dce_v6_0_crtc_load_lut,
+ .disable = dce_v6_0_crtc_disable,
+};
+
+static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
+{
+ struct amdgpu_crtc *amdgpu_crtc;
+ int i;
+
+ amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
+ (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+ if (amdgpu_crtc == NULL)
+ return -ENOMEM;
+
+ drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
+ amdgpu_crtc->crtc_id = index;
+ adev->mode_info.crtcs[index] = amdgpu_crtc;
+
+ amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
+ amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
+ adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+ adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+
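+	/* start with a linear gamma ramp, expanding the 8-bit index to 10 bits */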
+ for (i = 0; i < 256; i++) {
+ amdgpu_crtc->lut_r[i] = i << 2;
+ amdgpu_crtc->lut_g[i] = i << 2;
+ amdgpu_crtc->lut_b[i] = i << 2;
+ }
+
+ amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
+
+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+ amdgpu_crtc->adjusted_clock = 0;
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+ drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
+
+ return 0;
+}
+
+static int dce_v6_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
+ adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
+
+ dce_v6_0_set_display_funcs(adev);
+ dce_v6_0_set_irq_funcs(adev);
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_OLAND:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 2;
+ adev->mode_info.num_dig = 2;
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dce_v6_0_sw_init(void *handle)
+{
+ int r, i;
+ bool ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+ if (r)
+ return r;
+ }
+
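+	/* pageflip interrupt src_ids 8,10,...,18 map to crtcs 0-5 (see dce_v6_0_pageflip_irq) */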
+ for (i = 8; i < 20; i += 2) {
+ r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+ if (r)
+ return r;
+ }
+
+ /* HPD hotplug */
+ r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+ if (r)
+ return r;
+
+ adev->mode_info.mode_config_initialized = true;
+
+ adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+ adev->ddev->mode_config.async_page_flip = true;
+ adev->ddev->mode_config.max_width = 16384;
+ adev->ddev->mode_config.max_height = 16384;
+ adev->ddev->mode_config.preferred_depth = 24;
+ adev->ddev->mode_config.prefer_shadow = 1;
+ adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+ r = amdgpu_modeset_create_props(adev);
+ if (r)
+ return r;
+
+ /* allocate crtcs */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ r = dce_v6_0_crtc_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
+ if (ret)
+ amdgpu_print_display_setup(adev->ddev);
+ else
+ return -EINVAL;
+
+ /* setup afmt */
+ r = dce_v6_0_afmt_init(adev);
+ if (r)
+ return r;
+
+ r = dce_v6_0_audio_init(adev);
+ if (r)
+ return r;
+
+ drm_kms_helper_poll_init(adev->ddev);
+
+ return r;
+}
+
+static int dce_v6_0_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ kfree(adev->mode_info.bios_hardcoded_edid);
+
+ drm_kms_helper_poll_fini(adev->ddev);
+
+ dce_v6_0_audio_fini(adev);
+ dce_v6_0_afmt_fini(adev);
+
+ drm_mode_config_cleanup(adev->ddev);
+ adev->mode_info.mode_config_initialized = false;
+
+ return 0;
+}
+
+static int dce_v6_0_hw_init(void *handle)
+{
+ int i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* init dig PHYs, disp eng pll */
+ amdgpu_atombios_encoder_init_dig(adev);
+ amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+
+ /* initialize hpd */
+ dce_v6_0_hpd_init(adev);
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+ }
+
+ dce_v6_0_pageflip_interrupt_init(adev);
+
+ return 0;
+}
+
+static int dce_v6_0_hw_fini(void *handle)
+{
+ int i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ dce_v6_0_hpd_fini(adev);
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+ }
+
+ dce_v6_0_pageflip_interrupt_fini(adev);
+
+ return 0;
+}
+
+static int dce_v6_0_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_atombios_scratch_regs_save(adev);
+
+ return dce_v6_0_hw_fini(handle);
+}
+
+static int dce_v6_0_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret;
+
+ ret = dce_v6_0_hw_init(handle);
+
+ amdgpu_atombios_scratch_regs_restore(adev);
+
+ /* turn on the BL */
+ if (adev->mode_info.bl_encoder) {
+ u8 bl_level = amdgpu_display_backlight_get_level(adev,
+ adev->mode_info.bl_encoder);
+ amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
+ bl_level);
+ }
+
+ return ret;
+}
+
+static bool dce_v6_0_is_idle(void *handle)
+{
+ return true;
+}
+
+static int dce_v6_0_wait_for_idle(void *handle)
+{
+ return 0;
+}
+
+static int dce_v6_0_soft_reset(void *handle)
+{
+ DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
+ return 0;
+}
+
+static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+ int crtc,
+ enum amdgpu_interrupt_state state)
+{
+ u32 reg_block, interrupt_mask;
+
+ if (crtc >= adev->mode_info.num_crtc) {
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ switch (crtc) {
+ case 0:
+ reg_block = SI_CRTC0_REGISTER_OFFSET;
+ break;
+ case 1:
+ reg_block = SI_CRTC1_REGISTER_OFFSET;
+ break;
+ case 2:
+ reg_block = SI_CRTC2_REGISTER_OFFSET;
+ break;
+ case 3:
+ reg_block = SI_CRTC3_REGISTER_OFFSET;
+ break;
+ case 4:
+ reg_block = SI_CRTC4_REGISTER_OFFSET;
+ break;
+ case 5:
+ reg_block = SI_CRTC5_REGISTER_OFFSET;
+ break;
+ default:
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ interrupt_mask = RREG32(INT_MASK + reg_block);
+ interrupt_mask &= ~VBLANK_INT_MASK;
+ WREG32(INT_MASK + reg_block, interrupt_mask);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ interrupt_mask = RREG32(INT_MASK + reg_block);
+ interrupt_mask |= VBLANK_INT_MASK;
+ WREG32(INT_MASK + reg_block, interrupt_mask);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
+ int crtc,
+ enum amdgpu_interrupt_state state)
+{
+
+}
+
+static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+ switch (type) {
+ case AMDGPU_HPD_1:
+ dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_2:
+ dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_3:
+ dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_4:
+ dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_5:
+ dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_6:
+ dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
+ break;
+ default:
+ DRM_DEBUG("invalid hdp %d\n", type);
+ return 0;
+ }
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ switch (type) {
+ case AMDGPU_CRTC_IRQ_VBLANK1:
+ dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VBLANK2:
+ dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VBLANK3:
+ dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VBLANK4:
+ dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VBLANK5:
+ dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VBLANK6:
+ dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE1:
+ dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE2:
+ dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE3:
+ dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE4:
+ dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE5:
+ dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE6:
+ dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
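+	/* crtc vblank/vline sources were registered with src_id i + 1 in sw_init */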
+ unsigned crtc = entry->src_id - 1;
+ uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
+ unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+
+ switch (entry->src_data) {
+ case 0: /* vblank */
+ if (disp_int & interrupt_status_offsets[crtc].vblank)
+ WREG32(VBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
+ }
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ break;
+ case 1: /* vline */
+ if (disp_int & interrupt_status_offsets[crtc].vline)
+ WREG32(VLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+ break;
+ }
+
+ return 0;
+}
+
+static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 reg;
+
+ if (type >= adev->mode_info.num_crtc) {
+ DRM_ERROR("invalid pageflip crtc %d\n", type);
+ return -EINVAL;
+ }
+
+ reg = RREG32(GRPH_INT_CONTROL + crtc_offsets[type]);
+ if (state == AMDGPU_IRQ_STATE_DISABLE)
+ WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+ reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+ else
+ WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+ reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+
+ return 0;
+}
+
+static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ unsigned long flags;
+ unsigned crtc_id;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_flip_work *works;
+
+	crtc_id = (entry->src_id - 8) >> 1;
+
+	if (crtc_id >= adev->mode_info.num_crtc) {
+		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+		return -EINVAL;
+	}
+	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+ if (RREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id]) &
+ GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+ WREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id],
+ GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+
+	/* the IRQ can fire during early init, before the crtc is set up */
+ if (amdgpu_crtc == NULL)
+ return 0;
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ works = amdgpu_crtc->pflip_works;
+	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+ "AMDGPU_FLIP_SUBMITTED(%d)\n",
+ amdgpu_crtc->pflip_status,
+ AMDGPU_FLIP_SUBMITTED);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ return 0;
+ }
+
+ /* page flip completed. clean up */
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ amdgpu_crtc->pflip_works = NULL;
+
+	/* wake up userspace */
+ if (works->event)
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
+
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+ schedule_work(&works->unpin_work);
+
+ return 0;
+}
+
+static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t disp_int, mask, int_control, tmp;
+ unsigned hpd;
+
+ if (entry->src_data >= adev->mode_info.num_hpd) {
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+ return 0;
+ }
+
+ hpd = entry->src_data;
+ disp_int = RREG32(interrupt_status_offsets[hpd].reg);
+ mask = interrupt_status_offsets[hpd].hpd;
+ int_control = hpd_int_control_offsets[hpd];
+
+ if (disp_int & mask) {
+ tmp = RREG32(int_control);
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+ WREG32(int_control, tmp);
+ schedule_work(&adev->hotplug_work);
+ DRM_INFO("IH: HPD%d\n", hpd + 1);
+ }
+
+ return 0;
+}
+
+static int dce_v6_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int dce_v6_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs dce_v6_0_ip_funcs = {
+ .name = "dce_v6_0",
+ .early_init = dce_v6_0_early_init,
+ .late_init = NULL,
+ .sw_init = dce_v6_0_sw_init,
+ .sw_fini = dce_v6_0_sw_fini,
+ .hw_init = dce_v6_0_hw_init,
+ .hw_fini = dce_v6_0_hw_fini,
+ .suspend = dce_v6_0_suspend,
+ .resume = dce_v6_0_resume,
+ .is_idle = dce_v6_0_is_idle,
+ .wait_for_idle = dce_v6_0_wait_for_idle,
+ .soft_reset = dce_v6_0_soft_reset,
+ .set_clockgating_state = dce_v6_0_set_clockgating_state,
+ .set_powergating_state = dce_v6_0_set_powergating_state,
+};
+
+static void
+dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ amdgpu_encoder->pixel_clock = adjusted_mode->clock;
+
+ /* need to call this here rather than in prepare() since we need some crtc info */
+ amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	/* setting up the scaler clears this on some chips */
+ dce_v6_0_set_interleave(encoder->crtc, mode);
+
+ if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+ dce_v6_0_afmt_enable(encoder, true);
+ dce_v6_0_afmt_setmode(encoder, adjusted_mode);
+ }
+}
+
+static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+
+ if ((amdgpu_encoder->active_device &
+ (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+ (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
+ ENCODER_OBJECT_ID_NONE)) {
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ if (dig) {
+ dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
+ if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
+ dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
+ }
+ }
+
+ amdgpu_atombios_scratch_regs_lock(adev, true);
+
+ if (connector) {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+ /* select the clock/data port if it uses a router */
+ if (amdgpu_connector->router.cd_valid)
+ amdgpu_i2c_router_select_cd_port(amdgpu_connector);
+
+ /* turn eDP panel on for mode set */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ amdgpu_atombios_encoder_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ }
+
+ /* this is needed for the pll/ss setup to work correctly in some cases */
+ amdgpu_atombios_encoder_set_crtc_source(encoder);
+ /* set up the FMT blocks */
+ dce_v6_0_program_fmt(encoder);
+}
+
+static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ /* need to call this here as we need the crtc set up */
+ amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+ amdgpu_atombios_scratch_regs_lock(adev, false);
+}
+
+static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig;
+
+ amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ if (amdgpu_atombios_encoder_is_digital(encoder)) {
+ if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+ dce_v6_0_afmt_enable(encoder, false);
+ dig = amdgpu_encoder->enc_priv;
+ dig->dig_encoder = -1;
+ }
+ amdgpu_encoder->active_device = 0;
+}
+
+/* these are handled by the primary encoders */
+static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
+ .dpms = dce_v6_0_ext_dpms,
+ .mode_fixup = dce_v6_0_ext_mode_fixup,
+ .prepare = dce_v6_0_ext_prepare,
+ .mode_set = dce_v6_0_ext_mode_set,
+ .commit = dce_v6_0_ext_commit,
+ .disable = dce_v6_0_ext_disable,
+ /* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
+ .dpms = amdgpu_atombios_encoder_dpms,
+ .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+ .prepare = dce_v6_0_encoder_prepare,
+ .mode_set = dce_v6_0_encoder_mode_set,
+ .commit = dce_v6_0_encoder_commit,
+ .disable = dce_v6_0_encoder_disable,
+ .detect = amdgpu_atombios_encoder_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
+ .dpms = amdgpu_atombios_encoder_dpms,
+ .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+ .prepare = dce_v6_0_encoder_prepare,
+ .mode_set = dce_v6_0_encoder_mode_set,
+ .commit = dce_v6_0_encoder_commit,
+ .detect = amdgpu_atombios_encoder_dac_detect,
+};
+
+static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
+ kfree(amdgpu_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(amdgpu_encoder);
+}
+
+static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
+ .destroy = dce_v6_0_encoder_destroy,
+};
+
+static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
+ uint32_t encoder_enum,
+ uint32_t supported_device,
+ u16 caps)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+
+ /* see if we already added it */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+ if (amdgpu_encoder->encoder_enum == encoder_enum) {
+ amdgpu_encoder->devices |= supported_device;
+ return;
+ }
+ }
+
+ /* add a new one */
+ amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
+ if (!amdgpu_encoder)
+ return;
+
+ encoder = &amdgpu_encoder->base;
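+	/* possible_crtcs is a bitmask with one bit per crtc this encoder can drive */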
+ switch (adev->mode_info.num_crtc) {
+ case 1:
+ encoder->possible_crtcs = 0x1;
+ break;
+ case 2:
+ default:
+ encoder->possible_crtcs = 0x3;
+ break;
+ case 4:
+ encoder->possible_crtcs = 0xf;
+ break;
+ case 6:
+ encoder->possible_crtcs = 0x3f;
+ break;
+ }
+
+ amdgpu_encoder->enc_priv = NULL;
+ amdgpu_encoder->encoder_enum = encoder_enum;
+ amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ amdgpu_encoder->devices = supported_device;
+ amdgpu_encoder->rmx_type = RMX_OFF;
+ amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
+ amdgpu_encoder->is_ext_encoder = false;
+ amdgpu_encoder->caps = caps;
+
+ switch (amdgpu_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+ if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ amdgpu_encoder->rmx_type = RMX_FULL;
+ drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS, NULL);
+ amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
+ } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+ drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+ } else {
+ drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+ }
+ drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_SI170B:
+ case ENCODER_OBJECT_ID_CH7303:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+ case ENCODER_OBJECT_ID_TITFP513:
+ case ENCODER_OBJECT_ID_VT1623:
+ case ENCODER_OBJECT_ID_HDMI_SI1930:
+ case ENCODER_OBJECT_ID_TRAVIS:
+ case ENCODER_OBJECT_ID_NUTMEG:
+ /* these are handled by the primary encoders */
+ amdgpu_encoder->is_ext_encoder = true;
+ if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS, NULL);
+ else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+ drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ else
+ drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
+ break;
+ }
+}
+
+static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
+ .set_vga_render_state = &dce_v6_0_set_vga_render_state,
+ .bandwidth_update = &dce_v6_0_bandwidth_update,
+ .vblank_get_counter = &dce_v6_0_vblank_get_counter,
+ .vblank_wait = &dce_v6_0_vblank_wait,
+ .is_display_hung = &dce_v6_0_is_display_hung,
+ .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
+ .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
+ .hpd_sense = &dce_v6_0_hpd_sense,
+ .hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
+ .hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
+ .page_flip = &dce_v6_0_page_flip,
+ .page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
+ .add_encoder = &dce_v6_0_encoder_add,
+ .add_connector = &amdgpu_connector_add,
+ .stop_mc_access = &dce_v6_0_stop_mc_access,
+ .resume_mc_access = &dce_v6_0_resume_mc_access,
+};
+
+static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
+{
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &dce_v6_0_display_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
+ .set = dce_v6_0_set_crtc_interrupt_state,
+ .process = dce_v6_0_crtc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
+ .set = dce_v6_0_set_pageflip_interrupt_state,
+ .process = dce_v6_0_pageflip_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
+ .set = dce_v6_0_set_hpd_interrupt_state,
+ .process = dce_v6_0_hpd_irq,
+};
+
+static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
+
+ adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
+
+ adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+ adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smum.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
index c031ff99fe3e..6a5528105bb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smum.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,22 +21,9 @@
*
*/
-#ifndef TONGA_SMUMGR_H
-#define TONGA_SMUMGR_H
+#ifndef __DCE_V6_0_H__
+#define __DCE_V6_0_H__
-#include "tonga_ppsmc.h"
-
-int tonga_smu_init(struct amdgpu_device *adev);
-int tonga_smu_fini(struct amdgpu_device *adev);
-int tonga_smu_start(struct amdgpu_device *adev);
-
-struct tonga_smu_private_data
-{
- uint8_t *header;
- uint32_t smu_buffer_addr_high;
- uint32_t smu_buffer_addr_low;
- uint32_t header_addr_high;
- uint32_t header_addr_low;
-};
+extern const struct amd_ip_funcs dce_v6_0_ip_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 4fdfab1e9200..5966166ec94c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -170,7 +170,7 @@ static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
*/
static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
- unsigned i = 0;
+ unsigned i = 100;
if (crtc >= adev->mode_info.num_crtc)
return;
@@ -182,14 +182,16 @@ static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
* wait for another frame.
*/
while (dce_v8_0_is_in_vblank(adev, crtc)) {
- if (i++ % 100 == 0) {
+ if (i++ == 100) {
+ i = 0;
if (!dce_v8_0_is_counter_moving(adev, crtc))
break;
}
}
while (!dce_v8_0_is_in_vblank(adev, crtc)) {
- if (i++ % 100 == 0) {
+ if (i++ == 100) {
+ i = 0;
if (!dce_v8_0_is_counter_moving(adev, crtc))
break;
}
@@ -395,15 +397,6 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* don't try to enable hpd on eDP or LVDS avoid breaking the
- * aux dp channel on imac and help (but not completely fix)
- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
- * also avoid interrupt storms during dpms.
- */
- continue;
- }
switch (amdgpu_connector->hpd.hpd) {
case AMDGPU_HPD_1:
WREG32(mmDC_HPD1_CONTROL, tmp);
@@ -426,6 +419,45 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
default:
break;
}
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS avoid breaking the
+ * aux dp channel on imac and help (but not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * also avoid interrupt storms during dpms.
+ */
+ u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+ switch (amdgpu_connector->hpd.hpd) {
+ case AMDGPU_HPD_1:
+ dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_2:
+ dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_3:
+ dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_4:
+ dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_5:
+ dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_6:
+ dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
+ break;
+ default:
+ continue;
+ }
+
+ dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ continue;
+ }
+
dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
@@ -604,6 +636,52 @@ static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
+static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
+{
+ int num_crtc = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ num_crtc = 6;
+ break;
+ case CHIP_KAVERI:
+ num_crtc = 4;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ num_crtc = 2;
+ break;
+ default:
+ num_crtc = 0;
+ }
+ return num_crtc;
+}
+
+void dce_v8_0_disable_dce(struct amdgpu_device *adev)
+{
+	/* Disable VGA rendering and any enabled crtcs, if the asic has a DCE engine */
+ if (amdgpu_atombios_has_dce_engine_info(adev)) {
+ u32 tmp;
+ int crtc_enabled, i;
+
+ dce_v8_0_set_vga_render_state(adev, false);
+
+		/* Disable crtcs */
+ for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
+ crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+ CRTC_CONTROL, CRTC_MASTER_EN);
+ if (crtc_enabled) {
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+ tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+ WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ }
+}
+
static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -1501,13 +1579,13 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (sad->format == eld_reg_to_type[i][1]) {
if (sad->channels > max_channels) {
- value = (sad->channels <<
- AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
- (sad->byte2 <<
- AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
- (sad->freq <<
- AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
- max_channels = sad->channels;
+ value = (sad->channels <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
+ (sad->byte2 <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
+ (sad->freq <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
+ max_channels = sad->channels;
}
if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
@@ -1613,7 +1691,7 @@ static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
- WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
+ WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
@@ -1693,6 +1771,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
+
offset = dig->afmt->offset;
/* hdmi deep color mode general control packets setup, if bpc > 8 */
@@ -1817,7 +1896,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
- HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK); /* required for audio info values to be updated */
+ HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */
WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
(2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
@@ -1826,13 +1905,12 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
- /* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */
WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
- /* enable audio after to setting up hw */
+ /* enable audio after setting up hw */
dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
}
@@ -1944,7 +2022,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_framebuffer *amdgpu_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
uint64_t fb_location, tiling_flags;
uint32_t fb_format, fb_pitch_pixels;
u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
@@ -1952,6 +2030,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
+ char *format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1971,23 +2050,23 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
* just update base pointers
*/
obj = amdgpu_fb->obj;
- rbo = gem_to_amdgpu_bo(obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(rbo);
+ fb_location = amdgpu_bo_gpu_offset(abo);
} else {
- r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
- amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ amdgpu_bo_unreserve(abo);
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
@@ -1999,7 +2078,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
- (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+ (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
@@ -2056,8 +2135,9 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ format_name = drm_get_format_name(target_fb->pixel_format);
+ DRM_ERROR("Unsupported screen format %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
@@ -2137,17 +2217,17 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
- /* set pageflip to happen only at start of vblank interval (front porch) */
- WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
- rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
}
/* Bytes per pixel may have changed */
@@ -2552,7 +2632,7 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
.gamma_set = dce_v8_0_crtc_gamma_set,
.set_config = amdgpu_crtc_set_config,
.destroy = dce_v8_0_crtc_destroy,
- .page_flip = amdgpu_crtc_page_flip,
+ .page_flip_target = amdgpu_crtc_page_flip_target,
};
static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2619,16 +2699,16 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
if (crtc->primary->fb) {
int r;
struct amdgpu_framebuffer *amdgpu_fb;
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r))
- DRM_ERROR("failed to reserve rbo before unpin\n");
+ DRM_ERROR("failed to reserve abo before unpin\n");
else {
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
}
}
/* disable the GRPH */
@@ -2653,7 +2733,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
case ATOM_PPLL2:
/* disable the ppll */
amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+ 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
case ATOM_PPLL0:
/* disable the ppll */
@@ -2803,21 +2883,20 @@ static int dce_v8_0_early_init(void *handle)
dce_v8_0_set_display_funcs(adev);
dce_v8_0_set_irq_funcs(adev);
+ adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
+
switch (adev->asic_type) {
case CHIP_BONAIRE:
case CHIP_HAWAII:
- adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_KAVERI:
- adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 7;
break;
case CHIP_KABINI:
case CHIP_MULLINS:
- adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6; /* ? */
break;
@@ -3236,7 +3315,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
drm_handle_vblank(adev->ddev, crtc);
}
DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
-
break;
case 1: /* vline */
if (disp_int & interrupt_status_offsets[crtc].vline)
@@ -3245,7 +3323,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
index 77016852b252..7d0770c3a49b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
@@ -26,4 +26,6 @@
extern const struct amd_ip_funcs dce_v8_0_ip_funcs;
+void dce_v8_0_disable_dce(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
new file mode 100644
index 000000000000..c2bd9f045532
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -0,0 +1,802 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_i2c.h"
+#include "atom.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+#ifdef CONFIG_DRM_AMDGPU_CIK
+#include "dce_v8_0.h"
+#endif
+#include "dce_v10_0.h"
+#include "dce_v11_0.h"
+#include "dce_virtual.h"
+
+static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
+static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
+static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
+
+/**
+ * dce_virtual_vblank_wait - vblank wait asic callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * No-op for virtual displays; there is no hardware vblank to wait on.
+ */
+static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc)
+{
+ return;
+}
+
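+/* no hardware vblank counter on a virtual display; sw_init forces software counting via max_vblank_count = 0 */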
+static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+ return 0;
+}
+
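+/* nothing to program: flips are completed from the software vblank path */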
+static void dce_virtual_page_flip(struct amdgpu_device *adev,
+ int crtc_id, u64 crtc_base, bool async)
+{
+ return;
+}
+
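+/* no scanout engine to query; returning an error makes the DRM core fall back to plain timestamps */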
+static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+{
+ *vbl = 0;
+ *position = 0;
+
+ return -EINVAL;
+}
+
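+/* the virtual connector is always reported as connected */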
+static bool dce_virtual_hpd_sense(struct amdgpu_device *adev,
+ enum amdgpu_hpd_id hpd)
+{
+ return true;
+}
+
+static void dce_virtual_hpd_set_polarity(struct amdgpu_device *adev,
+ enum amdgpu_hpd_id hpd)
+{
+ return;
+}
+
+static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+static bool dce_virtual_is_display_hung(struct amdgpu_device *adev)
+{
+ return false;
+}
+
+static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ dce_v8_0_disable_dce(adev);
+ break;
+#endif
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ dce_v10_0_disable_dce(adev);
+ break;
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ dce_v11_0_disable_dce(adev);
+ break;
+ case CHIP_TOPAZ:
+ /* no DCE */
+ return;
+ default:
+ DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
+ }
+}
+
+static void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ return;
+}
+
+static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
+ bool render)
+{
+ return;
+}
+
+/**
+ * dce_virtual_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * No-op for virtual displays; there are no hardware watermarks or
+ * line buffers to program.
+ */
+static void dce_virtual_bandwidth_update(struct amdgpu_device *adev)
+{
+ return;
+}
+
+static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+ u16 *green, u16 *blue, uint32_t size)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ int i;
+
+ /* userspace palettes are always correct as is */
+ for (i = 0; i < size; i++) {
+ amdgpu_crtc->lut_r[i] = red[i] >> 6;
+ amdgpu_crtc->lut_g[i] = green[i] >> 6;
+ amdgpu_crtc->lut_b[i] = blue[i] >> 6;
+ }
+
+ return 0;
+}
+
+static void dce_virtual_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(amdgpu_crtc);
+}
+
+static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
+ .cursor_set2 = NULL,
+ .cursor_move = NULL,
+ .gamma_set = dce_virtual_crtc_gamma_set,
+ .set_config = amdgpu_crtc_set_config,
+ .destroy = dce_virtual_crtc_destroy,
+ .page_flip_target = amdgpu_crtc_page_flip_target,
+};
+
+static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ amdgpu_crtc->enabled = true;
+ /* Make sure VBLANK and PFLIP interrupts are still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
+ amdgpu_irq_update(adev, &adev->pageflip_irq, type);
+ drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ amdgpu_crtc->enabled = false;
+ break;
+ }
+}
+
+static void dce_virtual_crtc_prepare(struct drm_crtc *crtc)
+{
+ dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void dce_virtual_crtc_commit(struct drm_crtc *crtc)
+{
+ dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->primary->fb) {
+ int r;
+ struct amdgpu_framebuffer *amdgpu_fb;
+ struct amdgpu_bo *abo;
+
+ amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
+ if (unlikely(r))
+ DRM_ERROR("failed to reserve abo before unpin\n");
+ else {
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
+ }
+ }
+
+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+}
+
+static int dce_virtual_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ /* update the hw mode for dpm */
+ amdgpu_crtc->hw_mode = *adjusted_mode;
+
+ return 0;
+}
+
+static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ amdgpu_crtc->encoder = encoder;
+ amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
+ break;
+ }
+ }
+ if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+ return false;
+ }
+
+ return true;
+}
+
+static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return 0;
+}
+
+static void dce_virtual_crtc_load_lut(struct drm_crtc *crtc)
+{
+ return;
+}
+
+static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ return 0;
+}
+
+static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
+ .dpms = dce_virtual_crtc_dpms,
+ .mode_fixup = dce_virtual_crtc_mode_fixup,
+ .mode_set = dce_virtual_crtc_mode_set,
+ .mode_set_base = dce_virtual_crtc_set_base,
+ .mode_set_base_atomic = dce_virtual_crtc_set_base_atomic,
+ .prepare = dce_virtual_crtc_prepare,
+ .commit = dce_virtual_crtc_commit,
+ .load_lut = dce_virtual_crtc_load_lut,
+ .disable = dce_virtual_crtc_disable,
+};
+
+static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
+{
+ struct amdgpu_crtc *amdgpu_crtc;
+ int i;
+
+ amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
+ (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+ if (amdgpu_crtc == NULL)
+ return -ENOMEM;
+
+ drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
+ amdgpu_crtc->crtc_id = index;
+ adev->mode_info.crtcs[index] = amdgpu_crtc;
+
+ for (i = 0; i < 256; i++) {
+ amdgpu_crtc->lut_r[i] = i << 2;
+ amdgpu_crtc->lut_g[i] = i << 2;
+ amdgpu_crtc->lut_b[i] = i << 2;
+ }
+
+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+ drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
+
+ return 0;
+}
+
+static int dce_virtual_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
+ dce_virtual_set_display_funcs(adev);
+ dce_virtual_set_irq_funcs(adev);
+
+ adev->mode_info.num_crtc = 1;
+ adev->mode_info.num_hpd = 1;
+ adev->mode_info.num_dig = 1;
+ return 0;
+}
+
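+/* fabricate a single virtual connector/encoder pair; there is no real display hardware to probe */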
+static bool dce_virtual_get_connector_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_i2c_bus_rec ddc_bus;
+ struct amdgpu_router router;
+ struct amdgpu_hpd hpd;
+
+ /* look up gpio for ddc, hpd */
+ ddc_bus.valid = false;
+ hpd.hpd = AMDGPU_HPD_NONE;
+ /* needed for aux chan transactions */
+ ddc_bus.hpd = hpd.hpd;
+
+ memset(&router, 0, sizeof(router));
+ router.ddc_valid = false;
+ router.cd_valid = false;
+ amdgpu_display_add_connector(adev,
+ 0,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus,
+ CONNECTOR_OBJECT_ID_VIRTUAL,
+ &hpd,
+ &router);
+
+ amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 0);
+
+ amdgpu_link_encoder_connector(adev->ddev);
+
+ return true;
+}
+
+static int dce_virtual_sw_init(void *handle)
+{
+ int r, i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_irq_add_id(adev, 229, &adev->crtc_irq);
+ if (r)
+ return r;
+
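+ /* no hardware vblank counter: force the DRM core to use software vblank counting */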
+ adev->ddev->max_vblank_count = 0;
+
+ adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+
+ adev->ddev->mode_config.max_width = 16384;
+ adev->ddev->mode_config.max_height = 16384;
+
+ adev->ddev->mode_config.preferred_depth = 24;
+ adev->ddev->mode_config.prefer_shadow = 1;
+
+ adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+ r = amdgpu_modeset_create_props(adev);
+ if (r)
+ return r;
+
+ /* allocate crtcs */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ r = dce_virtual_crtc_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ dce_virtual_get_connector_info(adev);
+ amdgpu_print_display_setup(adev->ddev);
+
+ drm_kms_helper_poll_init(adev->ddev);
+
+ adev->mode_info.mode_config_initialized = true;
+ return 0;
+}
+
+static int dce_virtual_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ kfree(adev->mode_info.bios_hardcoded_edid);
+
+ drm_kms_helper_poll_fini(adev->ddev);
+
+ drm_mode_config_cleanup(adev->ddev);
+ adev->mode_info.mode_config_initialized = false;
+ return 0;
+}
+
+static int dce_virtual_hw_init(void *handle)
+{
+ return 0;
+}
+
+static int dce_virtual_hw_fini(void *handle)
+{
+ return 0;
+}
+
+static int dce_virtual_suspend(void *handle)
+{
+ return dce_virtual_hw_fini(handle);
+}
+
+static int dce_virtual_resume(void *handle)
+{
+ return dce_virtual_hw_init(handle);
+}
+
+static bool dce_virtual_is_idle(void *handle)
+{
+ return true;
+}
+
+static int dce_virtual_wait_for_idle(void *handle)
+{
+ return 0;
+}
+
+static int dce_virtual_soft_reset(void *handle)
+{
+ return 0;
+}
+
+static int dce_virtual_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int dce_virtual_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs dce_virtual_ip_funcs = {
+ .name = "dce_virtual",
+ .early_init = dce_virtual_early_init,
+ .late_init = NULL,
+ .sw_init = dce_virtual_sw_init,
+ .sw_fini = dce_virtual_sw_fini,
+ .hw_init = dce_virtual_hw_init,
+ .hw_fini = dce_virtual_hw_fini,
+ .suspend = dce_virtual_suspend,
+ .resume = dce_virtual_resume,
+ .is_idle = dce_virtual_is_idle,
+ .wait_for_idle = dce_virtual_wait_for_idle,
+ .soft_reset = dce_virtual_soft_reset,
+ .set_clockgating_state = dce_virtual_set_clockgating_state,
+ .set_powergating_state = dce_virtual_set_powergating_state,
+};
+
+/* these are handled by the primary encoders */
+static void dce_virtual_encoder_prepare(struct drm_encoder *encoder)
+{
+ return;
+}
+
+static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
+{
+ return;
+}
+
+static void
+dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return;
+}
+
+static void dce_virtual_encoder_disable(struct drm_encoder *encoder)
+{
+ return;
+}
+
+static void
+dce_virtual_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ return;
+}
+
+static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* set the active encoder to connector routing */
+ amdgpu_encoder_set_active_device(encoder);
+
+ return true;
+}
+
+static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = {
+ .dpms = dce_virtual_encoder_dpms,
+ .mode_fixup = dce_virtual_encoder_mode_fixup,
+ .prepare = dce_virtual_encoder_prepare,
+ .mode_set = dce_virtual_encoder_mode_set,
+ .commit = dce_virtual_encoder_commit,
+ .disable = dce_virtual_encoder_disable,
+};
+
+static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ kfree(amdgpu_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(amdgpu_encoder);
+}
+
+static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
+ .destroy = dce_virtual_encoder_destroy,
+};
+
+static void dce_virtual_encoder_add(struct amdgpu_device *adev,
+ uint32_t encoder_enum,
+ uint32_t supported_device,
+ u16 caps)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+
+ /* see if we already added it */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+ if (amdgpu_encoder->encoder_enum == encoder_enum) {
+ amdgpu_encoder->devices |= supported_device;
+ return;
+ }
+ }
+
+ /* add a new one */
+ amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
+ if (!amdgpu_encoder)
+ return;
+
+ encoder = &amdgpu_encoder->base;
+ encoder->possible_crtcs = 0x1;
+ amdgpu_encoder->enc_priv = NULL;
+ amdgpu_encoder->encoder_enum = encoder_enum;
+ amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ amdgpu_encoder->devices = supported_device;
+ amdgpu_encoder->rmx_type = RMX_OFF;
+ amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
+ amdgpu_encoder->is_ext_encoder = false;
+ amdgpu_encoder->caps = caps;
+
+ drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
+ DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id);
+}
+
+static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
+ .set_vga_render_state = &dce_virtual_set_vga_render_state,
+ .bandwidth_update = &dce_virtual_bandwidth_update,
+ .vblank_get_counter = &dce_virtual_vblank_get_counter,
+ .vblank_wait = &dce_virtual_vblank_wait,
+ .is_display_hung = &dce_virtual_is_display_hung,
+ .backlight_set_level = NULL,
+ .backlight_get_level = NULL,
+ .hpd_sense = &dce_virtual_hpd_sense,
+ .hpd_set_polarity = &dce_virtual_hpd_set_polarity,
+ .hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
+ .page_flip = &dce_virtual_page_flip,
+ .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
+ .add_encoder = &dce_virtual_encoder_add,
+ .add_connector = &amdgpu_connector_add,
+ .stop_mc_access = &dce_virtual_stop_mc_access,
+ .resume_mc_access = &dce_virtual_resume_mc_access,
+};
+
+static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
+{
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &dce_virtual_display_funcs;
+}
+
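+/* hrtimer callback standing in for the vblank interrupt on the virtual CRTC */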
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
+{
+ struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info, vblank_timer);
+ struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device, mode_info);
+ unsigned crtc = 0;
+ drm_handle_vblank(adev->ddev, crtc);
+ dce_virtual_pageflip_irq(adev, NULL, NULL);
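+ /* re-arm the timer by hand; returning HRTIMER_NORESTART keeps the core from restarting it a second time */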
+ hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+}
+
+static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+ int crtc,
+ enum amdgpu_interrupt_state state)
+{
+ if (crtc >= adev->mode_info.num_crtc) {
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ if (state && !adev->mode_info.vsync_timer_enabled) {
+ DRM_DEBUG("Enable software vsync timer\n");
+ hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+ adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle;
+ hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+ } else if (!state && adev->mode_info.vsync_timer_enabled) {
+ DRM_DEBUG("Disable software vsync timer\n");
+ hrtimer_cancel(&adev->mode_info.vblank_timer);
+ }
+
+ adev->mode_info.vsync_timer_enabled = state;
+ DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
+}
+
+static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ switch (type) {
+ case AMDGPU_CRTC_IRQ_VBLANK1:
+ dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev,
+ int crtc)
+{
+ if (crtc >= adev->mode_info.num_crtc) {
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+}
+
+static int dce_virtual_crtc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ unsigned crtc = 0;
+ unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1;
+
+ dce_virtual_crtc_vblank_int_ack(adev, crtc);
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
+ }
+ dce_virtual_pageflip_irq(adev, NULL, NULL);
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ return 0;
+}
+
+static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ if (type >= adev->mode_info.num_crtc) {
+ DRM_ERROR("invalid pageflip crtc %d\n", type);
+ return -EINVAL;
+ }
+ DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state);
+
+ return 0;
+}
+
+static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ unsigned long flags;
+ unsigned crtc_id = 0;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_flip_work *works;
+
+ amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+ if (crtc_id >= adev->mode_info.num_crtc) {
+ DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+ return -EINVAL;
+ }
+
+ /* the IRQ can fire during early init, before the crtc is set up */
+ if (amdgpu_crtc == NULL)
+ return 0;
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ works = amdgpu_crtc->pflip_works;
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+ "AMDGPU_FLIP_SUBMITTED(%d)\n",
+ amdgpu_crtc->pflip_status,
+ AMDGPU_FLIP_SUBMITTED);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ return 0;
+ }
+
+ /* page flip completed. clean up */
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ amdgpu_crtc->pflip_works = NULL;
+
+ /* wake up userspace */
+ if (works->event)
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
+
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+ schedule_work(&works->unpin_work);
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
+ .set = dce_virtual_set_crtc_irq_state,
+ .process = dce_virtual_crtc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = {
+ .set = dce_virtual_set_pageflip_irq_state,
+ .process = dce_virtual_pageflip_irq,
+};
+
+static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
+
+ adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
index 5983e3150cc5..e239243f6ebc 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
@@ -21,21 +21,11 @@
*
*/
-#ifndef ICELAND_SMUM_H
-#define ICELAND_SMUM_H
+#ifndef __DCE_VIRTUAL_H__
+#define __DCE_VIRTUAL_H__
-#include "ppsmc.h"
-
-extern int iceland_smu_init(struct amdgpu_device *adev);
-extern int iceland_smu_fini(struct amdgpu_device *adev);
-extern int iceland_smu_start(struct amdgpu_device *adev);
-
-struct iceland_smu_private_data
-{
- uint8_t *header;
- uint8_t *mec_image;
- uint32_t header_addr_high;
- uint32_t header_addr_low;
-};
+extern const struct amd_ip_funcs dce_virtual_ip_funcs;
+#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
deleted file mode 100644
index ed03b75175d4..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "fiji_smum.h"
-
-MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
-
-static void fiji_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int fiji_dpm_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- fiji_dpm_set_funcs(adev);
-
- return 0;
-}
-
-static int fiji_dpm_init_microcode(struct amdgpu_device *adev)
-{
- char fw_name[30] = "amdgpu/fiji_smc.bin";
- int err;
-
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
- if (err) {
- DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
- return err;
-}
-
-static int fiji_dpm_sw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ret = fiji_dpm_init_microcode(adev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int fiji_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
-
- return 0;
-}
-
-static int fiji_dpm_hw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- ret = fiji_smu_init(adev);
- if (ret) {
- DRM_ERROR("SMU initialization failed\n");
- goto fail;
- }
-
- ret = fiji_smu_start(adev);
- if (ret) {
- DRM_ERROR("SMU start failed\n");
- goto fail;
- }
-
- mutex_unlock(&adev->pm.mutex);
- return 0;
-
-fail:
- adev->firmware.smu_load = false;
- mutex_unlock(&adev->pm.mutex);
- return -EINVAL;
-}
-
-static int fiji_dpm_hw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- mutex_lock(&adev->pm.mutex);
- fiji_smu_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- return 0;
-}
-
-static int fiji_dpm_suspend(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- fiji_dpm_hw_fini(adev);
-
- return 0;
-}
-
-static int fiji_dpm_resume(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- fiji_dpm_hw_init(adev);
-
- return 0;
-}
-
-static int fiji_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int fiji_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-const struct amd_ip_funcs fiji_dpm_ip_funcs = {
- .name = "fiji_dpm",
- .early_init = fiji_dpm_early_init,
- .late_init = NULL,
- .sw_init = fiji_dpm_sw_init,
- .sw_fini = fiji_dpm_sw_fini,
- .hw_init = fiji_dpm_hw_init,
- .hw_fini = fiji_dpm_hw_fini,
- .suspend = fiji_dpm_suspend,
- .resume = fiji_dpm_resume,
- .is_idle = NULL,
- .wait_for_idle = NULL,
- .soft_reset = NULL,
- .set_clockgating_state = fiji_dpm_set_clockgating_state,
- .set_powergating_state = fiji_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs fiji_dpm_funcs = {
- .get_temperature = NULL,
- .pre_set_power_state = NULL,
- .set_power_state = NULL,
- .post_set_power_state = NULL,
- .display_configuration_changed = NULL,
- .get_sclk = NULL,
- .get_mclk = NULL,
- .print_power_state = NULL,
- .debugfs_print_current_performance_level = NULL,
- .force_performance_level = NULL,
- .vblank_too_short = NULL,
- .powergate_uvd = NULL,
-};
-
-static void fiji_dpm_set_funcs(struct amdgpu_device *adev)
-{
- if (NULL == adev->pm.funcs)
- adev->pm.funcs = &fiji_dpm_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
deleted file mode 100644
index b3e19ba4c57f..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ /dev/null
@@ -1,863 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "fiji_ppsmc.h"
-#include "fiji_smum.h"
-#include "smu_ucode_xfer_vi.h"
-#include "amdgpu_ucode.h"
-
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-
-#define FIJI_SMC_SIZE 0x20000
-
-static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
-{
- uint32_t val;
-
- if (smc_address & 3)
- return -EINVAL;
-
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(mmSMC_IND_INDEX_0, smc_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- return 0;
-}
-
-static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
-{
- uint32_t addr;
- uint32_t data, orig_data;
- int result = 0;
- uint32_t extra_shift;
- unsigned long flags;
-
- if (smc_start_address & 3)
- return -EINVAL;
-
- if ((smc_start_address + byte_count) > limit)
- return -EINVAL;
-
- addr = smc_start_address;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- while (byte_count >= 4) {
- /* Bytes are written into the SMC addres space with the MSB first */
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
- result = fiji_set_smc_sram_address(adev, addr, limit);
-
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- if (0 != byte_count) {
- /* Now write odd bytes left, do a read modify write cycle */
- data = 0;
-
- result = fiji_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- orig_data = RREG32(mmSMC_IND_DATA_0);
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- data = (data << 8) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
- data |= (orig_data & ~((~0UL) << extra_shift));
-
- result = fiji_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
- }
-
-out:
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int fiji_program_jump_on_start(struct amdgpu_device *adev)
-{
- static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
- fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-
- return 0;
-}
-
-static bool fiji_is_smc_ram_running(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
-
- return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
-}
-
-static int wait_smu_response(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32(mmSMC_RESP_0);
- if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-
-static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, 0x20000);
- WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send message\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
- if (!fiji_is_smc_ram_running(adev))
- {
- return -EINVAL;
- }
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send message\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
- PPSMC_Msg msg)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
-static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg,
- uint32_t parameter)
-{
- if (!fiji_is_smc_ram_running(adev))
- return -EINVAL;
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return fiji_send_msg_to_smc(adev, msg);
-}
-
-static int fiji_send_msg_to_smc_with_parameter_without_waiting(
- struct amdgpu_device *adev,
- PPSMC_Msg msg, uint32_t parameter)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return fiji_send_msg_to_smc_without_waiting(adev, msg);
-}
-
-#if 0 /* not used yet */
-static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- if (!fiji_is_smc_ram_running(adev))
- return -EINVAL;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-#endif
-
-static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
-{
- const struct smc_firmware_header_v1_0 *hdr;
- uint32_t ucode_size;
- uint32_t ucode_start_address;
- const uint8_t *src;
- uint32_t val;
- uint32_t byte_count;
- uint32_t *data;
- unsigned long flags;
-
- if (!adev->pm.fw)
- return -EINVAL;
-
- /* Skip SMC ucode loading on SR-IOV capable boards.
- * vbios does this for us in asic_init in that case.
- */
- if (adev->virtualization.supports_sr_iov)
- return 0;
-
- hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
- amdgpu_ucode_print_smc_hdr(&hdr->header);
-
- adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
- ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
- src = (const uint8_t *)
- (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
- if (ucode_size & 3) {
- DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
- return -EINVAL;
- }
-
- if (ucode_size > FIJI_SMC_SIZE) {
- DRM_ERROR("SMC address is beyond the SMC RAM area\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- byte_count = ucode_size;
- data = (uint32_t *)src;
- for (; byte_count >= 4; data++, byte_count -= 4)
- WREG32(mmSMC_IND_DATA_0, data[0]);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return 0;
-}
-
-#if 0 /* not used yet */
-static int fiji_read_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t *value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = fiji_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- *value = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int fiji_write_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = fiji_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- WREG32(mmSMC_IND_DATA_0, value);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int fiji_smu_stop_smc(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- return 0;
-}
-#endif
-
-static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- return AMDGPU_UCODE_ID_SDMA0;
- case UCODE_ID_SDMA1:
- return AMDGPU_UCODE_ID_SDMA1;
- case UCODE_ID_CP_CE:
- return AMDGPU_UCODE_ID_CP_CE;
- case UCODE_ID_CP_PFP:
- return AMDGPU_UCODE_ID_CP_PFP;
- case UCODE_ID_CP_ME:
- return AMDGPU_UCODE_ID_CP_ME;
- case UCODE_ID_CP_MEC:
- case UCODE_ID_CP_MEC_JT1:
- case UCODE_ID_CP_MEC_JT2:
- return AMDGPU_UCODE_ID_CP_MEC1;
- case UCODE_ID_RLC_G:
- return AMDGPU_UCODE_ID_RLC_G;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return AMDGPU_UCODE_ID_MAXIMUM;
- }
-}
-
-static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
- uint32_t fw_type,
- struct SMU_Entry *entry)
-{
- enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type);
- struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
- const struct gfx_firmware_header_v1_0 *header = NULL;
- uint64_t gpu_addr;
- uint32_t data_size;
-
- if (ucode->fw == NULL)
- return -EINVAL;
- gpu_addr = ucode->mc_addr;
- header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
- data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
- if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
- (fw_type == UCODE_ID_CP_MEC_JT2)) {
- gpu_addr += le32_to_cpu(header->jt_offset) << 2;
- data_size = le32_to_cpu(header->jt_size) << 2;
- }
-
- entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
- entry->id = (uint16_t)fw_type;
- entry->image_addr_high = upper_32_bits(gpu_addr);
- entry->image_addr_low = lower_32_bits(gpu_addr);
- entry->meta_data_addr_high = 0;
- entry->meta_data_addr_low = 0;
- entry->data_size_byte = data_size;
- entry->num_register_entries = 0;
-
- if (fw_type == UCODE_ID_RLC_G)
- entry->flags = 1;
- else
- entry->flags = 0;
-
- return 0;
-}
-
-static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
-{
- struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv;
- struct SMU_DRAMData_TOC *toc;
- uint32_t fw_to_load;
-
- WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
-
- fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
- fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
-
- toc = (struct SMU_DRAMData_TOC *)private->header;
- toc->num_entries = 0;
- toc->structure_version = 1;
-
- if (!adev->firmware.smu_load)
- return 0;
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for RLC\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for CE\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for PFP\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for ME\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA0\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA1\n");
- return -EINVAL;
- }
-
- fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
- fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
-
- fw_to_load = UCODE_ID_RLC_G_MASK |
- UCODE_ID_SDMA0_MASK |
- UCODE_ID_SDMA1_MASK |
- UCODE_ID_CP_CE_MASK |
- UCODE_ID_CP_ME_MASK |
- UCODE_ID_CP_PFP_MASK |
- UCODE_ID_CP_MEC_MASK;
-
- if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
- DRM_ERROR("Fail to request SMU load ucode\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case AMDGPU_UCODE_ID_SDMA0:
- return UCODE_ID_SDMA0_MASK;
- case AMDGPU_UCODE_ID_SDMA1:
- return UCODE_ID_SDMA1_MASK;
- case AMDGPU_UCODE_ID_CP_CE:
- return UCODE_ID_CP_CE_MASK;
- case AMDGPU_UCODE_ID_CP_PFP:
- return UCODE_ID_CP_PFP_MASK;
- case AMDGPU_UCODE_ID_CP_ME:
- return UCODE_ID_CP_ME_MASK;
- case AMDGPU_UCODE_ID_CP_MEC1:
- return UCODE_ID_CP_MEC_MASK;
- case AMDGPU_UCODE_ID_CP_MEC2:
- return UCODE_ID_CP_MEC_MASK;
- case AMDGPU_UCODE_ID_RLC_G:
- return UCODE_ID_RLC_G_MASK;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return 0;
- }
-}
-
-static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
- uint32_t fw_type)
-{
- uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
- int i;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("check firmware loading failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev)
-{
- int result;
- uint32_t val;
- int i;
-
- /* Assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- result = fiji_smu_upload_firmware_image(adev);
- if (result)
- return result;
-
- /* Clear status */
- WREG32_SMC(ixSMU_STATUS, 0);
-
- /* Enable clock */
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- /* De-assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- /* Set SMU Auto Start */
- val = RREG32_SMC(ixSMU_INPUT_DATA);
- val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
- WREG32_SMC(ixSMU_INPUT_DATA, val);
-
- /* Clear firmware interrupt enable flag */
- WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixRCU_UC_EVENTS);
- if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Interrupt is not enabled by firmware\n");
- return -EINVAL;
- }
-
- /* Call Test SMU message with 0x20000 offset
- * to trigger SMU start
- */
- fiji_send_msg_to_smc_offset(adev);
- DRM_INFO("[FM]try triger smu start\n");
- /* Wait for done bit to be set */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixSMU_STATUS);
- if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Timeout for SMU start\n");
- return -EINVAL;
- }
-
- /* Check pass/failed indicator */
- val = RREG32_SMC(ixSMU_STATUS);
- if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
- DRM_ERROR("SMU Firmware start failed\n");
- return -EINVAL;
- }
- DRM_INFO("[FM]smu started\n");
- /* Wait for firmware to initialize */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixFIRMWARE_FLAGS);
- if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("SMU firmware initialization failed\n");
- return -EINVAL;
- }
- DRM_INFO("[FM]smu initialized\n");
-
- return 0;
-}
-
-static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
-{
- int i, result;
- uint32_t val;
-
- /* wait for smc boot up */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixRCU_UC_EVENTS);
- val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
- if (val)
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("SMC boot sequence is not completed\n");
- return -EINVAL;
- }
-
- /* Clear firmware interrupt enable flag */
- WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
- /* Assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- result = fiji_smu_upload_firmware_image(adev);
- if (result)
- return result;
-
- /* Set smc instruct start point at 0x0 */
- fiji_program_jump_on_start(adev);
-
- /* Enable clock */
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- /* De-assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- /* Wait for firmware to initialize */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixFIRMWARE_FLAGS);
- if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Timeout for SMC firmware initialization\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int fiji_smu_start(struct amdgpu_device *adev)
-{
- int result;
- uint32_t val;
-
- if (!fiji_is_smc_ram_running(adev)) {
- val = RREG32_SMC(ixSMU_FIRMWARE);
- if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
- DRM_INFO("[FM]start smu in nonprotection mode\n");
- result = fiji_smu_start_in_non_protection_mode(adev);
- if (result)
- return result;
- } else {
- DRM_INFO("[FM]start smu in protection mode\n");
- result = fiji_smu_start_in_protection_mode(adev);
- if (result)
- return result;
- }
- }
-
- return fiji_smu_request_load_fw(adev);
-}
-
-static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = {
- .check_fw_load_finish = fiji_smu_check_fw_load_finish,
- .request_smu_load_fw = NULL,
- .request_smu_specific_fw = NULL,
-};
-
-int fiji_smu_init(struct amdgpu_device *adev)
-{
- struct fiji_smu_private_data *private;
- uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- uint32_t smu_internal_buffer_size = 200*4096;
- struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
- struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
- uint64_t mc_addr;
- void *toc_buf_ptr;
- void *smu_buf_ptr;
- int ret;
-
- private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL);
- if (NULL == private)
- return -ENOMEM;
-
- /* allocate firmware buffers */
- if (adev->firmware.smu_load)
- amdgpu_ucode_init_bo(adev);
-
- adev->smu.priv = private;
- adev->smu.fw_flags = 0;
-
- /* Allocate FW image data structure and header buffer */
- ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, toc_buf);
- if (ret) {
- DRM_ERROR("Failed to allocate memory for TOC buffer\n");
- return -ENOMEM;
- }
-
- /* Allocate buffer for SMU internal buffer */
- ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, smu_buf);
- if (ret) {
- DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
- return -ENOMEM;
- }
-
- /* Retrieve GPU address for header buffer and internal buffer */
- ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to reserve the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to pin the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to map the TOC buffer\n");
- return -EINVAL;
- }
-
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- private->header_addr_low = lower_32_bits(mc_addr);
- private->header_addr_high = upper_32_bits(mc_addr);
- private->header = toc_buf_ptr;
-
- ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to reserve the SMU internal buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to pin the SMU internal buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to map the SMU internal buffer\n");
- return -EINVAL;
- }
-
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- private->smu_buffer_addr_low = lower_32_bits(mc_addr);
- private->smu_buffer_addr_high = upper_32_bits(mc_addr);
-
- adev->smu.smumgr_funcs = &fiji_smumgr_funcs;
-
- return 0;
-}
-
-int fiji_smu_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_unref(&adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- kfree(adev->smu.priv);
- adev->smu.priv = NULL;
- if (adev->firmware.fw_buf)
- amdgpu_ucode_fini_bo(adev);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
new file mode 100644
index 000000000000..40abb6b81c09
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -0,0 +1,3362 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_ucode.h"
+#include "si/clearstate_si.h"
+#include "si/sid.h"
+
+#define GFX6_NUM_GFX_RINGS 1
+#define GFX6_NUM_COMPUTE_RINGS 2
+#define STATIC_PER_CU_PG_ENABLE (1 << 3)
+#define DYN_PER_CU_PG_ENABLE (1 << 2)
+#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
+#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
+
+static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
+static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
+
+MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
+MODULE_FIRMWARE("radeon/tahiti_me.bin");
+MODULE_FIRMWARE("radeon/tahiti_ce.bin");
+MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
+
+MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
+MODULE_FIRMWARE("radeon/pitcairn_me.bin");
+MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
+MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
+
+MODULE_FIRMWARE("radeon/verde_pfp.bin");
+MODULE_FIRMWARE("radeon/verde_me.bin");
+MODULE_FIRMWARE("radeon/verde_ce.bin");
+MODULE_FIRMWARE("radeon/verde_rlc.bin");
+
+MODULE_FIRMWARE("radeon/oland_pfp.bin");
+MODULE_FIRMWARE("radeon/oland_me.bin");
+MODULE_FIRMWARE("radeon/oland_ce.bin");
+MODULE_FIRMWARE("radeon/oland_rlc.bin");
+
+MODULE_FIRMWARE("radeon/hainan_pfp.bin");
+MODULE_FIRMWARE("radeon/hainan_me.bin");
+MODULE_FIRMWARE("radeon/hainan_ce.bin");
+MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+
+static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
+static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
+
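+/* register list written into the RLC save/restore buffer during PG init; entries are (select << 16 | dword offset) followed by a data slot */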
+static const u32 verde_rlc_save_restore_register_list[] =
+{
+ (0x8000 << 16) | (0x98f4 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x98f4 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0xe80 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0xe80 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x89bc >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x89bc >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x8c1c >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x8c1c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x98f0 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0xe7c >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x9148 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x9148 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9150 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x897c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8d8c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0xac54 >> 2),
+ 0x00000000,
+ 0x3,
+ (0x9c00 << 16) | (0x98f8 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9910 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9914 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9918 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x991c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9920 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9924 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9928 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x992c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9930 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9934 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9938 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x993c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9940 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9944 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9948 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x994c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9950 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9954 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9958 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x995c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9960 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9964 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9968 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x996c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9970 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9974 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9978 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x997c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9980 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9984 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9988 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x998c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8c00 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8c14 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8c04 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8c08 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x9b7c >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x9b7c >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0xe84 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0xe84 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x89c0 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x89c0 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x914c >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x914c >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x8c20 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x8c20 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x9354 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x9354 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9060 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9364 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9100 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x913c >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x90e0 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x90e4 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x90e8 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x90e0 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x90e4 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x90e8 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8bcc >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8b24 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x88c4 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8e50 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8c0c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8e58 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8e5c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9508 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x950c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9494 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0xac0c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0xac10 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0xac14 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0xae00 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0xac08 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x88d4 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x88c8 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x88cc >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x89b0 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8b10 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8a14 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9830 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9834 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9838 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9a10 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x9870 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x9874 >> 2),
+ 0x00000000,
+ (0x8001 << 16) | (0x9870 >> 2),
+ 0x00000000,
+ (0x8001 << 16) | (0x9874 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x9870 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x9874 >> 2),
+ 0x00000000,
+ (0x8041 << 16) | (0x9870 >> 2),
+ 0x00000000,
+ (0x8041 << 16) | (0x9874 >> 2),
+ 0x00000000,
+ 0x00000000
+};
+
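+/*
+ * Fetch the PFP, ME, CE and RLC microcode (radeon/<chip>_*.bin), validate
+ * it and cache the ucode/feature versions; on any failure all firmware
+ * loaded so far is released again.
+ */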
+static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+{
+ const char *chip_name;
+ char fw_name[30];
+ int err;
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
+ const struct rlc_firmware_header_v1_0 *rlc_hdr;
+
+ DRM_DEBUG("\n");
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ chip_name = "tahiti";
+ break;
+ case CHIP_PITCAIRN:
+ chip_name = "pitcairn";
+ break;
+ case CHIP_VERDE:
+ chip_name = "verde";
+ break;
+ case CHIP_OLAND:
+ chip_name = "oland";
+ break;
+ case CHIP_HAINAN:
+ chip_name = "hainan";
+ break;
+ default:
+ BUG();
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ if (err)
+ goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ if (err)
+ goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+ if (err)
+ goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err)
+ goto out;
+ rlc_hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+
+out:
+ if (err) {
+ printk(KERN_ERR
+ "gfx6: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+ }
+ return err;
+}
+
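+/*
+ * Program the 32 GB_TILE_MODE registers with per-ASIC golden tiling
+ * settings and mirror them in tile_mode_array; the split-equal-to-row-size
+ * entries are derived from the probed memory row size.
+ */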
+static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
+{
+ const u32 num_tile_mode_states = 32;
+ u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+
+ switch (adev->gfx.config.mem_row_size_in_kb) {
+ case 1:
+ split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
+ break;
+ case 2:
+ default:
+ split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
+ break;
+ case 4:
+ split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
+ break;
+ }
+
+ if (adev->asic_type == CHIP_VERDE ||
+ adev->asic_type == CHIP_OLAND ||
+ adev->asic_type == CHIP_HAINAN) {
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 1:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 2:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 3:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 4:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 5:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 6:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 7:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 8:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 9:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 10:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 11:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 12:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 13:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 14:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 15:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 16:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 17:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 21:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 22:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 23:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 24:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 25:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+ }
+ } else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) {
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0: /* non-AA compressed depth or any compressed stencil */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 1: /* 2xAA/4xAA compressed depth only */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 2: /* 8xAA compressed depth only */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 8: /* 1D and 1D Array Surfaces */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 9: /* Displayable maps. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 10: /* Display 8bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 11: /* Display 16bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 12: /* Display 32bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 13: /* Thin. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 14: /* Thin 8 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 15: /* Thin 16 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 16: /* Thin 32 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 17: /* Thin 64 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 21: /* 8 bpp PRT. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 22: /* 16 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 23: /* 32 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 24: /* 64 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 25: /* 128 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+ }
+ } else {
+ DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+ }
+}
+
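+/*
+ * Steer subsequent banked register accesses to a given shader engine,
+ * shader array and instance via GRBM_GFX_INDEX; 0xffffffff selects
+ * broadcast writes for that field.
+ */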
+static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
+ u32 sh_num, u32 instance)
+{
+ u32 data;
+
+ if (instance == 0xffffffff)
+ data = INSTANCE_BROADCAST_WRITES;
+ else
+ data = INSTANCE_INDEX(instance);
+
+ if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+ data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+ else if (se_num == 0xffffffff)
+ data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+ else if (sh_num == 0xffffffff)
+ data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+ else
+ data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+ WREG32(GRBM_GFX_INDEX, data);
+}
+
+static u32 gfx_v6_0_create_bitmask(u32 bit_width)
+{
+ return (u32)(((u64)1 << bit_width) - 1);
+}
+
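+/*
+ * Bitmask of render backends disabled, by fuse or by the user, within
+ * the currently selected SE/SH.
+ */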
+static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
+ u32 max_rb_num_per_se,
+ u32 sh_per_se)
+{
+ u32 data, mask;
+
+ data = RREG32(CC_RB_BACKEND_DISABLE);
+ data &= BACKEND_DISABLE_MASK;
+ data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+ data >>= BACKEND_DISABLE_SHIFT;
+
+ mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+
+ return data & mask;
+}
+
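+/* Per-ASIC SE/PKR/RB routing bits for PA_SC_RASTER_CONFIG. */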
+static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
+{
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ *rconf |= RB_XSEL2(2) | RB_XSEL | PKR_MAP(2) | PKR_YSEL(1) |
+ SE_MAP(2) | SE_XSEL(2) | SE_YSEL(2);
+ break;
+ case CHIP_VERDE:
+ *rconf |= RB_XSEL | PKR_MAP(2) | PKR_YSEL(1);
+ break;
+ case CHIP_OLAND:
+ *rconf |= RB_YSEL;
+ break;
+ case CHIP_HAINAN:
+ *rconf |= 0x0;
+ break;
+ default:
+ DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+ break;
+ }
+}
+
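+/*
+ * With harvested RBs the global raster config is wrong for some shader
+ * engines: rewrite the SE/PKR/RB map fields per SE so that work is only
+ * routed to backends that are actually present.
+ */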
+static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+ u32 raster_config, unsigned rb_mask,
+ unsigned num_rb)
+{
+ unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+ unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+ unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+ unsigned rb_per_se = num_rb / num_se;
+ unsigned se_mask[4];
+ unsigned se;
+
+ se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+ se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+ se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+ se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+ WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+ WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+ WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+ for (se = 0; se < num_se; se++) {
+ unsigned raster_config_se = raster_config;
+ unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
+ unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
+ int idx = (se / 2) * 2;
+
+ if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
+ raster_config_se &= ~SE_MAP_MASK;
+
+ if (!se_mask[idx]) {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+ } else {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+ }
+ }
+
+ pkr0_mask &= rb_mask;
+ pkr1_mask &= rb_mask;
+ if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
+ raster_config_se &= ~PKR_MAP_MASK;
+
+ if (!pkr0_mask) {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+ } else {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+ }
+ }
+
+ if (rb_per_se >= 2) {
+ unsigned rb0_mask = 1 << (se * rb_per_se);
+ unsigned rb1_mask = rb0_mask << 1;
+
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR0_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+
+ if (rb_per_se > 2) {
+ rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
+ rb1_mask = rb0_mask << 1;
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR1_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+ }
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on SI */
+ gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ WREG32(PA_SC_RASTER_CONFIG, raster_config_se);
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on SI */
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+}
+
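+/*
+ * Walk every SE/SH to collect the enabled-RB mask, cache it, and program
+ * PA_SC_RASTER_CONFIG, taking the per-SE harvested path when not all
+ * backends are enabled.
+ */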
+static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_per_se,
+ u32 max_rb_num_per_se)
+{
+ int i, j;
+ u32 data, mask;
+ u32 disabled_rbs = 0;
+ u32 enabled_rbs = 0;
+ unsigned num_rb_pipes;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < se_num; i++) {
+ for (j = 0; j < sh_per_se; j++) {
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
+ disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+ }
+ }
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ mask = 1;
+ for (i = 0; i < max_rb_num_per_se * se_num; i++) {
+ if (!(disabled_rbs & mask))
+ enabled_rbs |= mask;
+ mask <<= 1;
+ }
+
+ adev->gfx.config.backend_enable_mask = enabled_rbs;
+ adev->gfx.config.num_rbs = hweight32(enabled_rbs);
+
+ num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
+ adev->gfx.config.max_shader_engines, 16);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < se_num; i++) {
+ gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
+ data = 0;
+ for (j = 0; j < sh_per_se; j++) {
+ switch (enabled_rbs & 3) {
+ case 1:
+ data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+ break;
+ case 2:
+ data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+ break;
+ case 3:
+ default:
+ data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+ break;
+ }
+ enabled_rbs >>= 2;
+ }
+ gfx_v6_0_raster_config(adev, &data);
+
+ if (!adev->gfx.config.backend_enable_mask ||
+ adev->gfx.config.num_rbs >= num_rb_pipes)
+ WREG32(PA_SC_RASTER_CONFIG, data);
+ else
+ gfx_v6_0_write_harvested_raster_configs(adev, data,
+ adev->gfx.config.backend_enable_mask,
+ num_rb_pipes);
+ }
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+/*
+static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev)
+{
+}
+*/
+
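+/*
+ * Bitmask of active CUs in the currently selected SH, i.e. the inverse
+ * of the fuse/user inactive-CU bits.
+ */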
+static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh)
+{
+ u32 data, mask;
+
+ data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+ data &= INACTIVE_CUS_MASK;
+ data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+ data >>= INACTIVE_CUS_SHIFT;
+
+ mask = gfx_v6_0_create_bitmask(cu_per_sh);
+
+ return ~data & mask;
+}
+
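+/*
+ * For each SE/SH, clear the lowest active CU bit out of
+ * SPI_STATIC_THREAD_MGMT_3, presumably reserving that CU from static
+ * thread management.
+ */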
+static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_per_se,
+ u32 cu_per_sh)
+{
+ int i, j, k;
+ u32 data, mask;
+ u32 active_cu = 0;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < se_num; i++) {
+ for (j = 0; j < sh_per_se; j++) {
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+ active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh);
+
+ /* find and clear the first active CU bit */
+ for (k = 0; k < 16; k++) {
+ mask = 1 << k;
+ if (active_cu & mask) {
+ data &= ~mask;
+ WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+ break;
+ }
+ }
+ }
+ }
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
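+/*
+ * One-time golden setup: per-ASIC shader/tiling limits, GB_ADDR_CONFIG
+ * and its mirrors, the tiling tables, RB and SPI setup, plus assorted
+ * CP/PA defaults.
+ */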
+static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
+{
+ u32 gb_addr_config = 0;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 sx_debug_1;
+ u32 hdp_host_path_cntl;
+ u32 tmp;
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ adev->gfx.config.max_shader_engines = 2;
+ adev->gfx.config.max_tile_pipes = 12;
+ adev->gfx.config.max_cu_per_sh = 8;
+ adev->gfx.config.max_sh_per_se = 2;
+ adev->gfx.config.max_backends_per_se = 4;
+ adev->gfx.config.max_texture_channel_caches = 12;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 32;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_PITCAIRN:
+ adev->gfx.config.max_shader_engines = 2;
+ adev->gfx.config.max_tile_pipes = 8;
+ adev->gfx.config.max_cu_per_sh = 5;
+ adev->gfx.config.max_sh_per_se = 2;
+ adev->gfx.config.max_backends_per_se = 4;
+ adev->gfx.config.max_texture_channel_caches = 8;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 32;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+ break;
+
+ case CHIP_VERDE:
+ adev->gfx.config.max_shader_engines = 1;
+ adev->gfx.config.max_tile_pipes = 4;
+ adev->gfx.config.max_cu_per_sh = 5;
+ adev->gfx.config.max_sh_per_se = 2;
+ adev->gfx.config.max_backends_per_se = 4;
+ adev->gfx.config.max_texture_channel_caches = 4;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 32;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_OLAND:
+ adev->gfx.config.max_shader_engines = 1;
+ adev->gfx.config.max_tile_pipes = 4;
+ adev->gfx.config.max_cu_per_sh = 6;
+ adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_backends_per_se = 2;
+ adev->gfx.config.max_texture_channel_caches = 4;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 16;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_HAINAN:
+ adev->gfx.config.max_shader_engines = 1;
+ adev->gfx.config.max_tile_pipes = 4;
+ adev->gfx.config.max_cu_per_sh = 5;
+ adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_backends_per_se = 1;
+ adev->gfx.config.max_texture_channel_caches = 2;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 16;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+ WREG32(SRBM_INT_CNTL, 1);
+ WREG32(SRBM_INT_ACK, 1);
+
+ WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+ mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+ adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
+ adev->gfx.config.mem_max_burst_length_bytes = 256;
+ tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+ adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+ if (adev->gfx.config.mem_row_size_in_kb > 4)
+ adev->gfx.config.mem_row_size_in_kb = 4;
+ adev->gfx.config.shader_engine_tile_size = 32;
+ adev->gfx.config.num_gpus = 1;
+ adev->gfx.config.multi_gpu_tile_size = 64;
+
+ gb_addr_config &= ~ROW_SIZE_MASK;
+ switch (adev->gfx.config.mem_row_size_in_kb) {
+ case 1:
+ default:
+ gb_addr_config |= ROW_SIZE(0);
+ break;
+ case 2:
+ gb_addr_config |= ROW_SIZE(1);
+ break;
+ case 4:
+ gb_addr_config |= ROW_SIZE(2);
+ break;
+ }
+ adev->gfx.config.gb_addr_config = gb_addr_config;
+
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+#if 0
+ if (adev->has_uvd) {
+ WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+ }
+#endif
+ gfx_v6_0_tiling_mode_table_init(adev);
+
+ gfx_v6_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
+ adev->gfx.config.max_sh_per_se,
+ adev->gfx.config.max_backends_per_se);
+
+ gfx_v6_0_setup_spi(adev, adev->gfx.config.max_shader_engines,
+ adev->gfx.config.max_sh_per_se,
+ adev->gfx.config.max_cu_per_sh);
+
+ gfx_v6_0_get_cu_info(adev);
+
+ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+ ROQ_IB2_START(0x2b)));
+ WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+ sx_debug_1 = RREG32(SX_DEBUG_1);
+ WREG32(SX_DEBUG_1, sx_debug_1);
+
+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+
+ WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_frontend) |
+ SC_BACKEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_backend) |
+ SC_HIZ_TILE_FIFO_SIZE(adev->gfx.config.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(adev->gfx.config.sc_earlyz_tile_fifo_size)));
+
+ WREG32(VGT_NUM_INSTANCES, 1);
+ WREG32(CP_PERFMON_CNTL, 0);
+ WREG32(SQ_CONFIG, 0);
+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+ FORCE_EOV_MAX_REZ_CNT(255)));
+
+ WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+ AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+ WREG32(VGT_GS_VERTEX_REUSE, 16);
+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+ WREG32(CB_PERFCOUNTER0_SELECT0, 0);
+ WREG32(CB_PERFCOUNTER0_SELECT1, 0);
+ WREG32(CB_PERFCOUNTER1_SELECT0, 0);
+ WREG32(CB_PERFCOUNTER1_SELECT1, 0);
+ WREG32(CB_PERFCOUNTER2_SELECT0, 0);
+ WREG32(CB_PERFCOUNTER2_SELECT1, 0);
+ WREG32(CB_PERFCOUNTER3_SELECT0, 0);
+ WREG32(CB_PERFCOUNTER3_SELECT1, 0);
+
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+ udelay(50);
+}
+
+static void gfx_v6_0_scratch_init(struct amdgpu_device *adev)
+{
+ int i;
+
+ adev->gfx.scratch.num_reg = 7;
+ adev->gfx.scratch.reg_base = SCRATCH_REG0;
+ for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
+ adev->gfx.scratch.free[i] = true;
+ adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
+ }
+}
+
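+/*
+ * Basic ring test: write 0xDEADBEEF into a scratch register through a
+ * SET_CONFIG_REG packet and poll until the value lands.
+ */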
+static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ r = amdgpu_gfx_scratch_get(adev, &scratch);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+
+ r = amdgpu_ring_alloc(ring, 3);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+ }
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < adev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+ ring->idx, scratch, tmp);
+ r = -EINVAL;
+ }
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+}
+
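+/**
+ * gfx_v6_0_ring_emit_hdp_flush - emit an hdp flush on the cp
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Emits an hdp flush on the cp by writing HDP_MEM_COHERENCY_FLUSH_CNTL.
+ */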
+static void gfx_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ /* flush hdp cache */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0x1);
+}
+
+/**
+ * gfx_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Emits an hdp invalidate on the cp.
+ */
+static void gfx_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, HDP_DEBUG0);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0x1);
+}
+
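+/*
+ * Fence emission: a SURFACE_SYNC cache flush followed by an
+ * EVENT_WRITE_EOP that writes the sequence number (32 or 64 bit via
+ * DATA_SEL) and optionally raises an interrupt (INT_SEL).
+ */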
+static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+ u64 seq, unsigned flags)
+{
+ bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+ bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+ /* flush read cache over gart */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ amdgpu_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA);
+ amdgpu_ring_write(ring, 0xFFFFFFFF);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 10); /* poll interval */
+ /* EVENT_WRITE_EOP - flush caches, send int */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ amdgpu_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+ DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+ amdgpu_ring_write(ring, upper_32_bits(seq));
+}
+
+static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
+{
+ u32 header, control = 0;
+
+ /* insert SWITCH_BUFFER packet before first IB in the ring frame */
+ if (ctx_switch) {
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ }
+
+ if (ib->flags & AMDGPU_IB_FLAG_CE)
+ header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
+ else
+ header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+ control |= ib->length_dw | (vm_id << 24);
+
+ amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+ amdgpu_ring_write(ring, control);
+}
+
+/**
+ * gfx_v6_0_ring_test_ib - basic ring IB test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Allocate an IB and execute it on the gfx ring (SI).
+ * Provides a basic gfx ring test to verify that IBs are working.
+ * Returns 0 on success, error on failure.
+ */
+static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib ib;
+ struct fence *f = NULL;
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ long r;
+
+ r = amdgpu_gfx_scratch_get(adev, &scratch);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
+ r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
+ }
+ ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+ ib.ptr[1] = scratch - PACKET3_SET_CONFIG_REG_START;
+ ib.ptr[2] = 0xDEADBEEF;
+ ib.length_dw = 3;
+
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+ if (r)
+ goto err2;
+
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
+ goto err2;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ goto err2;
+ }
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
+ } else {
+ DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
+ scratch, tmp);
+ r = -EINVAL;
+ }
+
+err2:
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
+err1:
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+}
+
+static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+{
+ int i;
+
+ if (enable) {
+ WREG32(CP_ME_CNTL, 0);
+ } else {
+ WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
+ WREG32(SCRATCH_UMSK, 0);
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+ adev->gfx.gfx_ring[i].ready = false;
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ adev->gfx.compute_ring[i].ready = false;
+ }
+ udelay(50);
+}
+
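+/*
+ * Halt the CP, then stream the PFP/CE/ME ucode dwords into the
+ * respective UCODE_DATA ports with the address registers reset to 0.
+ */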
+static int gfx_v6_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
+{
+ unsigned i;
+ const struct gfx_firmware_header_v1_0 *pfp_hdr;
+ const struct gfx_firmware_header_v1_0 *ce_hdr;
+ const struct gfx_firmware_header_v1_0 *me_hdr;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
+ return -EINVAL;
+
+ gfx_v6_0_cp_gfx_enable(adev, false);
+ pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+
+ amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+ amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+ amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+
+ /* PFP */
+ fw_data = (const __le32 *)
+ (adev->gfx.pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __le32 *)
+ (adev->gfx.ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __le32 *)
+ (adev->gfx.me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
+ return 0;
+}
+
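+/*
+ * Bring up the gfx ring: ME_INITIALIZE, the CE partition bases, then the
+ * clear-state preamble built from the rlc.cs_data section list.
+ */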
+static int gfx_v6_0_cp_gfx_start(struct amdgpu_device *adev)
+{
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+ struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
+ int r, i;
+
+ r = amdgpu_ring_alloc(ring, 7 + 4);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ amdgpu_ring_write(ring, 0x1);
+ amdgpu_ring_write(ring, 0x0);
+ amdgpu_ring_write(ring, adev->gfx.config.max_hw_contexts - 1);
+ amdgpu_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
+ amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
+ amdgpu_ring_write(ring, 0xc000);
+ amdgpu_ring_write(ring, 0xe000);
+ amdgpu_ring_commit(ring);
+
+ gfx_v6_0_cp_gfx_enable(adev, true);
+
+ r = amdgpu_ring_alloc(ring, gfx_v6_0_get_csb_size(adev) + 10);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ amdgpu_ring_write(ring,
+ PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+ for (i = 0; i < ext->reg_count; i++)
+ amdgpu_ring_write(ring, ext->extent[i]);
+ }
+ }
+ }
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ amdgpu_ring_write(ring, 0x00000316);
+ amdgpu_ring_write(ring, 0x0000000e);
+ amdgpu_ring_write(ring, 0x00000010);
+
+ amdgpu_ring_commit(ring);
+
+ return 0;
+}
+
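+/*
+ * Program CP_RB0 (size, rptr writeback address, base), start the ring
+ * via gfx_v6_0_cp_gfx_start() and verify it with a ring test.
+ */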
+static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+ u64 rptr_addr;
+
+ WREG32(CP_SEM_WAIT_TIMER, 0x0);
+ WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+ /* Set the write pointer delay */
+ WREG32(CP_RB_WPTR_DELAY, 0);
+
+ WREG32(CP_DEBUG, 0);
+ WREG32(SCRATCH_ADDR, 0);
+
+ /* ring 0 - compute and gfx */
+ /* Set ring buffer size */
+ ring = &adev->gfx.gfx_ring[0];
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB0_CNTL, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+ ring->wptr = 0;
+ WREG32(CP_RB0_WPTR, ring->wptr);
+
+ /* set the wb address whether it's enabled or not */
+ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ WREG32(CP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+ WREG32(SCRATCH_UMSK, 0);
+
+ mdelay(1);
+ WREG32(CP_RB0_CNTL, tmp);
+
+ WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
+
+ /* start the rings */
+ gfx_v6_0_cp_gfx_start(adev);
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ return 0;
+}
+
+static u32 gfx_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ return ring->adev->wb.wb[ring->rptr_offs];
+}
+
+static u32 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->gfx.gfx_ring[0])
+ return RREG32(CP_RB0_WPTR);
+ else if (ring == &adev->gfx.compute_ring[0])
+ return RREG32(CP_RB1_WPTR);
+ else if (ring == &adev->gfx.compute_ring[1])
+ return RREG32(CP_RB2_WPTR);
+ else
+ BUG();
+}
+
+static void gfx_v6_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ WREG32(CP_RB0_WPTR, ring->wptr);
+ (void)RREG32(CP_RB0_WPTR);
+}
+
+static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->gfx.compute_ring[0]) {
+ WREG32(CP_RB1_WPTR, ring->wptr);
+ (void)RREG32(CP_RB1_WPTR);
+ } else if (ring == &adev->gfx.compute_ring[1]) {
+ WREG32(CP_RB2_WPTR, ring->wptr);
+ (void)RREG32(CP_RB2_WPTR);
+ } else {
+ BUG();
+ }
+}
+
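+/* Same bring-up as the gfx ring, for the two compute rings on CP_RB1/CP_RB2. */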
+static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+ u64 rptr_addr;
+
+ /* ring1 - compute only */
+ /* Set ring buffer size */
+
+ ring = &adev->gfx.compute_ring[0];
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB1_CNTL, tmp);
+
+ WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+ ring->wptr = 0;
+ WREG32(CP_RB1_WPTR, ring->wptr);
+
+ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ WREG32(CP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+ mdelay(1);
+ WREG32(CP_RB1_CNTL, tmp);
+ WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+
+ ring = &adev->gfx.compute_ring[1];
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB2_CNTL, tmp);
+
+ WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+ ring->wptr = 0;
+ WREG32(CP_RB2_WPTR, ring->wptr);
+ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ WREG32(CP_RB2_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+ mdelay(1);
+ WREG32(CP_RB2_CNTL, tmp);
+ WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+
+ adev->gfx.compute_ring[0].ready = true;
+ adev->gfx.compute_ring[1].ready = true;
+
+ r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[0]);
+ if (r) {
+ adev->gfx.compute_ring[0].ready = false;
+ return r;
+ }
+
+ r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[1]);
+ if (r) {
+ adev->gfx.compute_ring[1].ready = false;
+ return r;
+ }
+
+ return 0;
+}
+
+static void gfx_v6_0_cp_enable(struct amdgpu_device *adev, bool enable)
+{
+ gfx_v6_0_cp_gfx_enable(adev, enable);
+}
+
+static int gfx_v6_0_cp_load_microcode(struct amdgpu_device *adev)
+{
+ return gfx_v6_0_cp_gfx_load_microcode(adev);
+}
+
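+/*
+ * Toggle the context busy/empty interrupts; when disabling, also wait
+ * for the RLC to report the gfx block clocked and powered but not busy.
+ */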
+static void gfx_v6_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 tmp = RREG32(CP_INT_CNTL_RING0);
+ u32 mask;
+ int i;
+
+ if (enable)
+ tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ else
+ tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ WREG32(CP_INT_CNTL_RING0, tmp);
+
+ if (!enable) {
+ /* read a gfx register */
+ tmp = RREG32(DB_DEPTH_INFO);
+
+ mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
+ break;
+ udelay(1);
+ }
+ }
+}
+
+static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
+{
+ int r;
+
+ gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+
+ r = gfx_v6_0_cp_load_microcode(adev);
+ if (r)
+ return r;
+
+ r = gfx_v6_0_cp_gfx_resume(adev);
+ if (r)
+ return r;
+ r = gfx_v6_0_cp_compute_resume(adev);
+ if (r)
+ return r;
+
+ gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+ return 0;
+}
+
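+/*
+ * Pipeline sync: WAIT_REG_MEM on the fence address until the latest
+ * synced sequence number shows up, then (on gfx) a SWITCH_BUFFER pair.
+ */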
+static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring, 0xffffffff);
+ amdgpu_ring_write(ring, 4); /* poll interval */
+
+ if (usepfp) {
+ /* sync CE with ME to prevent the CE from fetching the CE IB before the context switch is done */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ }
+}
+
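+/*
+ * VM flush: write the new page-table base for the VM context, request a
+ * TLB invalidate for it and wait for the invalidate to complete.
+ */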
+static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
+{
+ int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+
+ /* write new base address */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+ WRITE_DATA_DST_SEL(0)));
+ if (vm_id < 8) {
+ amdgpu_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+ } else {
+ amdgpu_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+ }
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ /* bits 0-15 are the VM contexts0-15 */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 1 << vm_id);
+
+ /* wait for the invalidate to complete */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
+ WAIT_REG_MEM_ENGINE(0))); /* me */
+ amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0); /* ref */
+ amdgpu_ring_write(ring, 0); /* mask */
+ amdgpu_ring_write(ring, 0x20); /* poll interval */
+
+ if (usepfp) {
+ /* sync PFP to ME, otherwise we might get invalid PFP reads */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ amdgpu_ring_write(ring, 0x0);
+
+ /* sync CE with ME to prevent CE fetching CEIB before the context switch is done */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ }
+}
+
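+/* Unpin and free the RLC save/restore, clear-state and CP-table BOs. */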
+static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->gfx.rlc.save_restore_obj) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
+ amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+
+ amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
+ adev->gfx.rlc.save_restore_obj = NULL;
+ }
+
+ if (adev->gfx.rlc.clear_state_obj) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+ amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+ adev->gfx.rlc.clear_state_obj = NULL;
+ }
+
+ if (adev->gfx.rlc.cp_table_obj) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+ amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
+ adev->gfx.rlc.cp_table_obj = NULL;
+ }
+}
+
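+/*
+ * Allocate and pin the RLC save/restore buffer and the clear-state
+ * buffer in VRAM, then fill them from the save/restore register list
+ * and the clear-state data for this ASIC.
+ */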
+static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+{
+ const u32 *src_ptr;
+ volatile u32 *dst_ptr;
+ u32 dws, i;
+ u64 reg_list_mc_addr;
+ const struct cs_section_def *cs_data;
+ int r;
+
+ adev->gfx.rlc.reg_list = verde_rlc_save_restore_register_list;
+ adev->gfx.rlc.reg_list_size =
+ (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
+
+ adev->gfx.rlc.cs_data = si_cs_data;
+ src_ptr = adev->gfx.rlc.reg_list;
+ dws = adev->gfx.rlc.reg_list_size;
+ cs_data = adev->gfx.rlc.cs_data;
+
+ if (src_ptr) {
+ /* save restore block */
+ if (adev->gfx.rlc.save_restore_obj == NULL) {
+
+ r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, NULL,
+ &adev->gfx.rlc.save_restore_obj);
+
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+ return r;
+ }
+ }
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+ if (unlikely(r != 0)) {
+ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+ r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.save_restore_gpu_addr);
+ if (r) {
+ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+ dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
+ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
+ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+ /* write the sr buffer */
+ dst_ptr = adev->gfx.rlc.sr_ptr;
+ for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+ dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+ amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+ }
+
+ if (cs_data) {
+ /* clear state block */
+ adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
+ dws = adev->gfx.rlc.clear_state_size + (256 / 4);
+
+ if (adev->gfx.rlc.clear_state_obj == NULL) {
+ r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, NULL,
+ &adev->gfx.rlc.clear_state_obj);
+
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+ }
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (unlikely(r != 0)) {
+ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+ r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_gpu_addr);
+ if (r) {
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+ /* set up the cs buffer */
+ dst_ptr = adev->gfx.rlc.cs_ptr;
+ reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256;
+ dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
+ dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
+ dst_ptr[2] = cpu_to_le32(adev->gfx.rlc.clear_state_size);
+ gfx_v6_0_get_csb_buffer(adev, &dst_ptr[(256/4)]);
+ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+
+ return 0;
+}
+
+static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp;
+
+ tmp = RREG32(RLC_LB_CNTL);
+ if (enable)
+ tmp |= LOAD_BALANCE_ENABLE;
+ else
+ tmp &= ~LOAD_BALANCE_ENABLE;
+ WREG32(RLC_LB_CNTL, tmp);
+
+ if (!enable) {
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ WREG32(SPI_LB_CU_MASK, 0x00ff);
+ }
+}
+
+static void gfx_v6_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
+ break;
+ udelay(1);
+ }
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
+ break;
+ udelay(1);
+ }
+}
+
+static void gfx_v6_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
+{
+ u32 tmp;
+
+ tmp = RREG32(RLC_CNTL);
+ if (tmp != rlc)
+ WREG32(RLC_CNTL, rlc);
+}
+
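+/*
+ * Disable the RLC, wait for its serdes to go idle and return the
+ * previous RLC_CNTL value so the caller can restore it afterwards via
+ * gfx_v6_0_update_rlc().
+ */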
+static u32 gfx_v6_0_halt_rlc(struct amdgpu_device *adev)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_CNTL);
+
+ if (data & RLC_ENABLE) {
+ data &= ~RLC_ENABLE;
+ WREG32(RLC_CNTL, data);
+
+ gfx_v6_0_wait_for_rlc_serdes(adev);
+ }
+
+ return orig;
+}
+
+static void gfx_v6_0_rlc_stop(struct amdgpu_device *adev)
+{
+ WREG32(RLC_CNTL, 0);
+
+ gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+ gfx_v6_0_wait_for_rlc_serdes(adev);
+}
+
+static void gfx_v6_0_rlc_start(struct amdgpu_device *adev)
+{
+ WREG32(RLC_CNTL, RLC_ENABLE);
+
+ gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+ udelay(50);
+}
+
+static void gfx_v6_0_rlc_reset(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32(GRBM_SOFT_RESET);
+
+ tmp |= SOFT_RESET_RLC;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ udelay(50);
+ tmp &= ~SOFT_RESET_RLC;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ udelay(50);
+}
+
+static bool gfx_v6_0_lbpw_supported(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ /* Enable LBPW only for DDR3 */
+ tmp = RREG32(MC_SEQ_MISC0);
+ return (tmp & 0xF0000000) == 0xB0000000;
+}
+
+static void gfx_v6_0_init_cg(struct amdgpu_device *adev)
+{
+}
+
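+/*
+ * Reset the RLC, program its load-balancing and MC registers, then
+ * stream the RLC microcode in one dword at a time through
+ * RLC_UCODE_ADDR/RLC_UCODE_DATA before restarting it.
+ */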
+static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
+{
+ u32 i;
+ const struct rlc_firmware_header_v1_0 *hdr;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ if (!adev->gfx.rlc_fw)
+ return -EINVAL;
+
+ gfx_v6_0_rlc_stop(adev);
+ gfx_v6_0_rlc_reset(adev);
+ gfx_v6_0_init_pg(adev);
+ gfx_v6_0_init_cg(adev);
+
+ WREG32(RLC_RL_BASE, 0);
+ WREG32(RLC_RL_SIZE, 0);
+ WREG32(RLC_LB_CNTL, 0);
+ WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
+ WREG32(RLC_LB_CNTR_INIT, 0);
+ WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
+
+ WREG32(RLC_MC_CNTL, 0);
+ WREG32(RLC_UCODE_CNTL, 0);
+
+ hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ fw_data = (const __le32 *)
+ (adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ amdgpu_ucode_print_rlc_hdr(&hdr->header);
+
+ for (i = 0; i < fw_size; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
+ }
+ WREG32(RLC_UCODE_ADDR, 0);
+
+ gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
+ gfx_v6_0_rlc_start(adev);
+
+ return 0;
+}
+
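+/*
+ * Enable or disable coarse-grain clock gating (CGCG/CGLS): the RLC is
+ * briefly halted while the serdes write masks are programmed, then the
+ * enable bits in RLC_CGCG_CGLS_CTRL are updated.
+ */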
+static void gfx_v6_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
+{
+ u32 data, orig, tmp;
+
+ orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
+ gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+ WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
+
+ tmp = gfx_v6_0_halt_rlc(adev);
+
+ WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+ WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+ WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
+
+ gfx_v6_0_wait_for_rlc_serdes(adev);
+ gfx_v6_0_update_rlc(adev, tmp);
+
+ WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
+
+ data |= CGCG_EN | CGLS_EN;
+ } else {
+ gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+
+ data &= ~(CGCG_EN | CGLS_EN);
+ }
+
+ if (orig != data)
+ WREG32(RLC_CGCG_CGLS_CTRL, data);
+}
+
+static void gfx_v6_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
+{
+ u32 data, orig, tmp = 0;
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+ orig = data = RREG32(CGTS_SM_CTRL_REG);
+ data = 0x96940200;
+ if (orig != data)
+ WREG32(CGTS_SM_CTRL_REG, data);
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
+ orig = data = RREG32(CP_MEM_SLP_CNTL);
+ data |= CP_MEM_LS_EN;
+ if (orig != data)
+ WREG32(CP_MEM_SLP_CNTL, data);
+ }
+
+ orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ data &= 0xffffffc0;
+ if (orig != data)
+ WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+ tmp = gfx_v6_0_halt_rlc(adev);
+
+ WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+ WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+ WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
+
+ gfx_v6_0_update_rlc(adev, tmp);
+ } else {
+ orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ data |= 0x00000003;
+ if (orig != data)
+ WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+ data = RREG32(CP_MEM_SLP_CNTL);
+ if (data & CP_MEM_LS_EN) {
+ data &= ~CP_MEM_LS_EN;
+ WREG32(CP_MEM_SLP_CNTL, data);
+ }
+ orig = data = RREG32(CGTS_SM_CTRL_REG);
+ data |= LS_OVERRIDE | OVERRIDE;
+ if (orig != data)
+ WREG32(CGTS_SM_CTRL_REG, data);
+
+ tmp = gfx_v6_0_halt_rlc(adev);
+
+ WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+ WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+ WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
+
+ gfx_v6_0_update_rlc(adev, tmp);
+ }
+}
+
+/*
+static void gfx_v6_0_update_cg(struct amdgpu_device *adev,
+ bool enable)
+{
+ gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+ if (enable) {
+ gfx_v6_0_enable_mgcg(adev, true);
+ gfx_v6_0_enable_cgcg(adev, true);
+ } else {
+ gfx_v6_0_enable_cgcg(adev, false);
+ gfx_v6_0_enable_mgcg(adev, false);
+ }
+ gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+}
+*/
+
+static void gfx_v6_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
+ bool enable)
+{
+}
+
+static void gfx_v6_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
+ bool enable)
+{
+}
+
+static void gfx_v6_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
+ data &= ~0x8000;
+ else
+ data |= 0x8000;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
+{
+}
+
+/*
+static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev)
+{
+ const __le32 *fw_data;
+ volatile u32 *dst_ptr;
+ int me, i, max_me = 4;
+ u32 bo_offset = 0;
+ u32 table_offset, table_size;
+
+ if (adev->asic_type == CHIP_KAVERI)
+ max_me = 5;
+
+ if (adev->gfx.rlc.cp_table_ptr == NULL)
+ return;
+
+ dst_ptr = adev->gfx.rlc.cp_table_ptr;
+ for (me = 0; me < max_me; me++) {
+ if (me == 0) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.ce_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 1) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.pfp_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 2) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.me_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 3) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec2_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ }
+
+ for (i = 0; i < table_size; i++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+ }
+
+ bo_offset += table_size;
+ }
+}
+*/
+
+static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 tmp;
+
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+ tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
+ WREG32(RLC_TTOP_D, tmp);
+
+ tmp = RREG32(RLC_PG_CNTL);
+ tmp |= GFX_PG_ENABLE;
+ WREG32(RLC_PG_CNTL, tmp);
+
+ tmp = RREG32(RLC_AUTO_PG_CTRL);
+ tmp |= AUTO_PG_EN;
+ WREG32(RLC_AUTO_PG_CTRL, tmp);
+ } else {
+ tmp = RREG32(RLC_AUTO_PG_CTRL);
+ tmp &= ~AUTO_PG_EN;
+ WREG32(RLC_AUTO_PG_CTRL, tmp);
+
+ tmp = RREG32(DB_RENDER_CONTROL);
+ }
+}
+
+static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev,
+ u32 se, u32 sh)
+{
+ u32 mask = 0, tmp, tmp1;
+ int i;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);
+ tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+ tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ tmp &= 0xffff0000;
+ tmp |= tmp1;
+ tmp >>= 16;
+
+ for (i = 0; i < adev->gfx.config.max_cu_per_sh; i++) {
+ mask <<= 1;
+ mask |= 1;
+ }
+
+ return (~tmp) & mask;
+}
+
+static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
+{
+ u32 i, j, k, active_cu_number = 0;
+ u32 mask, counter, cu_bitmap;
+ u32 tmp = 0;
+
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ mask = 1;
+ cu_bitmap = 0;
+ counter = 0;
+ for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+ if (gfx_v6_0_get_cu_active_bitmap(adev, i, j) & mask) {
+ if (counter < 2)
+ cu_bitmap |= mask;
+ counter++;
+ }
+ mask <<= 1;
+ }
+
+ active_cu_number += counter;
+ tmp |= (cu_bitmap << (i * 16 + j * 8));
+ }
+ }
+
+ WREG32(RLC_PG_AO_CU_MASK, tmp);
+
+ tmp = RREG32(RLC_MAX_PG_CU);
+ tmp &= ~MAX_PU_CU_MASK;
+ tmp |= MAX_PU_CU(active_cu_number);
+ WREG32(RLC_MAX_PG_CU, tmp);
+}
+
+static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
+ data |= STATIC_PER_CU_PG_ENABLE;
+ else
+ data &= ~STATIC_PER_CU_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
+ data |= DYN_PER_CU_PG_ENABLE;
+ else
+ data &= ~DYN_PER_CU_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_init_gfx_cgpg(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+
+ tmp = RREG32(RLC_PG_CNTL);
+ tmp |= GFX_PG_SRC;
+ WREG32(RLC_PG_CNTL, tmp);
+
+ WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+
+ tmp = RREG32(RLC_AUTO_PG_CTRL);
+
+ tmp &= ~GRBM_REG_SGIT_MASK;
+ tmp |= GRBM_REG_SGIT(0x700);
+ tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
+ WREG32(RLC_AUTO_PG_CTRL, tmp);
+}
+
+static void gfx_v6_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
+{
+ gfx_v6_0_enable_gfx_cgpg(adev, enable);
+ gfx_v6_0_enable_gfx_static_mgpg(adev, enable);
+ gfx_v6_0_enable_gfx_dynamic_mgpg(adev, enable);
+}
+
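+/*
+ * Size, in dwords, of the clear-state buffer that
+ * gfx_v6_0_get_csb_buffer() emits below; the two must stay in sync.
+ */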
+static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
+{
+ u32 count = 0;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (adev->gfx.rlc.cs_data == NULL)
+ return 0;
+
+ /* begin clear state */
+ count += 2;
+ /* context control state */
+ count += 3;
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT)
+ count += 2 + ext->reg_count;
+ else
+ return 0;
+ }
+ }
+ /* pa_sc_raster_config */
+ count += 3;
+ /* end clear state */
+ count += 2;
+ /* clear state */
+ count += 2;
+
+ return count;
+}
+
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
+ volatile u32 *buffer)
+{
+ u32 count = 0, i;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (adev->gfx.rlc.cs_data == NULL)
+ return;
+ if (buffer == NULL)
+ return;
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] =
+ cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
+ } else {
+ return;
+ }
+ }
+ }
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ buffer[count++] = cpu_to_le32(0x2a00126a);
+ break;
+ case CHIP_VERDE:
+ buffer[count++] = cpu_to_le32(0x0000124a);
+ break;
+ case CHIP_OLAND:
+ buffer[count++] = cpu_to_le32(0x00000082);
+ break;
+ case CHIP_HAINAN:
+ buffer[count++] = cpu_to_le32(0x00000000);
+ break;
+ default:
+ buffer[count++] = cpu_to_le32(0x00000000);
+ break;
+ }
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
+}
+
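+/*
+ * Set up power gating: the save/restore and clear-state bases are
+ * always programmed; the CU masks and the individual PG features are
+ * only enabled when the corresponding pg_flags are set.
+ */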
+static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
+{
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GDS |
+ AMD_PG_SUPPORT_RLC_SMU_HS)) {
+ gfx_v6_0_enable_sclk_slowdown_on_pu(adev, true);
+ gfx_v6_0_enable_sclk_slowdown_on_pd(adev, true);
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+ gfx_v6_0_init_gfx_cgpg(adev);
+ gfx_v6_0_enable_cp_pg(adev, true);
+ gfx_v6_0_enable_gds_pg(adev, true);
+ } else {
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+ WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+ }
+ gfx_v6_0_init_ao_cu_mask(adev);
+ gfx_v6_0_update_gfx_pg(adev, true);
+ } else {
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+ WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+ }
+}
+
+static void gfx_v6_0_fini_pg(struct amdgpu_device *adev)
+{
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GDS |
+ AMD_PG_SUPPORT_RLC_SMU_HS)) {
+ gfx_v6_0_update_gfx_pg(adev, false);
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+ gfx_v6_0_enable_cp_pg(adev, false);
+ gfx_v6_0_enable_gds_pg(adev, false);
+ }
+ }
+}
+
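+/*
+ * Latch and read the 64-bit GPU clock counter; the mutex keeps the
+ * capture-plus-two-register read sequence atomic.
+ */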
+static uint64_t gfx_v6_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+{
+ uint64_t clock;
+
+ mutex_lock(&adev->gfx.gpu_clock_mutex);
+ WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ return clock;
+}
+
+static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
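+ /* 0x80000000 sets load_enable; without it this packet would be just NOPs */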
+ amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ amdgpu_ring_write(ring, 0x80000000);
+ amdgpu_ring_write(ring, 0);
+}
+
+static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 6; /* gfx_v6_0_ring_emit_ib */
+}
+
+static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+ return
+ 5 + /* gfx_v6_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
+ 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+ 3; /* gfx_v6_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+ return
+ 5 + /* gfx_v6_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v6_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v6_0_ring_emit_vm_flush */
+ 14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+}
+
+static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v6_0_select_se_sh,
+};
+
+static int gfx_v6_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
+ adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS;
+ adev->gfx.funcs = &gfx_v6_0_gfx_funcs;
+ gfx_v6_0_set_ring_funcs(adev);
+ gfx_v6_0_set_irq_funcs(adev);
+
+ return 0;
+}
+
+static int gfx_v6_0_sw_init(void *handle)
+{
+ struct amdgpu_ring *ring;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, r;
+
+ r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+ if (r)
+ return r;
+
+ gfx_v6_0_scratch_init(adev);
+
+ r = gfx_v6_0_init_microcode(adev);
+ if (r) {
+ DRM_ERROR("Failed to load gfx firmware!\n");
+ return r;
+ }
+
+ r = gfx_v6_0_rlc_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to init rlc BOs!\n");
+ return r;
+ }
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ ring->ring_obj = NULL;
+ sprintf(ring->name, "gfx");
+ r = amdgpu_ring_init(adev, ring, 1024,
+ 0x80000000, 0xf,
+ &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
+ AMDGPU_RING_TYPE_GFX);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ unsigned irq_type;
+
+ if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
+ DRM_ERROR("Too many (%d) compute rings!\n", i);
+ break;
+ }
+ ring = &adev->gfx.compute_ring[i];
+ ring->ring_obj = NULL;
+ ring->use_doorbell = false;
+ ring->doorbell_index = 0;
+ ring->me = 1;
+ ring->pipe = i;
+ ring->queue = i;
+ sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
+ r = amdgpu_ring_init(adev, ring, 1024,
+ 0x80000000, 0xf,
+ &adev->gfx.eop_irq, irq_type,
+ AMDGPU_RING_TYPE_COMPUTE);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+static int gfx_v6_0_sw_fini(void *handle)
+{
+ int i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
+ amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
+ amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+ amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+
+ gfx_v6_0_rlc_fini(adev);
+
+ return 0;
+}
+
+static int gfx_v6_0_hw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ gfx_v6_0_gpu_init(adev);
+
+ r = gfx_v6_0_rlc_resume(adev);
+ if (r)
+ return r;
+
+ r = gfx_v6_0_cp_resume(adev);
+ if (r)
+ return r;
+
+ adev->gfx.ce_ram_size = 0x8000;
+
+ return r;
+}
+
+static int gfx_v6_0_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ gfx_v6_0_cp_enable(adev, false);
+ gfx_v6_0_rlc_stop(adev);
+ gfx_v6_0_fini_pg(adev);
+
+ return 0;
+}
+
+static int gfx_v6_0_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return gfx_v6_0_hw_fini(adev);
+}
+
+static int gfx_v6_0_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return gfx_v6_0_hw_init(adev);
+}
+
+static bool gfx_v6_0_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (RREG32(GRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
+ return false;
+ else
+ return true;
+}
+
+static int gfx_v6_0_wait_for_idle(void *handle)
+{
+ unsigned i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (gfx_v6_0_is_idle(handle))
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int gfx_v6_0_soft_reset(void *handle)
+{
+ return 0;
+}
+
+static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ default:
+ break;
+ }
+}
+
+static void gfx_v6_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
+ int ring,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ if (ring == 0) {
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING1);
+ cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING1, cp_int_cntl);
+ } else {
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING2);
+ cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING2, cp_int_cntl);
+ }
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ if (ring == 0) {
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING1);
+ cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING1, cp_int_cntl);
+ } else {
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING2);
+ cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING2, cp_int_cntl);
+ }
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+static int gfx_v6_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v6_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v6_0_set_eop_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ switch (type) {
+ case AMDGPU_CP_IRQ_GFX_EOP:
+ gfx_v6_0_set_gfx_eop_interrupt_state(adev, state);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
+ gfx_v6_0_set_compute_eop_interrupt_state(adev, 0, state);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
+ gfx_v6_0_set_compute_eop_interrupt_state(adev, 1, state);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ switch (entry->ring_id) {
+ case 0:
+ amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
+ break;
+ case 1:
+ case 2:
+ amdgpu_fence_process(&adev->gfx.compute_ring[entry->ring_id - 1]);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal register access in command stream\n");
+ schedule_work(&adev->reset_work);
+ return 0;
+}
+
+static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal instruction in command stream\n");
+ schedule_work(&adev->reset_work);
+ return 0;
+}
+
+static int gfx_v6_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ bool gate = false;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (state == AMD_CG_STATE_GATE)
+ gate = true;
+
+ gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+ if (gate) {
+ gfx_v6_0_enable_mgcg(adev, true);
+ gfx_v6_0_enable_cgcg(adev, true);
+ } else {
+ gfx_v6_0_enable_cgcg(adev, false);
+ gfx_v6_0_enable_mgcg(adev, false);
+ }
+ gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+ return 0;
+}
+
+static int gfx_v6_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ bool gate = false;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (state == AMD_PG_STATE_GATE)
+ gate = true;
+
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GDS |
+ AMD_PG_SUPPORT_RLC_SMU_HS)) {
+ gfx_v6_0_update_gfx_pg(adev, gate);
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+ gfx_v6_0_enable_cp_pg(adev, gate);
+ gfx_v6_0_enable_gds_pg(adev, gate);
+ }
+ }
+
+ return 0;
+}
+
+const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
+ .name = "gfx_v6_0",
+ .early_init = gfx_v6_0_early_init,
+ .late_init = NULL,
+ .sw_init = gfx_v6_0_sw_init,
+ .sw_fini = gfx_v6_0_sw_fini,
+ .hw_init = gfx_v6_0_hw_init,
+ .hw_fini = gfx_v6_0_hw_fini,
+ .suspend = gfx_v6_0_suspend,
+ .resume = gfx_v6_0_resume,
+ .is_idle = gfx_v6_0_is_idle,
+ .wait_for_idle = gfx_v6_0_wait_for_idle,
+ .soft_reset = gfx_v6_0_soft_reset,
+ .set_clockgating_state = gfx_v6_0_set_clockgating_state,
+ .set_powergating_state = gfx_v6_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
+ .get_rptr = gfx_v6_0_ring_get_rptr,
+ .get_wptr = gfx_v6_0_ring_get_wptr,
+ .set_wptr = gfx_v6_0_ring_set_wptr_gfx,
+ .parse_cs = NULL,
+ .emit_ib = gfx_v6_0_ring_emit_ib,
+ .emit_fence = gfx_v6_0_ring_emit_fence,
+ .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
+ .emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
+ .emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
+ .test_ring = gfx_v6_0_ring_test_ring,
+ .test_ib = gfx_v6_0_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
+ .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
+};
+
+static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
+ .get_rptr = gfx_v6_0_ring_get_rptr,
+ .get_wptr = gfx_v6_0_ring_get_wptr,
+ .set_wptr = gfx_v6_0_ring_set_wptr_compute,
+ .parse_cs = NULL,
+ .emit_ib = gfx_v6_0_ring_emit_ib,
+ .emit_fence = gfx_v6_0_ring_emit_fence,
+ .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
+ .emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
+ .emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
+ .test_ring = gfx_v6_0_ring_test_ring,
+ .test_ib = gfx_v6_0_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
+};
+
+static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+ adev->gfx.gfx_ring[i].funcs = &gfx_v6_0_ring_funcs_gfx;
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ adev->gfx.compute_ring[i].funcs = &gfx_v6_0_ring_funcs_compute;
+}
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_eop_irq_funcs = {
+ .set = gfx_v6_0_set_eop_interrupt_state,
+ .process = gfx_v6_0_eop_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_reg_irq_funcs = {
+ .set = gfx_v6_0_set_priv_reg_fault_state,
+ .process = gfx_v6_0_priv_reg_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_inst_irq_funcs = {
+ .set = gfx_v6_0_set_priv_inst_fault_state,
+ .process = gfx_v6_0_priv_inst_irq,
+};
+
+static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
+ adev->gfx.eop_irq.funcs = &gfx_v6_0_eop_irq_funcs;
+
+ adev->gfx.priv_reg_irq.num_types = 1;
+ adev->gfx.priv_reg_irq.funcs = &gfx_v6_0_priv_reg_irq_funcs;
+
+ adev->gfx.priv_inst_irq.num_types = 1;
+ adev->gfx.priv_inst_irq.funcs = &gfx_v6_0_priv_inst_irq_funcs;
+}
+
+static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
+{
+ int i, j, k, counter, active_cu_number = 0;
+ u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+ struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+
+ memset(cu_info, 0, sizeof(*cu_info));
+
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ mask = 1;
+ ao_bitmap = 0;
+ counter = 0;
+ bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j);
+ cu_info->bitmap[i][j] = bitmap;
+
+ for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+ if (bitmap & mask) {
+ if (counter < 2)
+ ao_bitmap |= mask;
+ counter++;
+ }
+ mask <<= 1;
+ }
+ active_cu_number += counter;
+ ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ }
+ }
+
+ cu_info->number = active_cu_number;
+ cu_info->ao_cu_mask = ao_cu_mask;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
new file mode 100644
index 000000000000..b9657e72b248
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V6_0_H__
+#define __GFX_V6_0_H__
+
+extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index d869d058ef24..71116da9e782 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1645,6 +1645,147 @@ static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
return (~data) & mask;
}
+static void
+gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
+{
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+ SE_XSEL(1) | SE_YSEL(1);
+ *rconf1 |= 0x0;
+ break;
+ case CHIP_HAWAII:
+ *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
+ RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
+ PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
+ SE_YSEL(3);
+ *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
+ SE_PAIR_YSEL(2);
+ break;
+ case CHIP_KAVERI:
+ *rconf |= RB_MAP_PKR0(2);
+ *rconf1 |= 0x0;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ *rconf |= 0x0;
+ *rconf1 |= 0x0;
+ break;
+ default:
+ DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+ break;
+ }
+}
+
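+/*
+ * When some RBs are harvested, rewrite PA_SC_RASTER_CONFIG(_1) per SE
+ * so that packer, RB and SE-pair mappings only point at enabled RBs.
+ */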
+static void
+gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+ u32 raster_config, u32 raster_config_1,
+ unsigned rb_mask, unsigned num_rb)
+{
+ unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+ unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+ unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+ unsigned rb_per_se = num_rb / num_se;
+ unsigned se_mask[4];
+ unsigned se;
+
+ se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+ se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+ se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+ se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+ WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+ WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+ WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+ if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
+ (!se_mask[2] && !se_mask[3]))) {
+ raster_config_1 &= ~SE_PAIR_MAP_MASK;
+
+ if (!se_mask[0] && !se_mask[1]) {
+ raster_config_1 |=
+ SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
+ } else {
+ raster_config_1 |=
+ SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
+ }
+ }
+
+ for (se = 0; se < num_se; se++) {
+ unsigned raster_config_se = raster_config;
+ unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
+ unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
+ int idx = (se / 2) * 2;
+
+ if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
+ raster_config_se &= ~SE_MAP_MASK;
+
+ if (!se_mask[idx]) {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+ } else {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+ }
+ }
+
+ pkr0_mask &= rb_mask;
+ pkr1_mask &= rb_mask;
+ if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
+ raster_config_se &= ~PKR_MAP_MASK;
+
+ if (!pkr0_mask) {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+ } else {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+ }
+ }
+
+ if (rb_per_se >= 2) {
+ unsigned rb0_mask = 1 << (se * rb_per_se);
+ unsigned rb1_mask = rb0_mask << 1;
+
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR0_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+
+ if (rb_per_se > 2) {
+ rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
+ rb1_mask = rb0_mask << 1;
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR1_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+ }
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on CI+ */
+ gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
+ WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on CI+ */
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+}
+
/**
* gfx_v7_0_setup_rb - setup the RBs on the asic
*
@@ -1658,9 +1799,11 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
{
int i, j;
u32 data;
+ u32 raster_config = 0, raster_config_1 = 0;
u32 active_rbs = 0;
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
+ unsigned num_rb_pipes;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -1672,10 +1815,25 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
}
}
gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
+
+ num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
+ adev->gfx.config.max_shader_engines, 16);
+
+ gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);
+
+ if (!adev->gfx.config.backend_enable_mask ||
+ adev->gfx.config.num_rbs >= num_rb_pipes) {
+ WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
+ WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+ } else {
+ gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
+ adev->gfx.config.backend_enable_mask,
+ num_rb_pipes);
+ }
+ mutex_unlock(&adev->grbm_idx_mutex);
}
/**
@@ -2096,6 +2254,25 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, control);
}
+static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
+ uint32_t dw2 = 0;
+
+ dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
+ if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+ /* set load_global_config & load_global_uconfig */
+ dw2 |= 0x8001;
+ /* set load_cs_sh_regs */
+ dw2 |= 0x01000000;
+ /* set load_per_context_state & load_gfx_sh_regs */
+ dw2 |= 0x10002;
+ }
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ amdgpu_ring_write(ring, dw2);
+ amdgpu_ring_write(ring, 0);
+}
+
/**
* gfx_v7_0_ring_test_ib - basic ring IB test
*
@@ -2443,7 +2620,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
return 0;
}
-static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+static u32 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
return ring->adev->wb.wb[ring->rptr_offs];
}
@@ -2463,11 +2640,6 @@ static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
(void)RREG32(mmCP_RB0_WPTR);
}
-static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
-{
- return ring->adev->wb.wb[ring->rptr_offs];
-}
-
static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
/* XXX check if swapping is necessary on BE */
@@ -2755,8 +2927,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
u64 wb_gpu_addr;
u32 *buf;
struct bonaire_mqd *mqd;
-
- gfx_v7_0_cp_compute_enable(adev, true);
+ struct amdgpu_ring *ring;
/* fix up chicken bits */
tmp = RREG32(mmCP_CPF_DEBUG);
@@ -2791,7 +2962,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
/* init the queues. Just two for now. */
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+ ring = &adev->gfx.compute_ring[i];
if (ring->mqd_obj == NULL) {
r = amdgpu_bo_create(adev,
@@ -2970,6 +3141,13 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
amdgpu_bo_unreserve(ring->mqd_obj);
ring->ready = true;
+ }
+
+ gfx_v7_0_cp_compute_enable(adev, true);
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+
r = amdgpu_ring_test_ring(ring);
if (r)
ring->ready = false;
@@ -4176,6 +4354,41 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+{
+ return
+ 4; /* gfx_v7_0_ring_emit_ib_gfx */
+}
+
+static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+ return
+ 20 + /* gfx_v7_0_ring_emit_gds_switch */
+ 7 + /* gfx_v7_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+ 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+ 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+ 3; /* gfx_v7_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
+{
+ return
+ 4; /* gfx_v7_0_ring_emit_ib_compute */
+}
+
+static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+ return
+ 20 + /* gfx_v7_0_ring_emit_gds_switch */
+ 7 + /* gfx_v7_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v7_0_ring_emit_vm_flush */
+ 7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+}
+
static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v7_0_select_se_sh,
@@ -4465,24 +4678,21 @@ static int gfx_v7_0_sw_init(void *handle)
}
/* reserve GDS, GWS and OA resource for gfx */
- r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GDS, 0,
- NULL, NULL, &adev->gds.gds_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+ &adev->gds.gds_gfx_bo, NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GWS, 0,
- NULL, NULL, &adev->gds.gws_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+ &adev->gds.gws_gfx_bo, NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_OA, 0,
- NULL, NULL, &adev->gds.oa_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+ &adev->gds.oa_gfx_bo, NULL, NULL);
if (r)
return r;
@@ -4498,9 +4708,9 @@ static int gfx_v7_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
- amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
- amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+ amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -4931,7 +5141,7 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
- .get_rptr = gfx_v7_0_ring_get_rptr_gfx,
+ .get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
.parse_cs = NULL,
@@ -4946,10 +5156,13 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
+ .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
+ .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
- .get_rptr = gfx_v7_0_ring_get_rptr_compute,
+ .get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_compute,
.set_wptr = gfx_v7_0_ring_set_wptr_compute,
.parse_cs = NULL,
@@ -4964,6 +5177,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
+ .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
};
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index bff8668e9e6d..6c6ff57b1c95 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -270,7 +270,8 @@ static const u32 tonga_mgcg_cgcg_init[] =
static const u32 golden_settings_polaris11_a11[] =
{
- mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+ mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
+ mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -279,7 +280,7 @@ static const u32 golden_settings_polaris11_a11[] =
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
- mmSQ_CONFIG, 0x07f80000, 0x07180000,
+ mmSQ_CONFIG, 0x07f80000, 0x01180000,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
@@ -301,8 +302,8 @@ static const u32 polaris11_golden_common_all[] =
static const u32 golden_settings_polaris10_a11[] =
{
mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
- mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
- mmCB_HW_CONTROL_2, 0, 0x0f000000,
+ mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
+ mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -409,6 +410,7 @@ static const u32 golden_settings_iceland_a11[] =
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
@@ -505,8 +507,10 @@ static const u32 cz_golden_settings_a11[] =
mmGB_GPU_ID, 0x0000000f, 0x00000000,
mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
+ mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
@@ -699,7 +703,10 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
polaris10_golden_common_all,
(const u32)ARRAY_SIZE(polaris10_golden_common_all));
WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
- if (adev->pdev->revision == 0xc7) {
+ if (adev->pdev->revision == 0xc7 &&
+ ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
+ (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
+ (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) {
amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
}
@@ -1229,10 +1236,9 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
if (adev->gfx.rlc.clear_state_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+ dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
adev->gfx.rlc.clear_state_obj = NULL;
}
@@ -1244,7 +1250,6 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
adev->gfx.rlc.cp_table_obj = NULL;
}
@@ -1286,14 +1291,14 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
&adev->gfx.rlc.clear_state_gpu_addr);
if (r) {
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+ dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r);
gfx_v8_0_rlc_fini(adev);
return r;
}
r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
if (r) {
- dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+ dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r);
gfx_v8_0_rlc_fini(adev);
return r;
}
@@ -1328,7 +1333,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
&adev->gfx.rlc.cp_table_gpu_addr);
if (r) {
amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
- dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+ dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r);
return r;
}
r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
@@ -1341,7 +1346,6 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
}
return 0;
@@ -1357,7 +1361,6 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
-
amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
adev->gfx.mec.hpd_eop_obj = NULL;
}
@@ -2078,24 +2081,21 @@ static int gfx_v8_0_sw_init(void *handle)
}
/* reserve GDS, GWS and OA resource for gfx */
- r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
- NULL, &adev->gds.gds_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+ &adev->gds.gds_gfx_bo, NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
- NULL, &adev->gds.gws_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+ &adev->gds.gws_gfx_bo, NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_OA, 0, NULL,
- NULL, &adev->gds.oa_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+ &adev->gds.oa_gfx_bo, NULL, NULL);
if (r)
return r;
@@ -2113,9 +2113,9 @@ static int gfx_v8_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
- amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
- amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+ amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -2123,9 +2123,7 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
gfx_v8_0_mec_fini(adev);
-
gfx_v8_0_rlc_fini(adev);
-
gfx_v8_0_free_microcode(adev);
return 0;
@@ -3461,19 +3459,16 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
else
data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
- if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ if (se_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
- } else if (se_num == 0xffffffff) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
- } else if (sh_num == 0xffffffff) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
- } else {
+
+ if (sh_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
- }
+
WREG32(mmGRBM_GFX_INDEX, data);
}
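
The rewrite above flattens the old four-way if/else into two independent choices, one per dimension: broadcast when the index is 0xffffffff, otherwise select the specific SE or SH. A minimal user-space sketch of the REG_SET_FIELD-style bit math this relies on, with a hypothetical 2-bit field at bit 4 standing in for the generated register headers:

#include <stdint.h>
#include <stdio.h>

#define FIELD_SHIFT 4
#define FIELD_MASK  (0x3u << FIELD_SHIFT)

/* clear the field, then OR in the new value masked to the field width */
static uint32_t set_field(uint32_t reg, uint32_t val)
{
	return (reg & ~FIELD_MASK) | ((val << FIELD_SHIFT) & FIELD_MASK);
}

int main(void)
{
	uint32_t data = 0xffffffffu;

	printf("0x%08x\n", (unsigned)set_field(data, 2)); /* 0xffffffef */
	return 0;
}
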
@@ -3486,11 +3481,10 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
- data = RREG32(mmCC_RB_BACKEND_DISABLE);
- data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ data = RREG32(mmCC_RB_BACKEND_DISABLE) |
+ RREG32(mmGC_USER_RB_BACKEND_DISABLE);
- data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
- data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+ data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se);
@@ -3498,13 +3492,163 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
return (~data) & mask;
}
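
gfx_v8_0_get_rb_active_bitmap() now ORs both disable registers first and extracts the field once with REG_GET_FIELD instead of a manual mask-and-shift. A minimal sketch of the derivation, assuming a hypothetical 4-bit BACKEND_DISABLE field at bit 16 (the real mask and shift come from the register headers):

#include <stdint.h>
#include <stdio.h>

#define BACKEND_DISABLE_MASK  0x000F0000u
#define BACKEND_DISABLE_SHIFT 16

static uint32_t rb_active_bitmap(uint32_t cc_disable, uint32_t user_disable,
				 unsigned num_rbs)
{
	/* OR both disable sources, extract the field, invert under the width mask */
	uint32_t disabled = ((cc_disable | user_disable) & BACKEND_DISABLE_MASK)
			    >> BACKEND_DISABLE_SHIFT;
	uint32_t width_mask = (1u << num_rbs) - 1;

	return ~disabled & width_mask;
}

int main(void)
{
	/* RB2 disabled by fuses, RB0 disabled by the user: RB1 and RB3 remain */
	printf("0x%x\n", (unsigned)rb_active_bitmap(0x4u << 16, 0x1u << 16, 4));
	return 0;
}
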
+static void
+gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
+{
+ switch (adev->asic_type) {
+ case CHIP_FIJI:
+ *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
+ RB_XSEL2(1) | PKR_MAP(2) |
+ PKR_XSEL(1) | PKR_YSEL(1) |
+ SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
+ *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
+ SE_PAIR_YSEL(2);
+ break;
+ case CHIP_TONGA:
+ case CHIP_POLARIS10:
+ *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+ SE_XSEL(1) | SE_YSEL(1);
+ *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
+ SE_PAIR_YSEL(2);
+ break;
+ case CHIP_TOPAZ:
+ case CHIP_CARRIZO:
+ *rconf |= RB_MAP_PKR0(2);
+ *rconf1 |= 0x0;
+ break;
+ case CHIP_POLARIS11:
+ *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+ SE_XSEL(1) | SE_YSEL(1);
+ *rconf1 |= 0x0;
+ break;
+ case CHIP_STONEY:
+ *rconf |= 0x0;
+ *rconf1 |= 0x0;
+ break;
+ default:
+ DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+ break;
+ }
+}
+
+static void
+gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+ u32 raster_config, u32 raster_config_1,
+ unsigned rb_mask, unsigned num_rb)
+{
+ unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+ unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+ unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+ unsigned rb_per_se = num_rb / num_se;
+ unsigned se_mask[4];
+ unsigned se;
+
+ se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+ se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+ se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+ se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+ WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+ WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+ WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+ if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
+ (!se_mask[2] && !se_mask[3]))) {
+ raster_config_1 &= ~SE_PAIR_MAP_MASK;
+
+ if (!se_mask[0] && !se_mask[1]) {
+ raster_config_1 |=
+ SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
+ } else {
+ raster_config_1 |=
+ SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
+ }
+ }
+
+ for (se = 0; se < num_se; se++) {
+ unsigned raster_config_se = raster_config;
+ unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
+ unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
+ int idx = (se / 2) * 2;
+
+ if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
+ raster_config_se &= ~SE_MAP_MASK;
+
+ if (!se_mask[idx]) {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+ } else {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+ }
+ }
+
+ pkr0_mask &= rb_mask;
+ pkr1_mask &= rb_mask;
+ if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
+ raster_config_se &= ~PKR_MAP_MASK;
+
+ if (!pkr0_mask) {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+ } else {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+ }
+ }
+
+ if (rb_per_se >= 2) {
+ unsigned rb0_mask = 1 << (se * rb_per_se);
+ unsigned rb1_mask = rb0_mask << 1;
+
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR0_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+
+ if (rb_per_se > 2) {
+ rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
+ rb1_mask = rb0_mask << 1;
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR1_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+ }
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on VI */
+ gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
+ WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on VI */
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+}
+
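
gfx_v8_0_write_harvested_raster_configs() above splits the global RB mask into one contiguous slice per shader engine before deciding which SE/PKR/RB map fields to remap. A small sketch of that slicing, assuming a hypothetical 8-RB part with two SEs (four RBs per SE) and RB4 harvested:

#include <stdio.h>

int main(void)
{
	unsigned rb_mask = 0xEF;	/* RB4 harvested */
	unsigned rb_per_se = 4;
	unsigned se_mask[2];

	se_mask[0] = ((1u << rb_per_se) - 1) & rb_mask;		/* 0x0F */
	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;	/* 0xE0 */

	for (unsigned se = 0; se < 2; se++)
		printf("SE%u RBs: 0x%02x\n", se, se_mask[se]);
	return 0;
}
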
static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
{
int i, j;
u32 data;
+ u32 raster_config = 0, raster_config_1 = 0;
u32 active_rbs = 0;
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
+ unsigned num_rb_pipes;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -3516,10 +3660,26 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
}
}
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
+
+ num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
+ adev->gfx.config.max_shader_engines, 16);
+
+ gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);
+
+ if (!adev->gfx.config.backend_enable_mask ||
+ adev->gfx.config.num_rbs >= num_rb_pipes) {
+ WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
+ WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+ } else {
+ gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
+ adev->gfx.config.backend_enable_mask,
+ num_rb_pipes);
+ }
+
+ mutex_unlock(&adev->grbm_idx_mutex);
}
/**
@@ -3572,16 +3732,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
u32 tmp;
int i;
- tmp = RREG32(mmGRBM_CNTL);
- tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
- WREG32(mmGRBM_CNTL, tmp);
-
+ WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
gfx_v8_0_tiling_mode_table_init(adev);
-
gfx_v8_0_setup_rb(adev);
gfx_v8_0_get_cu_info(adev);
@@ -3765,9 +3921,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
sizeof(indirect_start_offsets)/sizeof(int));
/* save and restore list */
- temp = RREG32(mmRLC_SRM_CNTL);
- temp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
- WREG32(mmRLC_SRM_CNTL, temp);
+ WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
WREG32(mmRLC_SRM_ARAM_ADDR, 0);
for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
@@ -3804,11 +3958,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
- uint32_t data;
-
- data = RREG32(mmRLC_SRM_CNTL);
- data |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
- WREG32(mmRLC_SRM_CNTL, data);
+ WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
}
static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
@@ -3818,75 +3968,34 @@ static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_DMG)) {
- data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
- data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
- data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
- WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
-
- data = 0;
- data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
- data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
- data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
- data |= (0x10 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
- WREG32(mmRLC_PG_DELAY, data);
+ WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
- data = RREG32(mmRLC_PG_DELAY_2);
- data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
- data |= (0x3 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
- WREG32(mmRLC_PG_DELAY_2, data);
+ data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
+ WREG32(mmRLC_PG_DELAY, data);
- data = RREG32(mmRLC_AUTO_PG_CTRL);
- data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
- data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
- WREG32(mmRLC_AUTO_PG_CTRL, data);
+ WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
+ WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
}
}
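
Most conversions in this patch collapse an explicit read/modify/write into a single WREG32_FIELD() call. A hedged sketch of what such a helper typically expands to, with stubbed register accessors (the real macro lives in the amdgpu headers and may differ in detail):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];
static uint32_t RREG32(unsigned off)		{ return regs[off]; }
static void WREG32(unsigned off, uint32_t v)	{ regs[off] = v; }

/* read-modify-write of one field: clear it, then OR in the shifted value */
#define WREG32_FIELD_SKETCH(off, mask, shift, val) \
	WREG32(off, (RREG32(off) & ~(mask)) | (((val) << (shift)) & (mask)))

int main(void)
{
	regs[3] = 0x00ff00ffu;
	WREG32_FIELD_SKETCH(3, 0x0000ff00u, 8, 0x60u); /* IDLE_POLL_COUNT-style */
	printf("0x%08x\n", (unsigned)regs[3]);         /* 0x00ff60ff */
	return 0;
}
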
static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
bool enable)
{
- u32 data, orig;
-
- orig = data = RREG32(mmRLC_PG_CNTL);
-
- if (enable)
- data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
-
- if (orig != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
}
static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
bool enable)
{
- u32 data, orig;
-
- orig = data = RREG32(mmRLC_PG_CNTL);
-
- if (enable)
- data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
-
- if (orig != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
}
static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
{
- u32 data, orig;
-
- orig = data = RREG32(mmRLC_PG_CNTL);
-
- if (enable)
- data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
- else
- data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
-
- if (orig != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 1 : 0);
}
static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
@@ -3923,36 +4032,26 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
}
}
-void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(mmRLC_CNTL);
-
- tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
- WREG32(mmRLC_CNTL, tmp);
+ WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);
gfx_v8_0_enable_gui_idle_interrupt(adev, false);
-
gfx_v8_0_wait_for_rlc_serdes(adev);
}
static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(mmGRBM_SOFT_RESET);
-
- tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
- WREG32(mmGRBM_SOFT_RESET, tmp);
+ WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
udelay(50);
- tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
- WREG32(mmGRBM_SOFT_RESET, tmp);
+
+ WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
udelay(50);
}
static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(mmRLC_CNTL);
-
- tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1);
- WREG32(mmRLC_CNTL, tmp);
+ WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);
/* carrizo does enable cp interrupt after cp is initialized */
if (!(adev->flags & AMD_IS_APU))
@@ -3994,14 +4093,13 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
/* disable CG */
WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
if (adev->asic_type == CHIP_POLARIS11 ||
- adev->asic_type == CHIP_POLARIS10)
+ adev->asic_type == CHIP_POLARIS10)
WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0);
/* disable PG */
WREG32(mmRLC_PG_CNTL, 0);
gfx_v8_0_rlc_reset(adev);
-
gfx_v8_0_init_pg(adev);
if (!adev->pp_enabled) {
@@ -4296,12 +4394,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
gfx_v8_0_cp_gfx_start(adev);
ring->ready = true;
r = amdgpu_ring_test_ring(ring);
- if (r) {
+ if (r)
ring->ready = false;
- return r;
- }
- return 0;
+ return r;
}
static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
@@ -4976,7 +5072,6 @@ static int gfx_v8_0_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfx_v8_0_init_golden_registers(adev);
-
gfx_v8_0_gpu_init(adev);
r = gfx_v8_0_rlc_resume(adev);
@@ -4984,8 +5079,6 @@ static int gfx_v8_0_hw_init(void *handle)
return r;
r = gfx_v8_0_cp_resume(adev);
- if (r)
- return r;
return r;
}
@@ -5033,25 +5126,22 @@ static bool gfx_v8_0_is_idle(void *handle)
static int gfx_v8_0_wait_for_idle(void *handle)
{
unsigned i;
- u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
-
- if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
+ if (gfx_v8_0_is_idle(handle))
return 0;
+
udelay(1);
}
return -ETIMEDOUT;
}
-static int gfx_v8_0_soft_reset(void *handle)
+static int gfx_v8_0_check_soft_reset(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* GRBM_STATUS */
tmp = RREG32(mmGRBM_STATUS);
@@ -5060,16 +5150,12 @@ static int gfx_v8_0_soft_reset(void *handle)
GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
- GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
+ GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
+ GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
- }
-
- if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
- grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
- GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
}
@@ -5080,73 +5166,199 @@ static int gfx_v8_0_soft_reset(void *handle)
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
+ if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
+ REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
+ REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+ SOFT_RESET_CPF, 1);
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+ SOFT_RESET_CPC, 1);
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+ SOFT_RESET_CPG, 1);
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
+ SOFT_RESET_GRBM, 1);
+ }
+
/* SRBM_STATUS */
tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
+ if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+ SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
if (grbm_soft_reset || srbm_soft_reset) {
- /* stop the rlc */
- gfx_v8_0_rlc_stop(adev);
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true;
+ adev->gfx.grbm_soft_reset = grbm_soft_reset;
+ adev->gfx.srbm_soft_reset = srbm_soft_reset;
+ } else {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false;
+ adev->gfx.grbm_soft_reset = 0;
+ adev->gfx.srbm_soft_reset = 0;
+ }
+
+ return 0;
+}
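
check_soft_reset() no longer performs any reset itself: it only records the needed SOFT_RESET bits and latches a per-IP hang flag that the later pre/soft/post stages test before doing anything. A minimal sketch of that split, with hypothetical state in place of the driver's structures:

#include <stdbool.h>
#include <stdio.h>

struct gfx_state {
	bool hang;
	unsigned grbm_soft_reset;
};

/* phase 1: diagnose only, stash the reset mask for the later phases */
static void check_soft_reset(struct gfx_state *s, unsigned busy_bits)
{
	s->grbm_soft_reset = busy_bits ? 0x3 : 0; /* e.g. CP | GFX */
	s->hang = s->grbm_soft_reset != 0;
}

static void soft_reset(struct gfx_state *s)
{
	if (!s->hang)
		return; /* phases 2-4 all bail out early when nothing hung */
	printf("resetting with GRBM mask 0x%x\n", s->grbm_soft_reset);
}

int main(void)
{
	struct gfx_state s;

	check_soft_reset(&s, 1);
	soft_reset(&s);
	return 0;
}
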
+
+static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ int i;
+
+ vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
+ u32 tmp;
+ tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST,
+ DEQUEUE_REQ, 2);
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
+ break;
+ udelay(1);
+ }
+ }
+}
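
gfx_v8_0_inactive_hqd() follows the driver's usual bounded-poll idiom: request a dequeue, then spin on the ACTIVE bit for at most usec_timeout iterations. A generic sketch of the pattern, assuming a hypothetical read_active() accessor in place of RREG32(mmCP_HQD_ACTIVE):

#include <stdbool.h>
#include <unistd.h>

/* hypothetical stand-in for RREG32(mmCP_HQD_ACTIVE) & ACTIVE_MASK */
static bool read_active(void) { return false; }

static int wait_inactive(unsigned usec_timeout)
{
	for (unsigned i = 0; i < usec_timeout; i++) {
		if (!read_active())
			return 0;	/* queue drained */
		usleep(1);		/* udelay(1) in kernel context */
	}
	return -1;			/* timed out; caller resets anyway */
}

int main(void) { return wait_inactive(100000); }
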
+static int gfx_v8_0_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ return 0;
+
+ grbm_soft_reset = adev->gfx.grbm_soft_reset;
+ srbm_soft_reset = adev->gfx.srbm_soft_reset;
+
+ /* stop the rlc */
+ gfx_v8_0_rlc_stop(adev);
+
+ if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
/* Disable GFX parsing/prefetching */
gfx_v8_0_cp_gfx_enable(adev, false);
+ if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
+ int i;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+ gfx_v8_0_inactive_hqd(adev, ring);
+ }
/* Disable MEC parsing/prefetching */
gfx_v8_0_cp_compute_enable(adev, false);
+ }
- if (grbm_soft_reset || srbm_soft_reset) {
- tmp = RREG32(mmGMCON_DEBUG);
- tmp = REG_SET_FIELD(tmp,
- GMCON_DEBUG, GFX_STALL, 1);
- tmp = REG_SET_FIELD(tmp,
- GMCON_DEBUG, GFX_CLEAR, 1);
- WREG32(mmGMCON_DEBUG, tmp);
+ return 0;
+}
- udelay(50);
- }
+static int gfx_v8_0_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
- if (grbm_soft_reset) {
- tmp = RREG32(mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmGRBM_SOFT_RESET);
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ return 0;
- udelay(50);
+ grbm_soft_reset = adev->gfx.grbm_soft_reset;
+ srbm_soft_reset = adev->gfx.srbm_soft_reset;
- tmp &= ~grbm_soft_reset;
- WREG32(mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmGRBM_SOFT_RESET);
- }
+ if (grbm_soft_reset || srbm_soft_reset) {
+ tmp = RREG32(mmGMCON_DEBUG);
+ tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
+ tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
+ WREG32(mmGMCON_DEBUG, tmp);
+ udelay(50);
+ }
- if (srbm_soft_reset) {
- tmp = RREG32(mmSRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
+ if (grbm_soft_reset) {
+ tmp = RREG32(mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~srbm_soft_reset;
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32(mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmGRBM_SOFT_RESET);
+ }
- if (grbm_soft_reset || srbm_soft_reset) {
- tmp = RREG32(mmGMCON_DEBUG);
- tmp = REG_SET_FIELD(tmp,
- GMCON_DEBUG, GFX_STALL, 0);
- tmp = REG_SET_FIELD(tmp,
- GMCON_DEBUG, GFX_CLEAR, 0);
- WREG32(mmGMCON_DEBUG, tmp);
- }
+ if (srbm_soft_reset) {
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
- /* Wait a little for things to settle down */
udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
}
+
+ if (grbm_soft_reset || srbm_soft_reset) {
+ tmp = RREG32(mmGMCON_DEBUG);
+ tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
+ tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
+ WREG32(mmGMCON_DEBUG, tmp);
+ }
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
+ return 0;
+}
+
+static void gfx_v8_0_init_hqd(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
+ WREG32(mmCP_HQD_PQ_RPTR, 0);
+ WREG32(mmCP_HQD_PQ_WPTR, 0);
+ vi_srbm_select(adev, 0, 0, 0, 0);
+}
+
+static int gfx_v8_0_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ return 0;
+
+ grbm_soft_reset = adev->gfx.grbm_soft_reset;
+ srbm_soft_reset = adev->gfx.srbm_soft_reset;
+
+ if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
+ gfx_v8_0_cp_gfx_resume(adev);
+
+ if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
+ int i;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+ gfx_v8_0_init_hqd(adev, ring);
+ }
+ gfx_v8_0_cp_compute_resume(adev);
+ }
+ gfx_v8_0_rlc_start(adev);
+
return 0;
}
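
post_soft_reset() mirrors pre_soft_reset(): whatever was quiesced before the reset (gfx CP, compute HQDs, RLC) is re-initialized and restarted afterwards, again gated on the cached hang flag. A compact sketch of the resulting four-phase flow as the IP-funcs framework would drive it, with hypothetical callbacks:

#include <stdio.h>

/* hypothetical callbacks mirroring the amd_ip_funcs hooks wired up below */
static void check(void)	{ puts("check: latch reset masks, set hang flag"); }
static void pre(void)	{ puts("pre:   stop RLC, disable CP, drain HQDs"); }
static void reset(void)	{ puts("reset: pulse GRBM/SRBM soft-reset bits"); }
static void post(void)	{ puts("post:  re-init HQDs, resume CP, start RLC"); }

int main(void)
{
	check();
	pre();
	reset();
	post();
	return 0;
}
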
@@ -5265,8 +5477,6 @@ static int gfx_v8_0_late_init(void *handle)
static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- uint32_t data, temp;
-
if (adev->asic_type == CHIP_POLARIS11)
/* Send msg to SMU via Powerplay */
amdgpu_set_powergating_state(adev,
@@ -5274,83 +5484,35 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
enable ?
AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
- temp = data = RREG32(mmRLC_PG_CNTL);
- /* Enable static MGPG */
- if (enable)
- data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- uint32_t data, temp;
-
- temp = data = RREG32(mmRLC_PG_CNTL);
- /* Enable dynamic MGPG */
- if (enable)
- data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- uint32_t data, temp;
-
- temp = data = RREG32(mmRLC_PG_CNTL);
- /* Enable quick PG */
- if (enable)
- data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
-
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
}
static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- u32 data, orig;
-
- orig = data = RREG32(mmRLC_PG_CNTL);
-
- if (enable)
- data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
-
- if (orig != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
}
static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
bool enable)
{
- u32 data, orig;
-
- orig = data = RREG32(mmRLC_PG_CNTL);
-
- if (enable)
- data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
-
- if (orig != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);
/* Read any GFX register to wake up GFX. */
if (!enable)
- data = RREG32(mmDB_RENDER_CONTROL);
+ RREG32(mmDB_RENDER_CONTROL);
}
static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
@@ -5426,15 +5588,15 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
data = RREG32(mmRLC_SERDES_WR_CTRL);
if (adev->asic_type == CHIP_STONEY)
- data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
- RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
- RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
- RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
- RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
- RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
- RLC_SERDES_WR_CTRL__POWER_UP_MASK |
- RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
- RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
+ data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
+ RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
+ RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
+ RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
+ RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
+ RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
+ RLC_SERDES_WR_CTRL__POWER_UP_MASK |
+ RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
+ RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
else
data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
@@ -5457,10 +5619,10 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
#define MSG_ENTER_RLC_SAFE_MODE 1
#define MSG_EXIT_RLC_SAFE_MODE 0
-
-#define RLC_GPR_REG2__REQ_MASK 0x00000001
-#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
-#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
+#define RLC_GPR_REG2__REQ_MASK 0x00000001
+#define RLC_GPR_REG2__REQ__SHIFT 0
+#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
+#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
@@ -5490,7 +5652,7 @@ static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev)
}
for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+ if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
break;
udelay(1);
}
@@ -5518,7 +5680,7 @@ static void cz_exit_rlc_safe_mode(struct amdgpu_device *adev)
}
for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+ if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
break;
udelay(1);
}
@@ -5550,7 +5712,7 @@ static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
}
for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+ if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
@@ -5577,7 +5739,7 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
}
for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+ if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
@@ -5618,21 +5780,12 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
- if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
/* 1 - RLC memory Light sleep */
- temp = data = RREG32(mmRLC_MEM_SLP_CNTL);
- data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
- if (temp != data)
- WREG32(mmRLC_MEM_SLP_CNTL, data);
- }
+ WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);
- if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
- /* 2 - CP memory Light sleep */
- temp = data = RREG32(mmCP_MEM_SLP_CNTL);
- data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
- if (temp != data)
- WREG32(mmCP_MEM_SLP_CNTL, data);
- }
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
+ WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
}
/* 3 - RLC_CGTT_MGCG_OVERRIDE */
@@ -5830,6 +5983,76 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ uint32_t msg_id, pp_state;
+ void *pp_handle = adev->powerplay.pp_handle;
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG | PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CG,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_MG,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ return 0;
+}
+
+static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ uint32_t msg_id, pp_state;
+ void *pp_handle = adev->powerplay.pp_handle;
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG | PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CG,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_3D,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_MG,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_RLC,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CP,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ return 0;
+}
+
static int gfx_v8_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -5842,33 +6065,33 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
gfx_v8_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
+ case CHIP_TONGA:
+ gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
+ break;
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
+ break;
default:
break;
}
return 0;
}
-static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+static u32 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
{
- u32 rptr;
-
- rptr = ring->adev->wb.wb[ring->rptr_offs];
-
- return rptr;
+ return ring->adev->wb.wb[ring->rptr_offs];
}
static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u32 wptr;
if (ring->use_doorbell)
/* XXX check if swapping is necessary on BE */
- wptr = ring->adev->wb.wb[ring->wptr_offs];
+ return ring->adev->wb.wb[ring->wptr_offs];
else
- wptr = RREG32(mmCP_RB0_WPTR);
-
- return wptr;
+ return RREG32(mmCP_RB0_WPTR);
}
static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
@@ -5935,12 +6158,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
{
u32 header, control = 0;
- /* insert SWITCH_BUFFER packet before first IB in the ring frame */
- if (ctx_switch) {
- amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
- amdgpu_ring_write(ring, 0);
- }
-
if (ib->flags & AMDGPU_IB_FLAG_CE)
header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
else
@@ -5967,9 +6184,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
- (2 << 0) |
+ (2 << 0) |
#endif
- (ib->gpu_addr & 0xFFFFFFFC));
+ (ib->gpu_addr & 0xFFFFFFFC));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
amdgpu_ring_write(ring, control);
}
@@ -6010,14 +6227,6 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */
-
- if (usepfp) {
- /* synce CE with ME to prevent CE fetch CEIB before context switch done */
- amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
- amdgpu_ring_write(ring, 0);
- }
}
static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
@@ -6025,6 +6234,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ /* GFX8 emits 128 dw nop to prevent the DE from doing vm_flush before the CE finishes the CEIB */
+ if (usepfp)
+ amdgpu_ring_insert_nop(ring, 128);
+
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)) |
@@ -6064,18 +6277,11 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* sync PFP to ME, otherwise we might get invalid PFP reads */
amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
amdgpu_ring_write(ring, 0x0);
- amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
- amdgpu_ring_write(ring, 0);
+ /* GFX8 emits 128 dw nop to prevent the CE from accessing the VM before vm_flush finishes */
+ amdgpu_ring_insert_nop(ring, 128);
}
}
-static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
-{
- return ring->adev->wb.wb[ring->rptr_offs];
-}
-
static u32 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
return ring->adev->wb.wb[ring->wptr_offs];
@@ -6111,36 +6317,88 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, upper_32_bits(seq));
}
-static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
- enum amdgpu_interrupt_state state)
+static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
{
- u32 cp_int_cntl;
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+}
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- TIME_STAMP_INT_ENABLE, 0);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl =
- REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- TIME_STAMP_INT_ENABLE, 1);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- default:
- break;
+static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
+ uint32_t dw2 = 0;
+
+ dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
+ if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+ /* set load_global_config & load_global_uconfig */
+ dw2 |= 0x8001;
+ /* set load_cs_sh_regs */
+ dw2 |= 0x01000000;
+ /* set load_per_context_state & load_gfx_sh_regs for GFX */
+ dw2 |= 0x10002;
+
+ /* set load_ce_ram if a preamble is present */
+ if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
+ dw2 |= 0x10000000;
+ } else {
+ /* still load_ce_ram if this is the first time a preamble is
+ * presented, even though no context switch happens.
+ */
+ if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
+ dw2 |= 0x10000000;
}
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ amdgpu_ring_write(ring, dw2);
+ amdgpu_ring_write(ring, 0);
+}
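
gfx_v8_ring_emit_cntxcntl() packs the CONTEXT_CONTROL load flags into a single dword before writing the three-dword packet. A sketch of the bit composition, with hypothetical values for the two input flags:

#include <stdint.h>
#include <stdio.h>

#define HAVE_CTX_SWITCH		(1u << 0)	/* hypothetical flag values */
#define PREAMBLE_PRESENT	(1u << 1)

static uint32_t build_dw2(uint32_t flags)
{
	uint32_t dw2 = 0x80000000u;		/* load_enable */

	if (flags & HAVE_CTX_SWITCH) {
		dw2 |= 0x8001;			/* load_global_config/uconfig */
		dw2 |= 0x01000000;		/* load_cs_sh_regs */
		dw2 |= 0x10002;			/* per-context state + gfx sh regs */
		if (flags & PREAMBLE_PRESENT)
			dw2 |= 0x10000000;	/* load_ce_ram */
	}
	return dw2;
}

int main(void)
{
	printf("0x%08x\n", (unsigned)build_dw2(HAVE_CTX_SWITCH | PREAMBLE_PRESENT));
	return 0;
}
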
+
+static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+{
+ return
+ 4; /* gfx_v8_0_ring_emit_ib_gfx */
+}
+
+static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+ return
+ 20 + /* gfx_v8_0_ring_emit_gds_switch */
+ 7 + /* gfx_v8_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 6 + 6 + 6 + /* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+ 256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
+ 2 + /* gfx_v8_ring_emit_sb */
+ 3; /* gfx_v8_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
+{
+ return
+ 4; /* gfx_v8_0_ring_emit_ib_compute */
+}
+
+static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+ return
+ 20 + /* gfx_v8_0_ring_emit_gds_switch */
+ 7 + /* gfx_v8_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v8_0_ring_emit_vm_flush */
+ 7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+}
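
The two get_dma_frame_size helpers report the worst-case dword count a single frame can emit, so the ring layer can reserve that much space up front instead of overflowing mid-frame. A sketch of the gfx budget as the sum of the per-packet costs listed above:

#include <stdio.h>

static unsigned frame_size_gfx(void)
{
	/* worst case: every optional packet emitted, per the table above */
	return 20 + 7 + 5 + 6 + 6 + 6 + 7 + 256 + 19 + 2 + 3;
}

int main(void)
{
	/* a ring_alloc()-style caller would reserve this many dwords */
	printf("reserve %u dwords per gfx frame\n", frame_size_gfx());
	return 0;
}
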
+
+static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+ enum amdgpu_interrupt_state state)
+{
+ WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
}
static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
int me, int pipe,
enum amdgpu_interrupt_state state)
{
- u32 mec_int_cntl, mec_int_cntl_reg;
-
/*
* amdgpu controls only pipe 0 of MEC1. That's why this function only
* handles the setting of interrupts for this specific pipe. All other
@@ -6150,7 +6408,6 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
if (me == 1) {
switch (pipe) {
case 0:
- mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
break;
default:
DRM_DEBUG("invalid pipe %d\n", pipe);
@@ -6161,22 +6418,8 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
return;
}
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- mec_int_cntl = RREG32(mec_int_cntl_reg);
- mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
- TIME_STAMP_INT_ENABLE, 0);
- WREG32(mec_int_cntl_reg, mec_int_cntl);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- mec_int_cntl = RREG32(mec_int_cntl_reg);
- mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
- TIME_STAMP_INT_ENABLE, 1);
- WREG32(mec_int_cntl_reg, mec_int_cntl);
- break;
- default:
- break;
- }
+ WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
}
static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
@@ -6184,24 +6427,8 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE, 0);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE, 1);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- default:
- break;
- }
+ WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
return 0;
}
@@ -6211,24 +6438,8 @@ static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE, 0);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE, 1);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- default:
- break;
- }
+ WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
return 0;
}
@@ -6334,13 +6545,16 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.resume = gfx_v8_0_resume,
.is_idle = gfx_v8_0_is_idle,
.wait_for_idle = gfx_v8_0_wait_for_idle,
+ .check_soft_reset = gfx_v8_0_check_soft_reset,
+ .pre_soft_reset = gfx_v8_0_pre_soft_reset,
.soft_reset = gfx_v8_0_soft_reset,
+ .post_soft_reset = gfx_v8_0_post_soft_reset,
.set_clockgating_state = gfx_v8_0_set_clockgating_state,
.set_powergating_state = gfx_v8_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
- .get_rptr = gfx_v8_0_ring_get_rptr_gfx,
+ .get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
.parse_cs = NULL,
@@ -6355,10 +6569,14 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .emit_switch_buffer = gfx_v8_ring_emit_sb,
+ .emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
+ .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
+ .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
- .get_rptr = gfx_v8_0_ring_get_rptr_compute,
+ .get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
.parse_cs = NULL,
@@ -6373,6 +6591,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
+ .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -6475,15 +6695,12 @@ static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
- data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
- data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
-
- data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
- data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
+ RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
- return (~data) & mask;
+ return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
}
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index bc82c794312c..ebed1f829297 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -26,6 +26,4 @@
extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
-void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
new file mode 100644
index 000000000000..b13c8aaec078
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -0,0 +1,1071 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "gmc_v6_0.h"
+#include "amdgpu_ucode.h"
+#include "si/sid.h"
+
+static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v6_0_wait_for_idle(void *handle);
+
+MODULE_FIRMWARE("radeon/tahiti_mc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
+MODULE_FIRMWARE("radeon/verde_mc.bin");
+MODULE_FIRMWARE("radeon/oland_mc.bin");
+
+static const u32 crtc_offsets[6] =
+{
+ SI_CRTC0_REGISTER_OFFSET,
+ SI_CRTC1_REGISTER_OFFSET,
+ SI_CRTC2_REGISTER_OFFSET,
+ SI_CRTC3_REGISTER_OFFSET,
+ SI_CRTC4_REGISTER_OFFSET,
+ SI_CRTC5_REGISTER_OFFSET
+};
+
+static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ u32 blackout;
+
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_stop_mc_access(adev, save);
+
+ gmc_v6_0_wait_for_idle((void *)adev);
+
+ blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+ if (REG_GET_FIELD(blackout, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE) != 1) {
+ /* Block CPU access */
+ WREG32(BIF_FB_EN, 0);
+ /* blackout the MC */
+ blackout = REG_SET_FIELD(blackout,
+ mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
+ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+ }
+ /* wait for the MC to settle */
+ udelay(100);
+
+}
+
+static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ u32 tmp;
+
+ /* unblackout the MC */
+ tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
+ tmp = REG_SET_FIELD(tmp, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
+ WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+ /* allow CPU access */
+ tmp = REG_SET_FIELD(0, mmBIF_FB_EN, xxFB_READ_EN, 1);
+ tmp = REG_SET_FIELD(tmp, mmBIF_FB_EN, xxFB_WRITE_EN, 1);
+ WREG32(BIF_FB_EN, tmp);
+
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_resume_mc_access(adev, save);
+
+}
+
+static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
+{
+ const char *chip_name;
+ char fw_name[30];
+ int err;
+
+ DRM_DEBUG("\n");
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ chip_name = "tahiti";
+ break;
+ case CHIP_PITCAIRN:
+ chip_name = "pitcairn";
+ break;
+ case CHIP_VERDE:
+ chip_name = "verde";
+ break;
+ case CHIP_OLAND:
+ chip_name = "oland";
+ break;
+ case CHIP_HAINAN:
+ chip_name = "hainan";
+ break;
+ default: BUG();
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = amdgpu_ucode_validate(adev->mc.fw);
+
+out:
+ if (err) {
+ dev_err(adev->dev,
+ "si_mc: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(adev->mc.fw);
+ adev->mc.fw = NULL;
+ }
+ return err;
+}
+
+static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
+{
+ const __le32 *new_fw_data = NULL;
+ u32 running;
+ const __le32 *new_io_mc_regs = NULL;
+ int i, regs_size, ucode_size;
+ const struct mc_firmware_header_v1_0 *hdr;
+
+ if (!adev->mc.fw)
+ return -EINVAL;
+
+ hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+
+ amdgpu_ucode_print_mc_hdr(&hdr->header);
+
+ adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+ regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+ new_io_mc_regs = (const __le32 *)
+ (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ new_fw_data = (const __le32 *)
+ (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+ if (running == 0) {
+
+ /* reset the engine and set to writable */
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+ /* load mc io regs */
+ for (i = 0; i < regs_size; i++) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+ WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+ }
+ /* load the MC ucode */
+ for (i = 0; i < ucode_size; i++) {
+ WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+ }
+
+ /* put the engine back into the active state */
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+ /* wait for training to complete */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
+ break;
+ udelay(1);
+ }
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
+ break;
+ udelay(1);
+ }
+
+ }
+
+ return 0;
+}
+
+static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
+ struct amdgpu_mc *mc)
+{
+ if (mc->mc_vram_size > 0xFFC0000000ULL) {
+ dev_warn(adev->dev, "limiting VRAM\n");
+ mc->real_vram_size = 0xFFC0000000ULL;
+ mc->mc_vram_size = 0xFFC0000000ULL;
+ }
+ amdgpu_vram_location(adev, &adev->mc, 0);
+ adev->mc.gtt_base_align = 0;
+ amdgpu_gtt_location(adev, mc);
+}
+
+static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_mc_save save;
+ u32 tmp;
+ int i, j;
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x6) {
+ WREG32((0xb05 + j), 0x00000000);
+ WREG32((0xb06 + j), 0x00000000);
+ WREG32((0xb07 + j), 0x00000000);
+ WREG32((0xb08 + j), 0x00000000);
+ WREG32((0xb09 + j), 0x00000000);
+ }
+ WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+ gmc_v6_0_mc_stop(adev, &save);
+
+ if (gmc_v6_0_wait_for_idle((void *)adev)) {
+ dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+ }
+
+ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+ /* Update configuration */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ adev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ adev->mc.vram_end >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+ adev->vram_scratch.gpu_addr >> 12);
+ tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
+ tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+ WREG32(MC_VM_FB_LOCATION, tmp);
+ /* XXX double check these! */
+ WREG32(HDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+ WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+ WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+ WREG32(MC_VM_AGP_BASE, 0);
+ WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+ WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+
+ if (gmc_v6_0_wait_for_idle((void *)adev)) {
+ dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+ }
+ gmc_v6_0_mc_resume(adev, &save);
+ amdgpu_display_set_vga_render_state(adev, false);
+}
+
+static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
+{
+
+ u32 tmp;
+ int chansize, numchan;
+
+ tmp = RREG32(MC_ARB_RAMCFG);
+ if (tmp & CHANSIZE_OVERRIDE) {
+ chansize = 16;
+ } else if (tmp & CHANSIZE_MASK) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ case 4:
+ numchan = 3;
+ break;
+ case 5:
+ numchan = 6;
+ break;
+ case 6:
+ numchan = 10;
+ break;
+ case 7:
+ numchan = 12;
+ break;
+ case 8:
+ numchan = 16;
+ break;
+ }
+ adev->mc.vram_width = numchan * chansize;
+ /* Could the aperture size report 0? */
+ adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
+ adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+ /* size in MB on si */
+ adev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->mc.visible_vram_size = adev->mc.aper_size;
+
+ /* unless the user has overridden it, set the gart size to the
+ * default GTT size.
+ */
+ if (amdgpu_gart_size == -1)
+ adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
+ else
+ adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
+
+ gmc_v6_0_vram_gtt_location(adev, &adev->mc);
+
+ return 0;
+}
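
gmc_v6_0_mc_init() derives the VRAM bus width as channel count times channel size. A condensed sketch of the NOOFCHAN decode, using the same mapping as the switch above:

#include <stdio.h>

static int numchan_from_field(unsigned field)
{
	/* same mapping as the MC_SHARED_CHMAP decode above */
	static const int map[9] = { 1, 2, 4, 8, 3, 6, 10, 12, 16 };

	return field < 9 ? map[field] : 1;
}

int main(void)
{
	int chansize = 32;	/* as decoded from MC_ARB_RAMCFG */

	printf("vram_width = %d bits\n", numchan_from_field(3) * chansize);
	return 0;
}
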
+
+static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
+ uint32_t vmid)
+{
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+
+ WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
+ void *cpu_pt_addr,
+ uint32_t gpu_page_idx,
+ uint64_t addr,
+ uint32_t flags)
+{
+ void __iomem *ptr = (void *)cpu_pt_addr;
+ uint64_t value;
+
+ value = addr & 0xFFFFFFFFFFFFF000ULL;
+ value |= flags;
+ writeq(value, ptr + (gpu_page_idx * 8));
+
+ return 0;
+}
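
gmc_v6_0_gart_set_pte_pde() builds one 64-bit PTE per GART page: the page-aligned system address in the upper bits, permission/valid flags in the low bits, written at an 8-byte stride into the mapped table. A standalone sketch of the packing, with a hypothetical VALID flag bit:

#include <stdint.h>
#include <stdio.h>

#define PTE_VALID 0x1u	/* hypothetical flag; real flags come from amdgpu */

static uint64_t make_pte(uint64_t addr, uint32_t flags)
{
	/* keep the page-aligned address, drop bits below the 4K page */
	return (addr & 0xFFFFFFFFFFFFF000ULL) | flags;
}

int main(void)
{
	uint64_t pte = make_pte(0x123456789ABCULL, PTE_VALID);

	/* the driver then does writeq(pte, ptr + gpu_page_idx * 8) */
	printf("pte = 0x%llx\n", (unsigned long long)pte);
	return 0;
}
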
+
+static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
+ bool value)
+{
+ u32 tmp;
+
+ tmp = RREG32(VM_CONTEXT1_CNTL);
+ tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+ xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+ xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+ xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+ xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+ xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+ xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ WREG32(VM_CONTEXT1_CNTL, tmp);
+}
+
+static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+{
+ int r, i;
+
+ if (adev->gart.robj == NULL) {
+ dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = amdgpu_gart_table_vram_pin(adev);
+ if (r)
+ return r;
+ /* Setup TLB control */
+ WREG32(MC_VM_MX_L1_TLB_CNTL,
+ (0xA << 7) |
+ ENABLE_L1_TLB |
+ ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ ENABLE_ADVANCED_DRIVER_MODEL |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+ ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7) |
+ CONTEXT1_IDENTITY_ACCESS_MODE(1));
+ WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+ WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ BANK_SELECT(4) |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(4));
+ /* setup context0 */
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(adev->dummy_page.addr >> 12));
+ WREG32(VM_CONTEXT0_CNTL2, 0);
+ WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
+
+ WREG32(0x575, 0);
+ WREG32(0x576, 0);
+ WREG32(0x577, 0);
+
+ /* empty context1-15 */
+ /* set vm size, must be a multiple of 4 */
+ WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
+ /* Assign the pt base to something valid for now; the pts used for
+ * the VMs are determined by the application, set up and assigned
+ * on the fly in the vm code (amdgpu_vm.c).
+ */
+ for (i = 1; i < 16; i++) {
+ if (i < 8)
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
+ adev->gart.table_addr >> 12);
+ else
+ WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
+ adev->gart.table_addr >> 12);
+ }
+
+ /* enable context1-15 */
+ WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(adev->dummy_page.addr >> 12));
+ WREG32(VM_CONTEXT1_CNTL2, 4);
+ WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+ PAGE_TABLE_BLOCK_SIZE(amdgpu_vm_block_size - 9) |
+ RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+ VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+ READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+ WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+ gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
+ dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(adev->mc.gtt_size >> 20),
+ (unsigned long long)adev->gart.table_addr);
+ adev->gart.ready = true;
+ return 0;
+}
+
+static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->gart.robj) {
+ dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
+ return 0;
+ }
+ r = amdgpu_gart_init(adev);
+ if (r)
+ return r;
+ adev->gart.table_size = adev->gart.num_gpu_pages * 8;
+ return amdgpu_gart_table_vram_alloc(adev);
+}
+
+static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
+{
+ /*unsigned i;
+
+ for (i = 1; i < 16; ++i) {
+ uint32_t reg;
+ if (i < 8)
+ reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ;
+ else
+ reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
+ adev->vm_manager.saved_table_addr[i] = RREG32(reg);
+ }*/
+
+ /* Disable all tables */
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
+ /* Setup TLB control */
+ WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7) |
+ CONTEXT1_IDENTITY_ACCESS_MODE(1));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+ amdgpu_gart_table_vram_unpin(adev);
+}
+
+static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
+{
+ amdgpu_gart_table_vram_free(adev);
+ amdgpu_gart_fini(adev);
+}
+
+static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
+{
+ /*
+ * number of VMs
+ * VMID 0 is reserved for System
+ * amdgpu graphics/compute will use VMIDs 1-7
+ * amdkfd will use VMIDs 8-15
+ */
+ adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+ amdgpu_vm_manager_init(adev);
+
+ /* base offset of vram pages */
+ if (adev->flags & AMD_IS_APU) {
+ u64 tmp = RREG32(MC_VM_FB_OFFSET);
+ tmp <<= 22;
+ adev->vm_manager.vram_base_offset = tmp;
+ } else {
+ adev->vm_manager.vram_base_offset = 0;
+ }
+
+ return 0;
+}
+
+static void gmc_v6_0_vm_fini(struct amdgpu_device *adev)
+{
+}
+
+static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
+ u32 status, u32 addr, u32 mc_client)
+{
+ u32 mc_id;
+ u32 vmid = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, xxVMID);
+ u32 protections = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ xxPROTECTIONS);
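+ /* mc_client is a packed four-character ASCII tag; unpack it for printing */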
+ char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+ (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
+
+ mc_id = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ xxMEMORY_CLIENT_ID);
+
+ dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+ protections, vmid, addr,
+ REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ xxMEMORY_CLIENT_RW) ?
+ "write" : "read", block, mc_client, mc_id);
+}
+
+/*
+static const u32 mc_cg_registers[] = {
+ MC_HUB_MISC_HUB_CG,
+ MC_HUB_MISC_SIP_CG,
+ MC_HUB_MISC_VM_CG,
+ MC_XPB_CLK_GAT,
+ ATC_MISC_CG,
+ MC_CITF_MISC_WR_CG,
+ MC_CITF_MISC_RD_CG,
+ MC_CITF_MISC_VM_CG,
+ VM_L2_CG,
+};
+
+static const u32 mc_cg_ls_en[] = {
+ MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
+ MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
+ MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
+ MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
+ ATC_MISC_CG__MEM_LS_ENABLE_MASK,
+ MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
+ MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
+ MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
+ VM_L2_CG__MEM_LS_ENABLE_MASK,
+};
+
+static const u32 mc_cg_en[] = {
+ MC_HUB_MISC_HUB_CG__ENABLE_MASK,
+ MC_HUB_MISC_SIP_CG__ENABLE_MASK,
+ MC_HUB_MISC_VM_CG__ENABLE_MASK,
+ MC_XPB_CLK_GAT__ENABLE_MASK,
+ ATC_MISC_CG__ENABLE_MASK,
+ MC_CITF_MISC_WR_CG__ENABLE_MASK,
+ MC_CITF_MISC_RD_CG__ENABLE_MASK,
+ MC_CITF_MISC_VM_CG__ENABLE_MASK,
+ VM_L2_CG__ENABLE_MASK,
+};
+
+static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
+ bool enable)
+{
+ int i;
+ u32 orig, data;
+
+ for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+ orig = data = RREG32(mc_cg_registers[i]);
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+ data |= mc_cg_ls_en[i];
+ else
+ data &= ~mc_cg_ls_en[i];
+ if (data != orig)
+ WREG32(mc_cg_registers[i], data);
+ }
+}
+
+static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
+ bool enable)
+{
+ int i;
+ u32 orig, data;
+
+ for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+ orig = data = RREG32(mc_cg_registers[i]);
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+ data |= mc_cg_en[i];
+ else
+ data &= ~mc_cg_en[i];
+ if (data != orig)
+ WREG32(mc_cg_registers[i], data);
+ }
+}
+
+static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32_PCIE(ixPCIE_CNTL2);
+
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+ data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
+ } else {
+ data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
+ }
+
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_CNTL2, data);
+}
+
+static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32(HDP_HOST_PATH_CNTL);
+
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+ data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
+ else
+ data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
+
+ if (orig != data)
+ WREG32(HDP_HOST_PATH_CNTL, data);
+}
+
+static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32(HDP_MEM_POWER_LS);
+
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+ data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
+ else
+ data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
+
+ if (orig != data)
+ WREG32(HDP_MEM_POWER_LS, data);
+}
+*/
+
+static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
+{
+ switch (mc_seq_vram_type) {
+ case MC_SEQ_MISC0__MT__GDDR1:
+ return AMDGPU_VRAM_TYPE_GDDR1;
+ case MC_SEQ_MISC0__MT__DDR2:
+ return AMDGPU_VRAM_TYPE_DDR2;
+ case MC_SEQ_MISC0__MT__GDDR3:
+ return AMDGPU_VRAM_TYPE_GDDR3;
+ case MC_SEQ_MISC0__MT__GDDR4:
+ return AMDGPU_VRAM_TYPE_GDDR4;
+ case MC_SEQ_MISC0__MT__GDDR5:
+ return AMDGPU_VRAM_TYPE_GDDR5;
+ case MC_SEQ_MISC0__MT__DDR3:
+ return AMDGPU_VRAM_TYPE_DDR3;
+ default:
+ return AMDGPU_VRAM_TYPE_UNKNOWN;
+ }
+}
+
+static int gmc_v6_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ gmc_v6_0_set_gart_funcs(adev);
+ gmc_v6_0_set_irq_funcs(adev);
+
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ } else {
+ u32 tmp = RREG32(MC_SEQ_MISC0);
+ tmp &= MC_SEQ_MISC0__MT__MASK;
+ adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
+ }
+
+ return 0;
+}
+
+static int gmc_v6_0_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
+static int gmc_v6_0_sw_init(void *handle)
+{
+ int r;
+ int dma_bits;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+ if (r)
+ return r;
+
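+ /* amdgpu_vm_size is in GiB; shifting by 18 converts GiB to 4KiB pages */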
+ adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+
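+ /* the MC address space on SI is 40 bits wide */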
+ adev->mc.mc_mask = 0xffffffffffULL;
+
+ adev->need_dma32 = false;
+ dma_bits = adev->need_dma32 ? 32 : 40;
+ r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+ if (r) {
+ adev->need_dma32 = true;
+ dma_bits = 32;
+ dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
+ }
+ r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+ if (r) {
+ pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
+ dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
+ }
+
+ r = gmc_v6_0_init_microcode(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to load mc firmware!\n");
+ return r;
+ }
+
+ r = amdgpu_ttm_global_init(adev);
+ if (r)
+ return r;
+
+ r = gmc_v6_0_mc_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_init(adev);
+ if (r)
+ return r;
+
+ r = gmc_v6_0_gart_init(adev);
+ if (r)
+ return r;
+
+ if (!adev->vm_manager.enabled) {
+ r = gmc_v6_0_vm_init(adev);
+ if (r) {
+ dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+ return r;
+ }
+ adev->vm_manager.enabled = true;
+ }
+
+ return r;
+}
+
+static int gmc_v6_0_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->vm_manager.enabled) {
+ gmc_v6_0_vm_fini(adev);
+ adev->vm_manager.enabled = false;
+ }
+ gmc_v6_0_gart_fini(adev);
+ amdgpu_gem_force_release(adev);
+ amdgpu_bo_fini(adev);
+
+ return 0;
+}
+
+static int gmc_v6_0_hw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ gmc_v6_0_mc_program(adev);
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = gmc_v6_0_mc_load_microcode(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to load MC firmware!\n");
+ return r;
+ }
+ }
+
+ r = gmc_v6_0_gart_enable(adev);
+ if (r)
+ return r;
+
+ return r;
+}
+
+static int gmc_v6_0_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+ gmc_v6_0_gart_disable(adev);
+
+ return 0;
+}
+
+static int gmc_v6_0_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->vm_manager.enabled) {
+ gmc_v6_0_vm_fini(adev);
+ adev->vm_manager.enabled = false;
+ }
+ gmc_v6_0_hw_fini(adev);
+
+ return 0;
+}
+
+static int gmc_v6_0_resume(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = gmc_v6_0_hw_init(adev);
+ if (r)
+ return r;
+
+ if (!adev->vm_manager.enabled) {
+ r = gmc_v6_0_vm_init(adev);
+ if (r) {
+ dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+ return r;
+ }
+ adev->vm_manager.enabled = true;
+ }
+
+ return r;
+}
+
+static bool gmc_v6_0_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 tmp = RREG32(SRBM_STATUS);
+
+ if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+ SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
+ return false;
+
+ return true;
+}
+
+static int gmc_v6_0_wait_for_idle(void *handle)
+{
+ unsigned i;
+ u32 tmp;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(SRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
+ SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+ SRBM_STATUS__MCC_BUSY_MASK |
+ SRBM_STATUS__MCD_BUSY_MASK |
+ SRBM_STATUS__VMC_BUSY_MASK);
+ if (!tmp)
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int gmc_v6_0_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_mode_mc_save save;
+ u32 srbm_soft_reset = 0;
+ u32 tmp = RREG32(SRBM_STATUS);
+
+ if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+ mmSRBM_SOFT_RESET, xxSOFT_RESET_VMC, 1);
+
+ if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+ SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
+ if (!(adev->flags & AMD_IS_APU))
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+ mmSRBM_SOFT_RESET, xxSOFT_RESET_MC, 1);
+ }
+
+ if (srbm_soft_reset) {
+ gmc_v6_0_mc_stop(adev, &save);
+ if (gmc_v6_0_wait_for_idle(adev))
+ dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
+
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ gmc_v6_0_mc_resume(adev, &save);
+ udelay(50);
+ }
+
+ return 0;
+}
+
+static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp;
+ u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ tmp = RREG32(VM_CONTEXT0_CNTL);
+ tmp &= ~bits;
+ WREG32(VM_CONTEXT0_CNTL, tmp);
+ tmp = RREG32(VM_CONTEXT1_CNTL);
+ tmp &= ~bits;
+ WREG32(VM_CONTEXT1_CNTL, tmp);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ tmp = RREG32(VM_CONTEXT0_CNTL);
+ tmp |= bits;
+ WREG32(VM_CONTEXT0_CNTL, tmp);
+ tmp = RREG32(VM_CONTEXT1_CNTL);
+ tmp |= bits;
+ WREG32(VM_CONTEXT1_CNTL, tmp);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ u32 addr, status;
+
+ addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+ status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+
+ if (!addr && !status)
+ return 0;
+
+ if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
+ gmc_v6_0_set_fault_enable_default(adev, false);
+
+ dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+ entry->src_id, entry->src_data);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ addr);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ status);
+ gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+
+ return 0;
+}
+
+static int gmc_v6_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int gmc_v6_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
+ .name = "gmc_v6_0",
+ .early_init = gmc_v6_0_early_init,
+ .late_init = gmc_v6_0_late_init,
+ .sw_init = gmc_v6_0_sw_init,
+ .sw_fini = gmc_v6_0_sw_fini,
+ .hw_init = gmc_v6_0_hw_init,
+ .hw_fini = gmc_v6_0_hw_fini,
+ .suspend = gmc_v6_0_suspend,
+ .resume = gmc_v6_0_resume,
+ .is_idle = gmc_v6_0_is_idle,
+ .wait_for_idle = gmc_v6_0_wait_for_idle,
+ .soft_reset = gmc_v6_0_soft_reset,
+ .set_clockgating_state = gmc_v6_0_set_clockgating_state,
+ .set_powergating_state = gmc_v6_0_set_powergating_state,
+};
+
+static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
+ .flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
+ .set_pte_pde = gmc_v6_0_gart_set_pte_pde,
+};
+
+static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
+ .set = gmc_v6_0_vm_fault_interrupt_state,
+ .process = gmc_v6_0_process_interrupt,
+};
+
+static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
+{
+ if (adev->gart.gart_funcs == NULL)
+ adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
+}
+
+static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->mc.vm_fault.num_types = 1;
+ adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
new file mode 100644
index 000000000000..42c4fc676cd4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GMC_V6_0_H__
+#define __GMC_V6_0_H__
+
+extern const struct amd_ip_funcs gmc_v6_0_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index d24a82bd0c7a..aa0c4b964621 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -144,6 +144,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
break;
case CHIP_KAVERI:
case CHIP_KABINI:
+ case CHIP_MULLINS:
return 0;
default: BUG();
}
@@ -182,7 +183,7 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
const struct mc_firmware_header_v1_0 *hdr;
const __le32 *fw_data = NULL;
const __le32 *io_mc_regs = NULL;
- u32 running, blackout = 0;
+ u32 running;
int i, ucode_size, regs_size;
if (!adev->mc.fw)
@@ -202,11 +203,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
if (running == 0) {
- if (running) {
- blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
- WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
- }
-
/* reset the engine and set to writable */
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
@@ -238,9 +234,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
break;
udelay(1);
}
-
- if (running)
- WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
}
return 0;
@@ -392,7 +385,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
* size equal to the 1024 or vram, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+ adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@@ -952,6 +945,11 @@ static int gmc_v7_0_sw_init(void *handle)
return r;
}
+ r = amdgpu_ttm_global_init(adev);
+ if (r)
+ return r;
+
r = gmc_v7_0_mc_init(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 717359d3ba8c..1b319f5bc696 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -103,6 +103,11 @@ static const u32 stoney_mgcg_cgcg_init[] =
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
+static const u32 golden_settings_stoney_common[] =
+{
+ mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
+ mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
+};
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
@@ -142,6 +147,9 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
amdgpu_program_register_sequence(adev,
stoney_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ golden_settings_stoney_common,
+ (const u32)ARRAY_SIZE(golden_settings_stoney_common));
break;
default:
break;
@@ -253,7 +261,7 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
const struct mc_firmware_header_v1_0 *hdr;
const __le32 *fw_data = NULL;
const __le32 *io_mc_regs = NULL;
- u32 running, blackout = 0;
+ u32 running;
int i, ucode_size, regs_size;
if (!adev->mc.fw)
@@ -261,8 +269,10 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
/* Skip MC ucode loading on SR-IOV capable boards.
* vbios does this for us in asic_init in that case.
+ * Likewise skip it on a VF, where the hypervisor loads it
+ * for the adapter.
*/
- if (adev->virtualization.supports_sr_iov)
+ if (amdgpu_sriov_bios(adev))
return 0;
hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
@@ -279,11 +289,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
if (running == 0) {
- if (running) {
- blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
- WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
- }
-
/* reset the engine and set to writable */
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
@@ -315,9 +320,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
break;
udelay(1);
}
-
- if (running)
- WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
}
return 0;
@@ -469,7 +471,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
* size equal to the 1024 or vram, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+ adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@@ -949,6 +951,11 @@ static int gmc_v8_0_sw_init(void *handle)
return r;
}
+ r = amdgpu_ttm_global_init(adev);
+ if (r)
+ return r;
+
r = gmc_v8_0_mc_init(adev);
if (r)
return r;
@@ -1092,9 +1099,8 @@ static int gmc_v8_0_wait_for_idle(void *handle)
}
-static int gmc_v8_0_soft_reset(void *handle)
+static int gmc_v8_0_check_soft_reset(void *handle)
{
- struct amdgpu_mode_mc_save save;
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -1109,13 +1115,42 @@ static int gmc_v8_0_soft_reset(void *handle)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
}
-
if (srbm_soft_reset) {
- gmc_v8_0_mc_stop(adev, &save);
- if (gmc_v8_0_wait_for_idle((void *)adev)) {
- dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
- }
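+ /* just record the hang here; mc_stop()/mc_resume() now run in
+ * pre/post_soft_reset instead
+ */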
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true;
+ adev->mc.srbm_soft_reset = srbm_soft_reset;
+ } else {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false;
+ adev->mc.srbm_soft_reset = 0;
+ }
+ return 0;
+}
+
+static int gmc_v8_0_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ return 0;
+ gmc_v8_0_mc_stop(adev, &adev->mc.save);
+ if (gmc_v8_0_wait_for_idle(adev))
+ dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
+
+ return 0;
+}
+
+static int gmc_v8_0_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ return 0;
+ srbm_soft_reset = adev->mc.srbm_soft_reset;
+
+ if (srbm_soft_reset) {
+ u32 tmp;
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
@@ -1131,14 +1166,22 @@ static int gmc_v8_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
-
- gmc_v8_0_mc_resume(adev, &save);
- udelay(50);
}
return 0;
}
+static int gmc_v8_0_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ return 0;
+
+ gmc_v8_0_mc_resume(adev, &adev->mc.save);
+ return 0;
+}
+
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -1406,7 +1449,10 @@ const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.resume = gmc_v8_0_resume,
.is_idle = gmc_v8_0_is_idle,
.wait_for_idle = gmc_v8_0_wait_for_idle,
+ .check_soft_reset = gmc_v8_0_check_soft_reset,
+ .pre_soft_reset = gmc_v8_0_pre_soft_reset,
.soft_reset = gmc_v8_0_soft_reset,
+ .post_soft_reset = gmc_v8_0_post_soft_reset,
.set_clockgating_state = gmc_v8_0_set_clockgating_state,
.set_powergating_state = gmc_v8_0_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
deleted file mode 100644
index 2f078ad6095c..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "iceland_smum.h"
-
-MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
-
-static void iceland_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int iceland_dpm_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- iceland_dpm_set_funcs(adev);
-
- return 0;
-}
-
-static int iceland_dpm_init_microcode(struct amdgpu_device *adev)
-{
- char fw_name[30] = "amdgpu/topaz_smc.bin";
- int err;
-
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
- if (err) {
- DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
- return err;
-}
-
-static int iceland_dpm_sw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ret = iceland_dpm_init_microcode(adev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int iceland_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
-
- return 0;
-}
-
-static int iceland_dpm_hw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- /* smu init only needs to be called at startup, not resume.
- * It should be in sw_init, but requires the fw info gathered
- * in sw_init from other IP modules.
- */
- ret = iceland_smu_init(adev);
- if (ret) {
- DRM_ERROR("SMU initialization failed\n");
- goto fail;
- }
-
- ret = iceland_smu_start(adev);
- if (ret) {
- DRM_ERROR("SMU start failed\n");
- goto fail;
- }
-
- mutex_unlock(&adev->pm.mutex);
- return 0;
-
-fail:
- adev->firmware.smu_load = false;
- mutex_unlock(&adev->pm.mutex);
- return -EINVAL;
-}
-
-static int iceland_dpm_hw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
- /* smu fini only needs to be called at teardown, not suspend.
- * It should be in sw_fini, but we put it here for symmetry
- * with smu init.
- */
- iceland_smu_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- return 0;
-}
-
-static int iceland_dpm_suspend(void *handle)
-{
- return 0;
-}
-
-static int iceland_dpm_resume(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- ret = iceland_smu_start(adev);
- if (ret) {
- DRM_ERROR("SMU start failed\n");
- goto fail;
- }
-
-fail:
- mutex_unlock(&adev->pm.mutex);
- return ret;
-}
-
-static int iceland_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int iceland_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-const struct amd_ip_funcs iceland_dpm_ip_funcs = {
- .name = "iceland_dpm",
- .early_init = iceland_dpm_early_init,
- .late_init = NULL,
- .sw_init = iceland_dpm_sw_init,
- .sw_fini = iceland_dpm_sw_fini,
- .hw_init = iceland_dpm_hw_init,
- .hw_fini = iceland_dpm_hw_fini,
- .suspend = iceland_dpm_suspend,
- .resume = iceland_dpm_resume,
- .is_idle = NULL,
- .wait_for_idle = NULL,
- .soft_reset = NULL,
- .set_clockgating_state = iceland_dpm_set_clockgating_state,
- .set_powergating_state = iceland_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs iceland_dpm_funcs = {
- .get_temperature = NULL,
- .pre_set_power_state = NULL,
- .set_power_state = NULL,
- .post_set_power_state = NULL,
- .display_configuration_changed = NULL,
- .get_sclk = NULL,
- .get_mclk = NULL,
- .print_power_state = NULL,
- .debugfs_print_current_performance_level = NULL,
- .force_performance_level = NULL,
- .vblank_too_short = NULL,
- .powergate_uvd = NULL,
-};
-
-static void iceland_dpm_set_funcs(struct amdgpu_device *adev)
-{
- if (NULL == adev->pm.funcs)
- adev->pm.funcs = &iceland_dpm_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
deleted file mode 100644
index 211839913728..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ /dev/null
@@ -1,677 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "ppsmc.h"
-#include "iceland_smum.h"
-#include "smu_ucode_xfer_vi.h"
-#include "amdgpu_ucode.h"
-
-#include "smu/smu_7_1_1_d.h"
-#include "smu/smu_7_1_1_sh_mask.h"
-
-#define ICELAND_SMC_SIZE 0x20000
-
-static int iceland_set_smc_sram_address(struct amdgpu_device *adev,
- uint32_t smc_address, uint32_t limit)
-{
- uint32_t val;
-
- if (smc_address & 3)
- return -EINVAL;
-
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(mmSMC_IND_INDEX_0, smc_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- return 0;
-}
-
-static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev,
- uint32_t smc_start_address,
- const uint8_t *src,
- uint32_t byte_count, uint32_t limit)
-{
- uint32_t addr;
- uint32_t data, orig_data;
- int result = 0;
- uint32_t extra_shift;
- unsigned long flags;
-
- if (smc_start_address & 3)
- return -EINVAL;
-
- if ((smc_start_address + byte_count) > limit)
- return -EINVAL;
-
- addr = smc_start_address;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- while (byte_count >= 4) {
- /* Bytes are written into the SMC addres space with the MSB first */
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
- result = iceland_set_smc_sram_address(adev, addr, limit);
-
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- if (0 != byte_count) {
- /* Now write odd bytes left, do a read modify write cycle */
- data = 0;
-
- result = iceland_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- orig_data = RREG32(mmSMC_IND_DATA_0);
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- data = (data << 8) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
- data |= (orig_data & ~((~0UL) << extra_shift));
-
- result = iceland_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
- }
-
-out:
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-void iceland_start_smc(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-}
-
-void iceland_reset_smc(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-}
-
-static int iceland_program_jump_on_start(struct amdgpu_device *adev)
-{
- static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
- iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-
- return 0;
-}
-
-void iceland_stop_smc_clock(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-}
-
-void iceland_start_smc_clock(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-}
-
-static bool iceland_is_smc_ram_running(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
-
- return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
-}
-
-static int wait_smu_response(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32(mmSMC_RESP_0);
- if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-
-static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
- if (!iceland_is_smc_ram_running(adev))
- return -EINVAL;
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send message\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
- PPSMC_Msg msg)
-{
- if (!iceland_is_smc_ram_running(adev))
- return -EINVAL;
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
-static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg,
- uint32_t parameter)
-{
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return iceland_send_msg_to_smc(adev, msg);
-}
-
-static int iceland_send_msg_to_smc_with_parameter_without_waiting(
- struct amdgpu_device *adev,
- PPSMC_Msg msg, uint32_t parameter)
-{
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return iceland_send_msg_to_smc_without_waiting(adev, msg);
-}
-
-#if 0 /* not used yet */
-static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- if (!iceland_is_smc_ram_running(adev))
- return -EINVAL;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-#endif
-
-static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev)
-{
- const struct smc_firmware_header_v1_0 *hdr;
- uint32_t ucode_size;
- uint32_t ucode_start_address;
- const uint8_t *src;
- uint32_t val;
- uint32_t byte_count;
- uint32_t data;
- unsigned long flags;
- int i;
-
- if (!adev->pm.fw)
- return -EINVAL;
-
- /* Skip SMC ucode loading on SR-IOV capable boards.
- * vbios does this for us in asic_init in that case.
- */
- if (adev->virtualization.supports_sr_iov)
- return 0;
-
- hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
- amdgpu_ucode_print_smc_hdr(&hdr->header);
-
- adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
- ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
- src = (const uint8_t *)
- (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
- if (ucode_size & 3) {
- DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
- return -EINVAL;
- }
-
- if (ucode_size > ICELAND_SMC_SIZE) {
- DRM_ERROR("SMC address is beyond the SMC RAM area\n");
- return -EINVAL;
- }
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixRCU_UC_EVENTS);
- if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0)
- break;
- udelay(1);
- }
- val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL);
- WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1);
-
- iceland_stop_smc_clock(adev);
- iceland_reset_smc(adev);
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- byte_count = ucode_size;
- while (byte_count >= 4) {
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
- WREG32(mmSMC_IND_DATA_0, data);
- src += 4;
- byte_count -= 4;
- }
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return 0;
-}
-
-#if 0 /* not used yet */
-static int iceland_read_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t *value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = iceland_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- *value = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int iceland_write_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = iceland_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- WREG32(mmSMC_IND_DATA_0, value);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int iceland_smu_stop_smc(struct amdgpu_device *adev)
-{
- iceland_reset_smc(adev);
- iceland_stop_smc_clock(adev);
-
- return 0;
-}
-#endif
-
-static int iceland_smu_start_smc(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- iceland_program_jump_on_start(adev);
- iceland_start_smc_clock(adev);
- iceland_start_smc(adev);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixFIRMWARE_FLAGS);
- if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1)
- break;
- udelay(1);
- }
- return 0;
-}
-
-static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- return AMDGPU_UCODE_ID_SDMA0;
- case UCODE_ID_SDMA1:
- return AMDGPU_UCODE_ID_SDMA1;
- case UCODE_ID_CP_CE:
- return AMDGPU_UCODE_ID_CP_CE;
- case UCODE_ID_CP_PFP:
- return AMDGPU_UCODE_ID_CP_PFP;
- case UCODE_ID_CP_ME:
- return AMDGPU_UCODE_ID_CP_ME;
- case UCODE_ID_CP_MEC:
- case UCODE_ID_CP_MEC_JT1:
- return AMDGPU_UCODE_ID_CP_MEC1;
- case UCODE_ID_CP_MEC_JT2:
- return AMDGPU_UCODE_ID_CP_MEC2;
- case UCODE_ID_RLC_G:
- return AMDGPU_UCODE_ID_RLC_G;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return AMDGPU_UCODE_ID_MAXIMUM;
- }
-}
-
-static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case AMDGPU_UCODE_ID_SDMA0:
- return UCODE_ID_SDMA0_MASK;
- case AMDGPU_UCODE_ID_SDMA1:
- return UCODE_ID_SDMA1_MASK;
- case AMDGPU_UCODE_ID_CP_CE:
- return UCODE_ID_CP_CE_MASK;
- case AMDGPU_UCODE_ID_CP_PFP:
- return UCODE_ID_CP_PFP_MASK;
- case AMDGPU_UCODE_ID_CP_ME:
- return UCODE_ID_CP_ME_MASK;
- case AMDGPU_UCODE_ID_CP_MEC1:
- return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
- case AMDGPU_UCODE_ID_CP_MEC2:
- return UCODE_ID_CP_MEC_MASK;
- case AMDGPU_UCODE_ID_RLC_G:
- return UCODE_ID_RLC_G_MASK;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return 0;
- }
-}
-
-static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
- uint32_t fw_type,
- struct SMU_Entry *entry)
-{
- enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type);
- struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
- const struct gfx_firmware_header_v1_0 *header = NULL;
- uint64_t gpu_addr;
- uint32_t data_size;
-
- if (ucode->fw == NULL)
- return -EINVAL;
-
- gpu_addr = ucode->mc_addr;
- header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
- data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
- entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
- entry->id = (uint16_t)fw_type;
- entry->image_addr_high = upper_32_bits(gpu_addr);
- entry->image_addr_low = lower_32_bits(gpu_addr);
- entry->meta_data_addr_high = 0;
- entry->meta_data_addr_low = 0;
- entry->data_size_byte = data_size;
- entry->num_register_entries = 0;
- entry->flags = 0;
-
- return 0;
-}
-
-static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
-{
- struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv;
- struct SMU_DRAMData_TOC *toc;
- uint32_t fw_to_load;
-
- toc = (struct SMU_DRAMData_TOC *)private->header;
- toc->num_entries = 0;
- toc->structure_version = 1;
-
- if (!adev->firmware.smu_load)
- return 0;
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for RLC\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for CE\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for PFP\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for ME\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA0\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA1\n");
- return -EINVAL;
- }
-
- iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
- iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
-
- fw_to_load = UCODE_ID_RLC_G_MASK |
- UCODE_ID_SDMA0_MASK |
- UCODE_ID_SDMA1_MASK |
- UCODE_ID_CP_CE_MASK |
- UCODE_ID_CP_ME_MASK |
- UCODE_ID_CP_PFP_MASK |
- UCODE_ID_CP_MEC_MASK |
- UCODE_ID_CP_MEC_JT1_MASK;
-
-
- if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
- DRM_ERROR("Fail to request SMU load ucode\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev,
- uint32_t fw_type)
-{
- uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type);
- int i;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("check firmware loading failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int iceland_smu_start(struct amdgpu_device *adev)
-{
- int result;
-
- result = iceland_smu_upload_firmware_image(adev);
- if (result)
- return result;
- result = iceland_smu_start_smc(adev);
- if (result)
- return result;
-
- return iceland_smu_request_load_fw(adev);
-}
-
-static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = {
- .check_fw_load_finish = iceland_smu_check_fw_load_finish,
- .request_smu_load_fw = NULL,
- .request_smu_specific_fw = NULL,
-};
-
-int iceland_smu_init(struct amdgpu_device *adev)
-{
- struct iceland_smu_private_data *private;
- uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
- uint64_t mc_addr;
- void *toc_buf_ptr;
- int ret;
-
- private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL);
- if (NULL == private)
- return -ENOMEM;
-
- /* allocate firmware buffers */
- if (adev->firmware.smu_load)
- amdgpu_ucode_init_bo(adev);
-
- adev->smu.priv = private;
- adev->smu.fw_flags = 0;
-
- /* Allocate FW image data structure and header buffer */
- ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, toc_buf);
- if (ret) {
- DRM_ERROR("Failed to allocate memory for TOC buffer\n");
- return -ENOMEM;
- }
-
- /* Retrieve GPU address for header buffer and internal buffer */
- ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to reserve the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to pin the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to map the TOC buffer\n");
- return -EINVAL;
- }
-
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- private->header_addr_low = lower_32_bits(mc_addr);
- private->header_addr_high = upper_32_bits(mc_addr);
- private->header = toc_buf_ptr;
-
- adev->smu.smumgr_funcs = &iceland_smumgr_funcs;
-
- return 0;
-}
-
-int iceland_smu_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_unref(&adev->smu.toc_buf);
- kfree(adev->smu.priv);
- adev->smu.priv = NULL;
- if (adev->firmware.fw_buf)
- amdgpu_ucode_fini_bo(adev);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index a845e883f5fa..f8618a3881a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2845,7 +2845,11 @@ static int kv_dpm_init(struct amdgpu_device *adev)
pi->caps_tcp_ramping = true;
}
- pi->caps_sclk_ds = true;
+ pi->caps_sclk_ds = amdgpu_sclk_deep_sleep_en;
+
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
if (amdgpu_bapm == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h b/drivers/gpu/drm/amd/amdgpu/r600_dpm.h
new file mode 100644
index 000000000000..055321f61ca7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/r600_dpm.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __R600_DPM_H__
+#define __R600_DPM_H__
+
+#define R600_ASI_DFLT 10000
+#define R600_BSP_DFLT 0x41EB
+#define R600_BSU_DFLT 0x2
+#define R600_AH_DFLT 5
+#define R600_RLP_DFLT 25
+#define R600_RMP_DFLT 65
+#define R600_LHP_DFLT 40
+#define R600_LMP_DFLT 15
+#define R600_TD_DFLT 0
+#define R600_UTC_DFLT_00 0x24
+#define R600_UTC_DFLT_01 0x22
+#define R600_UTC_DFLT_02 0x22
+#define R600_UTC_DFLT_03 0x22
+#define R600_UTC_DFLT_04 0x22
+#define R600_UTC_DFLT_05 0x22
+#define R600_UTC_DFLT_06 0x22
+#define R600_UTC_DFLT_07 0x22
+#define R600_UTC_DFLT_08 0x22
+#define R600_UTC_DFLT_09 0x22
+#define R600_UTC_DFLT_10 0x22
+#define R600_UTC_DFLT_11 0x22
+#define R600_UTC_DFLT_12 0x22
+#define R600_UTC_DFLT_13 0x22
+#define R600_UTC_DFLT_14 0x22
+#define R600_DTC_DFLT_00 0x24
+#define R600_DTC_DFLT_01 0x22
+#define R600_DTC_DFLT_02 0x22
+#define R600_DTC_DFLT_03 0x22
+#define R600_DTC_DFLT_04 0x22
+#define R600_DTC_DFLT_05 0x22
+#define R600_DTC_DFLT_06 0x22
+#define R600_DTC_DFLT_07 0x22
+#define R600_DTC_DFLT_08 0x22
+#define R600_DTC_DFLT_09 0x22
+#define R600_DTC_DFLT_10 0x22
+#define R600_DTC_DFLT_11 0x22
+#define R600_DTC_DFLT_12 0x22
+#define R600_DTC_DFLT_13 0x22
+#define R600_DTC_DFLT_14 0x22
+#define R600_VRC_DFLT 0x0000C003
+#define R600_VOLTAGERESPONSETIME_DFLT 1000
+#define R600_BACKBIASRESPONSETIME_DFLT 1000
+#define R600_VRU_DFLT 0x3
+#define R600_SPLLSTEPTIME_DFLT 0x1000
+#define R600_SPLLSTEPUNIT_DFLT 0x3
+#define R600_TPU_DFLT 0
+#define R600_TPC_DFLT 0x200
+#define R600_SSTU_DFLT 0
+#define R600_SST_DFLT 0x00C8
+#define R600_GICST_DFLT 0x200
+#define R600_FCT_DFLT 0x0400
+#define R600_FCTU_DFLT 0
+#define R600_CTXCGTT3DRPHC_DFLT 0x20
+#define R600_CTXCGTT3DRSDC_DFLT 0x40
+#define R600_VDDC3DOORPHC_DFLT 0x100
+#define R600_VDDC3DOORSDC_DFLT 0x7
+#define R600_VDDC3DOORSU_DFLT 0
+#define R600_MPLLLOCKTIME_DFLT 100
+#define R600_MPLLRESETTIME_DFLT 150
+#define R600_VCOSTEPPCT_DFLT 20
+#define R600_ENDINGVCOSTEPPCT_DFLT 5
+#define R600_REFERENCEDIVIDER_DFLT 4
+
+#define R600_PM_NUMBER_OF_TC 15
+#define R600_PM_NUMBER_OF_SCLKS 20
+#define R600_PM_NUMBER_OF_MCLKS 4
+#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4
+#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3
+
+/* XXX are these ok? */
+#define R600_TEMP_RANGE_MIN (90 * 1000)
+#define R600_TEMP_RANGE_MAX (120 * 1000)
+
+#define FDO_PWM_MODE_STATIC 1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+enum r600_power_level {
+ R600_POWER_LEVEL_LOW = 0,
+ R600_POWER_LEVEL_MEDIUM = 1,
+ R600_POWER_LEVEL_HIGH = 2,
+ R600_POWER_LEVEL_CTXSW = 3,
+};
+
+enum r600_td {
+ R600_TD_AUTO,
+ R600_TD_UP,
+ R600_TD_DOWN,
+};
+
+enum r600_display_watermark {
+ R600_DISPLAY_WATERMARK_LOW = 0,
+ R600_DISPLAY_WATERMARK_HIGH = 1,
+};
+
+enum r600_display_gap
+{
+ R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
+ R600_PM_DISPLAY_GAP_VBLANK = 1,
+ R600_PM_DISPLAY_GAP_WATERMARK = 2,
+ R600_PM_DISPLAY_GAP_IGNORE = 3,
+};
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 1351c7e834a2..565dab3c7218 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -190,12 +190,8 @@ out:
*/
static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
{
- u32 rptr;
-
/* XXX check if swapping is necessary on BE */
- rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;
-
- return rptr;
+ return ring->adev->wb.wb[ring->rptr_offs] >> 2;
}
/**
@@ -714,7 +710,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
- } else if (r) {
+ } else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
@@ -749,24 +745,16 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0x1FFFF8)
- bytes = 0x1FFFF8;
-
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
- ib->ptr[ib->length_dw++] = bytes;
- ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(src);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
+ unsigned bytes = count * 8;
+
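+ /* splitting into the 0x1FFFF8-byte HW limit is presumably handled by
+ * the generic VM code now
+ */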
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
@@ -774,39 +762,27 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: access flags
*
* Update PTEs by writing them manually using sDMA (CIK).
*/
-static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
- const dma_addr_t *pages_addr, uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr)
{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- value = amdgpu_vm_map_gart(pages_addr, addr);
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
+ unsigned ndw = count * 2;
+
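+ /* a single write packet; the caller presumably keeps ndw under the
+ * 0xFFFFE-dword limit
+ */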
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ ib->ptr[ib->length_dw++] = lower_32_bits(value);
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ value += incr;
}
}
@@ -822,40 +798,21 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
*
* Update the page tables using sDMA (CIK).
*/
-static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
- uint64_t pe,
+static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & AMDGPU_PTE_VALID)
- value = addr;
- else
- value = 0;
-
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
- }
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = count; /* number of entries */
}
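Where the write path streams every PTE through the ring, SDMA_OP_GEN_PTEPDE hands the engine a base value, a mask, an increment and an entry count and lets it synthesize the table itself, which is why the rewritten function is a fixed ten dwords regardless of count. A sketch of the entries such a packet describes; OR-ing the flag bits into each address is an assumption modelled on the manual write path, not a statement about the exact hardware masking:

#include <stdint.h>
#include <stdio.h>

/* Entries a GEN_PTEPDE packet asks the engine to generate; the flag OR
 * is a modelling assumption, see the note above. */
static void gen_ptepde_model(uint64_t *pt, uint64_t addr, uint64_t flags,
			     unsigned count, uint32_t incr)
{
	unsigned i;

	for (i = 0; i < count; i++) {
		pt[i] = addr | flags;	/* address plus access bits    */
		addr += incr;		/* the packet's increment size */
	}
}

int main(void)
{
	uint64_t pt[4];

	gen_ptepde_model(pt, 0x100000000ULL, 0x1, 4, 0x1000);
	printf("last entry 0x%llx\n", (unsigned long long)pt[3]);
	return 0;
}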
/**
@@ -945,6 +902,22 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
+static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 7 + 6; /* sdma_v2_4_ring_emit_ib */
+}
+
+static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 6 + /* sdma_v2_4_ring_emit_hdp_flush */
+ 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+ 6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+ 12 + /* sdma_v2_4_ring_emit_vm_flush */
+ 10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+}
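These two callbacks report worst-case dword budgets so a caller can reserve ring space before emitting a frame; the sums are just the per-packet sizes of the emit functions named in the comments. A self-contained arithmetic sketch (the two-IB submission is hypothetical):

#include <stdio.h>

/* Worst-case dword budgets copied from the two callbacks above. */
#define DMA_FRAME_SIZE	(6 + 3 + 6 + 12 + 10 + 10 + 10)	/* 57 dwords */
#define EMIT_IB_SIZE	(7 + 6)				/* 13 dwords */

int main(void)
{
	unsigned num_ibs = 2;	/* hypothetical submission */
	unsigned ndw = DMA_FRAME_SIZE + num_ibs * EMIT_IB_SIZE;

	printf("reserve %u dwords for %u IBs\n", ndw, num_ibs);
	return 0;
}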
+
static int sdma_v2_4_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1263,6 +1236,8 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.test_ib = sdma_v2_4_ring_test_ib,
.insert_nop = sdma_v2_4_ring_insert_nop,
.pad_ib = sdma_v2_4_ring_pad_ib,
+ .get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
+ .get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
};
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 653ce5ed55ae..f325fd86430b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -335,12 +335,8 @@ out:
*/
static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
- u32 rptr;
-
/* XXX check if swapping is necessary on BE */
- rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;
-
- return rptr;
+ return ring->adev->wb.wb[ring->rptr_offs] >> 2;
}
/**
@@ -499,31 +495,6 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
-unsigned init_cond_exec(struct amdgpu_ring *ring)
-{
- unsigned ret;
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
- amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, 1);
- ret = ring->wptr;/* this is the offset we need patch later */
- amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
- return ret;
-}
-
-void patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
-{
- unsigned cur;
- BUG_ON(ring->ring[offset] != 0x55aa55aa);
-
- cur = ring->wptr - 1;
- if (likely(cur > offset))
- ring->ring[offset] = cur - offset;
- else
- ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
-}
-
-
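For reference, the removed patch_cond_exec() back-patched its 0x55aa55aa placeholder with the dword distance from the placeholder slot to the current write pointer, wrapping around the ring when wptr had already passed the end. That arithmetic, extracted into a testable model:

#include <assert.h>
#include <stdio.h>

/* Dwords from the back-patched placeholder slot to the current write
 * pointer, on a ring of ring_dw dwords. */
static unsigned cond_exec_distance(unsigned cur, unsigned offset,
				   unsigned ring_dw)
{
	return cur > offset ? cur - offset : ring_dw - offset + cur;
}

int main(void)
{
	assert(cond_exec_distance(100, 90, 1024) == 10);	/* no wrap */
	assert(cond_exec_distance(5, 1020, 1024) == 9);		/* wrapped */
	printf("ok\n");
	return 0;
}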
/**
* sdma_v3_0_gfx_stop - stop the gfx async dma engines
*
@@ -976,24 +947,16 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0x1FFFF8)
- bytes = 0x1FFFF8;
-
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
- ib->ptr[ib->length_dw++] = bytes;
- ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(src);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
+ unsigned bytes = count * 8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
@@ -1001,39 +964,27 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: value to write into the page entries
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: access flags
*
* Update PTEs by writing them manually using sDMA (VI).
*/
-static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
- const dma_addr_t *pages_addr, uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
-{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- value = amdgpu_vm_map_gart(pages_addr, addr);
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
+static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr)
+{
+ unsigned ndw = count * 2;
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ ib->ptr[ib->length_dw++] = lower_32_bits(value);
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ value += incr;
}
}
@@ -1049,40 +1000,21 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
*
* Update the page tables using sDMA (VI).
*/
-static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
- uint64_t pe,
+static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & AMDGPU_PTE_VALID)
- value = addr;
- else
- value = 0;
-
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
- }
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = count; /* number of entries */
}
/**
@@ -1172,6 +1104,22 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
+static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 7 + 6; /* sdma_v3_0_ring_emit_ib */
+}
+
+static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 6 + /* sdma_v3_0_ring_emit_hdp_flush */
+ 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+ 6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+ 12 + /* sdma_v3_0_ring_emit_vm_flush */
+ 10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+}
+
static int sdma_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1320,28 +1268,79 @@ static int sdma_v3_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v3_0_soft_reset(void *handle)
+static int sdma_v3_0_check_soft_reset(void *handle)
{
- u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS2);
- if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
- /* sdma0 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
- tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+ if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
+ (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
- }
- if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
- /* sdma1 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
- tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
}
if (srbm_soft_reset) {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true;
+ adev->sdma.srbm_soft_reset = srbm_soft_reset;
+ } else {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false;
+ adev->sdma.srbm_soft_reset = 0;
+ }
+
+ return 0;
+}
+
+static int sdma_v3_0_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ return 0;
+
+ srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+ if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
+ REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
+ sdma_v3_0_ctx_switch_enable(adev, false);
+ sdma_v3_0_enable(adev, false);
+ }
+
+ return 0;
+}
+
+static int sdma_v3_0_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ return 0;
+
+ srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+ if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
+ REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
+ sdma_v3_0_gfx_resume(adev);
+ sdma_v3_0_rlc_resume(adev);
+ }
+
+ return 0;
+}
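Together the new hooks split the old monolithic reset into detect, quiesce, reset and resume stages keyed off the per-block hang flag: check_soft_reset records the hang, pre_soft_reset halts the engines, soft_reset (below) pulses SRBM, and post_soft_reset restarts the rings. A compilable sketch of that ordering; ip_recover() is illustrative, not the amdgpu core entry point:

/* Illustrative ordering only; handle and hang stand in for adev and the
 * ip_block_status hang flag, and none of this is core driver code. */
struct ip_hooks {
	int (*check_soft_reset)(void *handle);
	int (*pre_soft_reset)(void *handle);
	int (*soft_reset)(void *handle);
	int (*post_soft_reset)(void *handle);
};

static int ip_recover(const struct ip_hooks *f, void *handle, const int *hang)
{
	f->check_soft_reset(handle);		/* latch hang + reset mask   */
	if (!*hang)
		return 0;			/* each hook also early-outs */
	f->pre_soft_reset(handle);		/* stop ctx switch + engines */
	f->soft_reset(handle);			/* pulse SRBM_SOFT_RESET     */
	return f->post_soft_reset(handle);	/* gfx_resume + rlc_resume   */
}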
+
+static int sdma_v3_0_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+ u32 tmp;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ return 0;
+
+ srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+ if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -1559,6 +1558,9 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.resume = sdma_v3_0_resume,
.is_idle = sdma_v3_0_is_idle,
.wait_for_idle = sdma_v3_0_wait_for_idle,
+ .check_soft_reset = sdma_v3_0_check_soft_reset,
+ .pre_soft_reset = sdma_v3_0_pre_soft_reset,
+ .post_soft_reset = sdma_v3_0_post_soft_reset,
.soft_reset = sdma_v3_0_soft_reset,
.set_clockgating_state = sdma_v3_0_set_clockgating_state,
.set_powergating_state = sdma_v3_0_set_powergating_state,
@@ -1579,6 +1581,8 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.test_ib = sdma_v3_0_ring_test_ib,
.insert_nop = sdma_v3_0_ring_insert_nop,
.pad_ib = sdma_v3_0_ring_pad_ib,
+ .get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
};
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
new file mode 100644
index 000000000000..dc9511c5ecb8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -0,0 +1,1965 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_uvd.h"
+#include "amdgpu_vce.h"
+#include "atom.h"
+#include "amdgpu_powerplay.h"
+#include "si/sid.h"
+#include "si_ih.h"
+#include "gfx_v6_0.h"
+#include "gmc_v6_0.h"
+#include "si_dma.h"
+#include "dce_v6_0.h"
+#include "si.h"
+
+static const u32 tahiti_golden_registers[] =
+{
+ 0x2684, 0x00010000, 0x00018208,
+ 0x260c, 0xffffffff, 0x00000000,
+ 0x260d, 0xf00fffff, 0x00000400,
+ 0x260e, 0x0002021c, 0x00020200,
+ 0x031e, 0x00000080, 0x00000000,
+ 0x340c, 0x000300c0, 0x00800040,
+ 0x360c, 0x000300c0, 0x00800040,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f0, 0x00200000, 0x50100000,
+ 0x1c0c, 0x31000311, 0x00000011,
+ 0x09df, 0x00000003, 0x000007ff,
+ 0x0903, 0x000007ff, 0x00000000,
+ 0x2285, 0xf000001f, 0x00000007,
+ 0x22c9, 0xffffffff, 0x00ffffff,
+ 0x22c4, 0x0000ff0f, 0x00000000,
+ 0xa293, 0x07ffffff, 0x4e000000,
+ 0xa0d4, 0x3f3f3fff, 0x2a00126a,
+ 0x000c, 0x000000ff, 0x0040,
+ 0x000d, 0x00000040, 0x00004040,
+ 0x2440, 0x07ffffff, 0x03000000,
+ 0x23a2, 0x01ff1f3f, 0x00000000,
+ 0x23a1, 0x01ff1f3f, 0x00000000,
+ 0x2418, 0x0000007f, 0x00000020,
+ 0x2542, 0x00010000, 0x00010000,
+ 0x2b05, 0x00000200, 0x000002fb,
+ 0x2b04, 0xffffffff, 0x0000543b,
+ 0x2b03, 0xffffffff, 0xa9210876,
+ 0x2234, 0xffffffff, 0x000fff40,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0x0504, 0x20000000, 0x20fffed8,
+ 0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 tahiti_golden_registers2[] =
+{
+ 0x0319, 0x00000001, 0x00000001
+};
+
+static const u32 tahiti_golden_rlc_registers[] =
+{
+ 0x3109, 0xffffffff, 0x00601005,
+ 0x311f, 0xffffffff, 0x10104040,
+ 0x3122, 0xffffffff, 0x0100000a,
+ 0x30c5, 0xffffffff, 0x00000800,
+ 0x30c3, 0xffffffff, 0x800000f4,
+ 0x3d2a, 0xffffffff, 0x00000000
+};
+
+static const u32 pitcairn_golden_registers[] =
+{
+ 0x2684, 0x00010000, 0x00018208,
+ 0x260c, 0xffffffff, 0x00000000,
+ 0x260d, 0xf00fffff, 0x00000400,
+ 0x260e, 0x0002021c, 0x00020200,
+ 0x031e, 0x00000080, 0x00000000,
+ 0x340c, 0x000300c0, 0x00800040,
+ 0x360c, 0x000300c0, 0x00800040,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f0, 0x00200000, 0x50100000,
+ 0x1c0c, 0x31000311, 0x00000011,
+ 0x0ab9, 0x00073ffe, 0x000022a2,
+ 0x0903, 0x000007ff, 0x00000000,
+ 0x2285, 0xf000001f, 0x00000007,
+ 0x22c9, 0xffffffff, 0x00ffffff,
+ 0x22c4, 0x0000ff0f, 0x00000000,
+ 0xa293, 0x07ffffff, 0x4e000000,
+ 0xa0d4, 0x3f3f3fff, 0x2a00126a,
+ 0x000c, 0x000000ff, 0x0040,
+ 0x000d, 0x00000040, 0x00004040,
+ 0x2440, 0x07ffffff, 0x03000000,
+ 0x2418, 0x0000007f, 0x00000020,
+ 0x2542, 0x00010000, 0x00010000,
+ 0x2b05, 0x000003ff, 0x000000f7,
+ 0x2b04, 0xffffffff, 0x00000000,
+ 0x2b03, 0xffffffff, 0x32761054,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 pitcairn_golden_rlc_registers[] =
+{
+ 0x3109, 0xffffffff, 0x00601004,
+ 0x311f, 0xffffffff, 0x10102020,
+ 0x3122, 0xffffffff, 0x01000020,
+ 0x30c5, 0xffffffff, 0x00000800,
+ 0x30c3, 0xffffffff, 0x800000a4
+};
+
+static const u32 verde_pg_init[] =
+{
+ 0xd4f, 0xffffffff, 0x40000,
+ 0xd4e, 0xffffffff, 0x200010ff,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x7007,
+ 0xd4e, 0xffffffff, 0x300010ff,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x400000,
+ 0xd4e, 0xffffffff, 0x100010ff,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x120200,
+ 0xd4e, 0xffffffff, 0x500010ff,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x1e1e16,
+ 0xd4e, 0xffffffff, 0x600010ff,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x171f1e,
+ 0xd4e, 0xffffffff, 0x700010ff,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4e, 0xffffffff, 0x9ff,
+ 0xd40, 0xffffffff, 0x0,
+ 0xd41, 0xffffffff, 0x10000800,
+ 0xd41, 0xffffffff, 0xf,
+ 0xd41, 0xffffffff, 0xf,
+ 0xd40, 0xffffffff, 0x4,
+ 0xd41, 0xffffffff, 0x1000051e,
+ 0xd41, 0xffffffff, 0xffff,
+ 0xd41, 0xffffffff, 0xffff,
+ 0xd40, 0xffffffff, 0x8,
+ 0xd41, 0xffffffff, 0x80500,
+ 0xd40, 0xffffffff, 0x12,
+ 0xd41, 0xffffffff, 0x9050c,
+ 0xd40, 0xffffffff, 0x1d,
+ 0xd41, 0xffffffff, 0xb052c,
+ 0xd40, 0xffffffff, 0x2a,
+ 0xd41, 0xffffffff, 0x1053e,
+ 0xd40, 0xffffffff, 0x2d,
+ 0xd41, 0xffffffff, 0x10546,
+ 0xd40, 0xffffffff, 0x30,
+ 0xd41, 0xffffffff, 0xa054e,
+ 0xd40, 0xffffffff, 0x3c,
+ 0xd41, 0xffffffff, 0x1055f,
+ 0xd40, 0xffffffff, 0x3f,
+ 0xd41, 0xffffffff, 0x10567,
+ 0xd40, 0xffffffff, 0x42,
+ 0xd41, 0xffffffff, 0x1056f,
+ 0xd40, 0xffffffff, 0x45,
+ 0xd41, 0xffffffff, 0x10572,
+ 0xd40, 0xffffffff, 0x48,
+ 0xd41, 0xffffffff, 0x20575,
+ 0xd40, 0xffffffff, 0x4c,
+ 0xd41, 0xffffffff, 0x190801,
+ 0xd40, 0xffffffff, 0x67,
+ 0xd41, 0xffffffff, 0x1082a,
+ 0xd40, 0xffffffff, 0x6a,
+ 0xd41, 0xffffffff, 0x1b082d,
+ 0xd40, 0xffffffff, 0x87,
+ 0xd41, 0xffffffff, 0x310851,
+ 0xd40, 0xffffffff, 0xba,
+ 0xd41, 0xffffffff, 0x891,
+ 0xd40, 0xffffffff, 0xbc,
+ 0xd41, 0xffffffff, 0x893,
+ 0xd40, 0xffffffff, 0xbe,
+ 0xd41, 0xffffffff, 0x20895,
+ 0xd40, 0xffffffff, 0xc2,
+ 0xd41, 0xffffffff, 0x20899,
+ 0xd40, 0xffffffff, 0xc6,
+ 0xd41, 0xffffffff, 0x2089d,
+ 0xd40, 0xffffffff, 0xca,
+ 0xd41, 0xffffffff, 0x8a1,
+ 0xd40, 0xffffffff, 0xcc,
+ 0xd41, 0xffffffff, 0x8a3,
+ 0xd40, 0xffffffff, 0xce,
+ 0xd41, 0xffffffff, 0x308a5,
+ 0xd40, 0xffffffff, 0xd3,
+ 0xd41, 0xffffffff, 0x6d08cd,
+ 0xd40, 0xffffffff, 0x142,
+ 0xd41, 0xffffffff, 0x2000095a,
+ 0xd41, 0xffffffff, 0x1,
+ 0xd40, 0xffffffff, 0x144,
+ 0xd41, 0xffffffff, 0x301f095b,
+ 0xd40, 0xffffffff, 0x165,
+ 0xd41, 0xffffffff, 0xc094d,
+ 0xd40, 0xffffffff, 0x173,
+ 0xd41, 0xffffffff, 0xf096d,
+ 0xd40, 0xffffffff, 0x184,
+ 0xd41, 0xffffffff, 0x15097f,
+ 0xd40, 0xffffffff, 0x19b,
+ 0xd41, 0xffffffff, 0xc0998,
+ 0xd40, 0xffffffff, 0x1a9,
+ 0xd41, 0xffffffff, 0x409a7,
+ 0xd40, 0xffffffff, 0x1af,
+ 0xd41, 0xffffffff, 0xcdc,
+ 0xd40, 0xffffffff, 0x1b1,
+ 0xd41, 0xffffffff, 0x800,
+ 0xd42, 0xffffffff, 0x6c9b2000,
+ 0xd44, 0xfc00, 0x2000,
+ 0xd51, 0xffffffff, 0xfc0,
+ 0xa35, 0x00000100, 0x100
+};
+
+static const u32 verde_golden_rlc_registers[] =
+{
+ 0x3109, 0xffffffff, 0x033f1005,
+ 0x311f, 0xffffffff, 0x10808020,
+ 0x3122, 0xffffffff, 0x00800008,
+ 0x30c5, 0xffffffff, 0x00001000,
+ 0x30c3, 0xffffffff, 0x80010014
+};
+
+static const u32 verde_golden_registers[] =
+{
+ 0x2684, 0x00010000, 0x00018208,
+ 0x260c, 0xffffffff, 0x00000000,
+ 0x260d, 0xf00fffff, 0x00000400,
+ 0x260e, 0x0002021c, 0x00020200,
+ 0x031e, 0x00000080, 0x00000000,
+ 0x340c, 0x000300c0, 0x00800040,
+ 0x340c, 0x000300c0, 0x00800040,
+ 0x360c, 0x000300c0, 0x00800040,
+ 0x360c, 0x000300c0, 0x00800040,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f0, 0x00200000, 0x50100000,
+
+ 0x1c0c, 0x31000311, 0x00000011,
+ 0x0ab9, 0x00073ffe, 0x000022a2,
+ 0x0ab9, 0x00073ffe, 0x000022a2,
+ 0x0ab9, 0x00073ffe, 0x000022a2,
+ 0x0903, 0x000007ff, 0x00000000,
+ 0x0903, 0x000007ff, 0x00000000,
+ 0x0903, 0x000007ff, 0x00000000,
+ 0x2285, 0xf000001f, 0x00000007,
+ 0x2285, 0xf000001f, 0x00000007,
+ 0x2285, 0xf000001f, 0x00000007,
+ 0x2285, 0xffffffff, 0x00ffffff,
+ 0x22c4, 0x0000ff0f, 0x00000000,
+
+ 0xa293, 0x07ffffff, 0x4e000000,
+ 0xa0d4, 0x3f3f3fff, 0x0000124a,
+ 0xa0d4, 0x3f3f3fff, 0x0000124a,
+ 0xa0d4, 0x3f3f3fff, 0x0000124a,
+ 0x000c, 0x000000ff, 0x0040,
+ 0x000d, 0x00000040, 0x00004040,
+ 0x2440, 0x07ffffff, 0x03000000,
+ 0x2440, 0x07ffffff, 0x03000000,
+ 0x23a2, 0x01ff1f3f, 0x00000000,
+ 0x23a3, 0x01ff1f3f, 0x00000000,
+ 0x23a2, 0x01ff1f3f, 0x00000000,
+ 0x23a1, 0x01ff1f3f, 0x00000000,
+ 0x23a1, 0x01ff1f3f, 0x00000000,
+
+ 0x23a1, 0x01ff1f3f, 0x00000000,
+ 0x2418, 0x0000007f, 0x00000020,
+ 0x2542, 0x00010000, 0x00010000,
+ 0x2b01, 0x000003ff, 0x00000003,
+ 0x2b05, 0x000003ff, 0x00000003,
+ 0x2b05, 0x000003ff, 0x00000003,
+ 0x2b04, 0xffffffff, 0x00000000,
+ 0x2b04, 0xffffffff, 0x00000000,
+ 0x2b04, 0xffffffff, 0x00000000,
+ 0x2b03, 0xffffffff, 0x00001032,
+ 0x2b03, 0xffffffff, 0x00001032,
+ 0x2b03, 0xffffffff, 0x00001032,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_registers[] =
+{
+ 0x2684, 0x00010000, 0x00018208,
+ 0x260c, 0xffffffff, 0x00000000,
+ 0x260d, 0xf00fffff, 0x00000400,
+ 0x260e, 0x0002021c, 0x00020200,
+ 0x031e, 0x00000080, 0x00000000,
+ 0x340c, 0x000300c0, 0x00800040,
+ 0x360c, 0x000300c0, 0x00800040,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f9, 0x00200000, 0x50100000,
+ 0x1c0c, 0x31000311, 0x00000011,
+ 0x0ab9, 0x00073ffe, 0x000022a2,
+ 0x0903, 0x000007ff, 0x00000000,
+ 0x2285, 0xf000001f, 0x00000007,
+ 0x22c9, 0xffffffff, 0x00ffffff,
+ 0x22c4, 0x0000ff0f, 0x00000000,
+ 0xa293, 0x07ffffff, 0x4e000000,
+ 0xa0d4, 0x3f3f3fff, 0x00000082,
+ 0x000c, 0x000000ff, 0x0040,
+ 0x000d, 0x00000040, 0x00004040,
+ 0x2440, 0x07ffffff, 0x03000000,
+ 0x2418, 0x0000007f, 0x00000020,
+ 0x2542, 0x00010000, 0x00010000,
+ 0x2b05, 0x000003ff, 0x000000f3,
+ 0x2b04, 0xffffffff, 0x00000000,
+ 0x2b03, 0xffffffff, 0x00003210,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_rlc_registers[] =
+{
+ 0x3109, 0xffffffff, 0x00601005,
+ 0x311f, 0xffffffff, 0x10104040,
+ 0x3122, 0xffffffff, 0x0100000a,
+ 0x30c5, 0xffffffff, 0x00000800,
+ 0x30c3, 0xffffffff, 0x800000f4
+};
+
+static const u32 hainan_golden_registers[] =
+{
+ 0x2684, 0x00010000, 0x00018208,
+ 0x260c, 0xffffffff, 0x00000000,
+ 0x260d, 0xf00fffff, 0x00000400,
+ 0x260e, 0x0002021c, 0x00020200,
+ 0x4595, 0xff000fff, 0x00000100,
+ 0x340c, 0x000300c0, 0x00800040,
+ 0x3630, 0xff000fff, 0x00000100,
+ 0x360c, 0x000300c0, 0x00800040,
+ 0x0ab9, 0x00073ffe, 0x000022a2,
+ 0x0903, 0x000007ff, 0x00000000,
+ 0x2285, 0xf000001f, 0x00000007,
+ 0x22c9, 0xffffffff, 0x00ffffff,
+ 0x22c4, 0x0000ff0f, 0x00000000,
+ 0xa393, 0x07ffffff, 0x4e000000,
+ 0xa0d4, 0x3f3f3fff, 0x00000000,
+ 0x000c, 0x000000ff, 0x0040,
+ 0x000d, 0x00000040, 0x00004040,
+ 0x2440, 0x03e00000, 0x03600000,
+ 0x2418, 0x0000007f, 0x00000020,
+ 0x2542, 0x00010000, 0x00010000,
+ 0x2b05, 0x000003ff, 0x000000f1,
+ 0x2b04, 0xffffffff, 0x00000000,
+ 0x2b03, 0xffffffff, 0x00003210,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 hainan_golden_registers2[] =
+{
+ 0x263e, 0xffffffff, 0x02010001
+};
+
+static const u32 tahiti_mgcg_cgcg_init[] =
+{
+ 0x3100, 0xffffffff, 0xfffffffc,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2698, 0xffffffff, 0x00000100,
+ 0x24a9, 0xffffffff, 0x00000100,
+ 0x3059, 0xffffffff, 0x00000100,
+ 0x25dd, 0xffffffff, 0x00000100,
+ 0x2261, 0xffffffff, 0x06000100,
+ 0x2286, 0xffffffff, 0x00000100,
+ 0x24a8, 0xffffffff, 0x00000100,
+ 0x30e0, 0xffffffff, 0x00000100,
+ 0x22ca, 0xffffffff, 0x00000100,
+ 0x2451, 0xffffffff, 0x00000100,
+ 0x2362, 0xffffffff, 0x00000100,
+ 0x2363, 0xffffffff, 0x00000100,
+ 0x240c, 0xffffffff, 0x00000100,
+ 0x240d, 0xffffffff, 0x00000100,
+ 0x240e, 0xffffffff, 0x00000100,
+ 0x240f, 0xffffffff, 0x00000100,
+ 0x2b60, 0xffffffff, 0x00000100,
+ 0x2b15, 0xffffffff, 0x00000100,
+ 0x225f, 0xffffffff, 0x06000100,
+ 0x261a, 0xffffffff, 0x00000100,
+ 0x2544, 0xffffffff, 0x00000100,
+ 0x2bc1, 0xffffffff, 0x00000100,
+ 0x2b81, 0xffffffff, 0x00000100,
+ 0x2527, 0xffffffff, 0x00000100,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2458, 0xffffffff, 0x00010000,
+ 0x2459, 0xffffffff, 0x00030002,
+ 0x245a, 0xffffffff, 0x00040007,
+ 0x245b, 0xffffffff, 0x00060005,
+ 0x245c, 0xffffffff, 0x00090008,
+ 0x245d, 0xffffffff, 0x00020001,
+ 0x245e, 0xffffffff, 0x00040003,
+ 0x245f, 0xffffffff, 0x00000007,
+ 0x2460, 0xffffffff, 0x00060005,
+ 0x2461, 0xffffffff, 0x00090008,
+ 0x2462, 0xffffffff, 0x00030002,
+ 0x2463, 0xffffffff, 0x00050004,
+ 0x2464, 0xffffffff, 0x00000008,
+ 0x2465, 0xffffffff, 0x00070006,
+ 0x2466, 0xffffffff, 0x000a0009,
+ 0x2467, 0xffffffff, 0x00040003,
+ 0x2468, 0xffffffff, 0x00060005,
+ 0x2469, 0xffffffff, 0x00000009,
+ 0x246a, 0xffffffff, 0x00080007,
+ 0x246b, 0xffffffff, 0x000b000a,
+ 0x246c, 0xffffffff, 0x00050004,
+ 0x246d, 0xffffffff, 0x00070006,
+ 0x246e, 0xffffffff, 0x0008000b,
+ 0x246f, 0xffffffff, 0x000a0009,
+ 0x2470, 0xffffffff, 0x000d000c,
+ 0x2471, 0xffffffff, 0x00060005,
+ 0x2472, 0xffffffff, 0x00080007,
+ 0x2473, 0xffffffff, 0x0000000b,
+ 0x2474, 0xffffffff, 0x000a0009,
+ 0x2475, 0xffffffff, 0x000d000c,
+ 0x2476, 0xffffffff, 0x00070006,
+ 0x2477, 0xffffffff, 0x00090008,
+ 0x2478, 0xffffffff, 0x0000000c,
+ 0x2479, 0xffffffff, 0x000b000a,
+ 0x247a, 0xffffffff, 0x000e000d,
+ 0x247b, 0xffffffff, 0x00080007,
+ 0x247c, 0xffffffff, 0x000a0009,
+ 0x247d, 0xffffffff, 0x0000000d,
+ 0x247e, 0xffffffff, 0x000c000b,
+ 0x247f, 0xffffffff, 0x000f000e,
+ 0x2480, 0xffffffff, 0x00090008,
+ 0x2481, 0xffffffff, 0x000b000a,
+ 0x2482, 0xffffffff, 0x000c000f,
+ 0x2483, 0xffffffff, 0x000e000d,
+ 0x2484, 0xffffffff, 0x00110010,
+ 0x2485, 0xffffffff, 0x000a0009,
+ 0x2486, 0xffffffff, 0x000c000b,
+ 0x2487, 0xffffffff, 0x0000000f,
+ 0x2488, 0xffffffff, 0x000e000d,
+ 0x2489, 0xffffffff, 0x00110010,
+ 0x248a, 0xffffffff, 0x000b000a,
+ 0x248b, 0xffffffff, 0x000d000c,
+ 0x248c, 0xffffffff, 0x00000010,
+ 0x248d, 0xffffffff, 0x000f000e,
+ 0x248e, 0xffffffff, 0x00120011,
+ 0x248f, 0xffffffff, 0x000c000b,
+ 0x2490, 0xffffffff, 0x000e000d,
+ 0x2491, 0xffffffff, 0x00000011,
+ 0x2492, 0xffffffff, 0x0010000f,
+ 0x2493, 0xffffffff, 0x00130012,
+ 0x2494, 0xffffffff, 0x000d000c,
+ 0x2495, 0xffffffff, 0x000f000e,
+ 0x2496, 0xffffffff, 0x00100013,
+ 0x2497, 0xffffffff, 0x00120011,
+ 0x2498, 0xffffffff, 0x00150014,
+ 0x2499, 0xffffffff, 0x000e000d,
+ 0x249a, 0xffffffff, 0x0010000f,
+ 0x249b, 0xffffffff, 0x00000013,
+ 0x249c, 0xffffffff, 0x00120011,
+ 0x249d, 0xffffffff, 0x00150014,
+ 0x249e, 0xffffffff, 0x000f000e,
+ 0x249f, 0xffffffff, 0x00110010,
+ 0x24a0, 0xffffffff, 0x00000014,
+ 0x24a1, 0xffffffff, 0x00130012,
+ 0x24a2, 0xffffffff, 0x00160015,
+ 0x24a3, 0xffffffff, 0x0010000f,
+ 0x24a4, 0xffffffff, 0x00120011,
+ 0x24a5, 0xffffffff, 0x00000015,
+ 0x24a6, 0xffffffff, 0x00140013,
+ 0x24a7, 0xffffffff, 0x00170016,
+ 0x2454, 0xffffffff, 0x96940200,
+ 0x21c2, 0xffffffff, 0x00900100,
+ 0x311e, 0xffffffff, 0x00000080,
+ 0x3101, 0xffffffff, 0x0020003f,
+ 0xc, 0xffffffff, 0x0000001c,
+ 0xd, 0x000f0000, 0x000f0000,
+ 0x583, 0xffffffff, 0x00000100,
+ 0x409, 0xffffffff, 0x00000100,
+ 0x40b, 0x00000101, 0x00000000,
+ 0x82a, 0xffffffff, 0x00000104,
+ 0x993, 0x000c0000, 0x000c0000,
+ 0x992, 0x000c0000, 0x000c0000,
+ 0x1579, 0xff000fff, 0x00000100,
+ 0x157a, 0x00000001, 0x00000001,
+ 0xbd4, 0x00000001, 0x00000001,
+ 0xc33, 0xc0000fff, 0x00000104,
+ 0x3079, 0x00000001, 0x00000001,
+ 0x3430, 0xfffffff0, 0x00000100,
+ 0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 pitcairn_mgcg_cgcg_init[] =
+{
+ 0x3100, 0xffffffff, 0xfffffffc,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2698, 0xffffffff, 0x00000100,
+ 0x24a9, 0xffffffff, 0x00000100,
+ 0x3059, 0xffffffff, 0x00000100,
+ 0x25dd, 0xffffffff, 0x00000100,
+ 0x2261, 0xffffffff, 0x06000100,
+ 0x2286, 0xffffffff, 0x00000100,
+ 0x24a8, 0xffffffff, 0x00000100,
+ 0x30e0, 0xffffffff, 0x00000100,
+ 0x22ca, 0xffffffff, 0x00000100,
+ 0x2451, 0xffffffff, 0x00000100,
+ 0x2362, 0xffffffff, 0x00000100,
+ 0x2363, 0xffffffff, 0x00000100,
+ 0x240c, 0xffffffff, 0x00000100,
+ 0x240d, 0xffffffff, 0x00000100,
+ 0x240e, 0xffffffff, 0x00000100,
+ 0x240f, 0xffffffff, 0x00000100,
+ 0x2b60, 0xffffffff, 0x00000100,
+ 0x2b15, 0xffffffff, 0x00000100,
+ 0x225f, 0xffffffff, 0x06000100,
+ 0x261a, 0xffffffff, 0x00000100,
+ 0x2544, 0xffffffff, 0x00000100,
+ 0x2bc1, 0xffffffff, 0x00000100,
+ 0x2b81, 0xffffffff, 0x00000100,
+ 0x2527, 0xffffffff, 0x00000100,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2458, 0xffffffff, 0x00010000,
+ 0x2459, 0xffffffff, 0x00030002,
+ 0x245a, 0xffffffff, 0x00040007,
+ 0x245b, 0xffffffff, 0x00060005,
+ 0x245c, 0xffffffff, 0x00090008,
+ 0x245d, 0xffffffff, 0x00020001,
+ 0x245e, 0xffffffff, 0x00040003,
+ 0x245f, 0xffffffff, 0x00000007,
+ 0x2460, 0xffffffff, 0x00060005,
+ 0x2461, 0xffffffff, 0x00090008,
+ 0x2462, 0xffffffff, 0x00030002,
+ 0x2463, 0xffffffff, 0x00050004,
+ 0x2464, 0xffffffff, 0x00000008,
+ 0x2465, 0xffffffff, 0x00070006,
+ 0x2466, 0xffffffff, 0x000a0009,
+ 0x2467, 0xffffffff, 0x00040003,
+ 0x2468, 0xffffffff, 0x00060005,
+ 0x2469, 0xffffffff, 0x00000009,
+ 0x246a, 0xffffffff, 0x00080007,
+ 0x246b, 0xffffffff, 0x000b000a,
+ 0x246c, 0xffffffff, 0x00050004,
+ 0x246d, 0xffffffff, 0x00070006,
+ 0x246e, 0xffffffff, 0x0008000b,
+ 0x246f, 0xffffffff, 0x000a0009,
+ 0x2470, 0xffffffff, 0x000d000c,
+ 0x2480, 0xffffffff, 0x00090008,
+ 0x2481, 0xffffffff, 0x000b000a,
+ 0x2482, 0xffffffff, 0x000c000f,
+ 0x2483, 0xffffffff, 0x000e000d,
+ 0x2484, 0xffffffff, 0x00110010,
+ 0x2485, 0xffffffff, 0x000a0009,
+ 0x2486, 0xffffffff, 0x000c000b,
+ 0x2487, 0xffffffff, 0x0000000f,
+ 0x2488, 0xffffffff, 0x000e000d,
+ 0x2489, 0xffffffff, 0x00110010,
+ 0x248a, 0xffffffff, 0x000b000a,
+ 0x248b, 0xffffffff, 0x000d000c,
+ 0x248c, 0xffffffff, 0x00000010,
+ 0x248d, 0xffffffff, 0x000f000e,
+ 0x248e, 0xffffffff, 0x00120011,
+ 0x248f, 0xffffffff, 0x000c000b,
+ 0x2490, 0xffffffff, 0x000e000d,
+ 0x2491, 0xffffffff, 0x00000011,
+ 0x2492, 0xffffffff, 0x0010000f,
+ 0x2493, 0xffffffff, 0x00130012,
+ 0x2494, 0xffffffff, 0x000d000c,
+ 0x2495, 0xffffffff, 0x000f000e,
+ 0x2496, 0xffffffff, 0x00100013,
+ 0x2497, 0xffffffff, 0x00120011,
+ 0x2498, 0xffffffff, 0x00150014,
+ 0x2454, 0xffffffff, 0x96940200,
+ 0x21c2, 0xffffffff, 0x00900100,
+ 0x311e, 0xffffffff, 0x00000080,
+ 0x3101, 0xffffffff, 0x0020003f,
+ 0xc, 0xffffffff, 0x0000001c,
+ 0xd, 0x000f0000, 0x000f0000,
+ 0x583, 0xffffffff, 0x00000100,
+ 0x409, 0xffffffff, 0x00000100,
+ 0x40b, 0x00000101, 0x00000000,
+ 0x82a, 0xffffffff, 0x00000104,
+ 0x1579, 0xff000fff, 0x00000100,
+ 0x157a, 0x00000001, 0x00000001,
+ 0xbd4, 0x00000001, 0x00000001,
+ 0xc33, 0xc0000fff, 0x00000104,
+ 0x3079, 0x00000001, 0x00000001,
+ 0x3430, 0xfffffff0, 0x00000100,
+ 0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 verde_mgcg_cgcg_init[] =
+{
+ 0x3100, 0xffffffff, 0xfffffffc,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2698, 0xffffffff, 0x00000100,
+ 0x24a9, 0xffffffff, 0x00000100,
+ 0x3059, 0xffffffff, 0x00000100,
+ 0x25dd, 0xffffffff, 0x00000100,
+ 0x2261, 0xffffffff, 0x06000100,
+ 0x2286, 0xffffffff, 0x00000100,
+ 0x24a8, 0xffffffff, 0x00000100,
+ 0x30e0, 0xffffffff, 0x00000100,
+ 0x22ca, 0xffffffff, 0x00000100,
+ 0x2451, 0xffffffff, 0x00000100,
+ 0x2362, 0xffffffff, 0x00000100,
+ 0x2363, 0xffffffff, 0x00000100,
+ 0x240c, 0xffffffff, 0x00000100,
+ 0x240d, 0xffffffff, 0x00000100,
+ 0x240e, 0xffffffff, 0x00000100,
+ 0x240f, 0xffffffff, 0x00000100,
+ 0x2b60, 0xffffffff, 0x00000100,
+ 0x2b15, 0xffffffff, 0x00000100,
+ 0x225f, 0xffffffff, 0x06000100,
+ 0x261a, 0xffffffff, 0x00000100,
+ 0x2544, 0xffffffff, 0x00000100,
+ 0x2bc1, 0xffffffff, 0x00000100,
+ 0x2b81, 0xffffffff, 0x00000100,
+ 0x2527, 0xffffffff, 0x00000100,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2458, 0xffffffff, 0x00010000,
+ 0x2459, 0xffffffff, 0x00030002,
+ 0x245a, 0xffffffff, 0x00040007,
+ 0x245b, 0xffffffff, 0x00060005,
+ 0x245c, 0xffffffff, 0x00090008,
+ 0x245d, 0xffffffff, 0x00020001,
+ 0x245e, 0xffffffff, 0x00040003,
+ 0x245f, 0xffffffff, 0x00000007,
+ 0x2460, 0xffffffff, 0x00060005,
+ 0x2461, 0xffffffff, 0x00090008,
+ 0x2462, 0xffffffff, 0x00030002,
+ 0x2463, 0xffffffff, 0x00050004,
+ 0x2464, 0xffffffff, 0x00000008,
+ 0x2465, 0xffffffff, 0x00070006,
+ 0x2466, 0xffffffff, 0x000a0009,
+ 0x2467, 0xffffffff, 0x00040003,
+ 0x2468, 0xffffffff, 0x00060005,
+ 0x2469, 0xffffffff, 0x00000009,
+ 0x246a, 0xffffffff, 0x00080007,
+ 0x246b, 0xffffffff, 0x000b000a,
+ 0x246c, 0xffffffff, 0x00050004,
+ 0x246d, 0xffffffff, 0x00070006,
+ 0x246e, 0xffffffff, 0x0008000b,
+ 0x246f, 0xffffffff, 0x000a0009,
+ 0x2470, 0xffffffff, 0x000d000c,
+ 0x2480, 0xffffffff, 0x00090008,
+ 0x2481, 0xffffffff, 0x000b000a,
+ 0x2482, 0xffffffff, 0x000c000f,
+ 0x2483, 0xffffffff, 0x000e000d,
+ 0x2484, 0xffffffff, 0x00110010,
+ 0x2485, 0xffffffff, 0x000a0009,
+ 0x2486, 0xffffffff, 0x000c000b,
+ 0x2487, 0xffffffff, 0x0000000f,
+ 0x2488, 0xffffffff, 0x000e000d,
+ 0x2489, 0xffffffff, 0x00110010,
+ 0x248a, 0xffffffff, 0x000b000a,
+ 0x248b, 0xffffffff, 0x000d000c,
+ 0x248c, 0xffffffff, 0x00000010,
+ 0x248d, 0xffffffff, 0x000f000e,
+ 0x248e, 0xffffffff, 0x00120011,
+ 0x248f, 0xffffffff, 0x000c000b,
+ 0x2490, 0xffffffff, 0x000e000d,
+ 0x2491, 0xffffffff, 0x00000011,
+ 0x2492, 0xffffffff, 0x0010000f,
+ 0x2493, 0xffffffff, 0x00130012,
+ 0x2494, 0xffffffff, 0x000d000c,
+ 0x2495, 0xffffffff, 0x000f000e,
+ 0x2496, 0xffffffff, 0x00100013,
+ 0x2497, 0xffffffff, 0x00120011,
+ 0x2498, 0xffffffff, 0x00150014,
+ 0x2454, 0xffffffff, 0x96940200,
+ 0x21c2, 0xffffffff, 0x00900100,
+ 0x311e, 0xffffffff, 0x00000080,
+ 0x3101, 0xffffffff, 0x0020003f,
+ 0xc, 0xffffffff, 0x0000001c,
+ 0xd, 0x000f0000, 0x000f0000,
+ 0x583, 0xffffffff, 0x00000100,
+ 0x409, 0xffffffff, 0x00000100,
+ 0x40b, 0x00000101, 0x00000000,
+ 0x82a, 0xffffffff, 0x00000104,
+ 0x993, 0x000c0000, 0x000c0000,
+ 0x992, 0x000c0000, 0x000c0000,
+ 0x1579, 0xff000fff, 0x00000100,
+ 0x157a, 0x00000001, 0x00000001,
+ 0xbd4, 0x00000001, 0x00000001,
+ 0xc33, 0xc0000fff, 0x00000104,
+ 0x3079, 0x00000001, 0x00000001,
+ 0x3430, 0xfffffff0, 0x00000100,
+ 0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 oland_mgcg_cgcg_init[] =
+{
+ 0x3100, 0xffffffff, 0xfffffffc,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2698, 0xffffffff, 0x00000100,
+ 0x24a9, 0xffffffff, 0x00000100,
+ 0x3059, 0xffffffff, 0x00000100,
+ 0x25dd, 0xffffffff, 0x00000100,
+ 0x2261, 0xffffffff, 0x06000100,
+ 0x2286, 0xffffffff, 0x00000100,
+ 0x24a8, 0xffffffff, 0x00000100,
+ 0x30e0, 0xffffffff, 0x00000100,
+ 0x22ca, 0xffffffff, 0x00000100,
+ 0x2451, 0xffffffff, 0x00000100,
+ 0x2362, 0xffffffff, 0x00000100,
+ 0x2363, 0xffffffff, 0x00000100,
+ 0x240c, 0xffffffff, 0x00000100,
+ 0x240d, 0xffffffff, 0x00000100,
+ 0x240e, 0xffffffff, 0x00000100,
+ 0x240f, 0xffffffff, 0x00000100,
+ 0x2b60, 0xffffffff, 0x00000100,
+ 0x2b15, 0xffffffff, 0x00000100,
+ 0x225f, 0xffffffff, 0x06000100,
+ 0x261a, 0xffffffff, 0x00000100,
+ 0x2544, 0xffffffff, 0x00000100,
+ 0x2bc1, 0xffffffff, 0x00000100,
+ 0x2b81, 0xffffffff, 0x00000100,
+ 0x2527, 0xffffffff, 0x00000100,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2458, 0xffffffff, 0x00010000,
+ 0x2459, 0xffffffff, 0x00030002,
+ 0x245a, 0xffffffff, 0x00040007,
+ 0x245b, 0xffffffff, 0x00060005,
+ 0x245c, 0xffffffff, 0x00090008,
+ 0x245d, 0xffffffff, 0x00020001,
+ 0x245e, 0xffffffff, 0x00040003,
+ 0x245f, 0xffffffff, 0x00000007,
+ 0x2460, 0xffffffff, 0x00060005,
+ 0x2461, 0xffffffff, 0x00090008,
+ 0x2462, 0xffffffff, 0x00030002,
+ 0x2463, 0xffffffff, 0x00050004,
+ 0x2464, 0xffffffff, 0x00000008,
+ 0x2465, 0xffffffff, 0x00070006,
+ 0x2466, 0xffffffff, 0x000a0009,
+ 0x2467, 0xffffffff, 0x00040003,
+ 0x2468, 0xffffffff, 0x00060005,
+ 0x2469, 0xffffffff, 0x00000009,
+ 0x246a, 0xffffffff, 0x00080007,
+ 0x246b, 0xffffffff, 0x000b000a,
+ 0x246c, 0xffffffff, 0x00050004,
+ 0x246d, 0xffffffff, 0x00070006,
+ 0x246e, 0xffffffff, 0x0008000b,
+ 0x246f, 0xffffffff, 0x000a0009,
+ 0x2470, 0xffffffff, 0x000d000c,
+ 0x2471, 0xffffffff, 0x00060005,
+ 0x2472, 0xffffffff, 0x00080007,
+ 0x2473, 0xffffffff, 0x0000000b,
+ 0x2474, 0xffffffff, 0x000a0009,
+ 0x2475, 0xffffffff, 0x000d000c,
+ 0x2454, 0xffffffff, 0x96940200,
+ 0x21c2, 0xffffffff, 0x00900100,
+ 0x311e, 0xffffffff, 0x00000080,
+ 0x3101, 0xffffffff, 0x0020003f,
+ 0xc, 0xffffffff, 0x0000001c,
+ 0xd, 0x000f0000, 0x000f0000,
+ 0x583, 0xffffffff, 0x00000100,
+ 0x409, 0xffffffff, 0x00000100,
+ 0x40b, 0x00000101, 0x00000000,
+ 0x82a, 0xffffffff, 0x00000104,
+ 0x993, 0x000c0000, 0x000c0000,
+ 0x992, 0x000c0000, 0x000c0000,
+ 0x1579, 0xff000fff, 0x00000100,
+ 0x157a, 0x00000001, 0x00000001,
+ 0xbd4, 0x00000001, 0x00000001,
+ 0xc33, 0xc0000fff, 0x00000104,
+ 0x3079, 0x00000001, 0x00000001,
+ 0x3430, 0xfffffff0, 0x00000100,
+ 0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 hainan_mgcg_cgcg_init[] =
+{
+ 0x3100, 0xffffffff, 0xfffffffc,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2698, 0xffffffff, 0x00000100,
+ 0x24a9, 0xffffffff, 0x00000100,
+ 0x3059, 0xffffffff, 0x00000100,
+ 0x25dd, 0xffffffff, 0x00000100,
+ 0x2261, 0xffffffff, 0x06000100,
+ 0x2286, 0xffffffff, 0x00000100,
+ 0x24a8, 0xffffffff, 0x00000100,
+ 0x30e0, 0xffffffff, 0x00000100,
+ 0x22ca, 0xffffffff, 0x00000100,
+ 0x2451, 0xffffffff, 0x00000100,
+ 0x2362, 0xffffffff, 0x00000100,
+ 0x2363, 0xffffffff, 0x00000100,
+ 0x240c, 0xffffffff, 0x00000100,
+ 0x240d, 0xffffffff, 0x00000100,
+ 0x240e, 0xffffffff, 0x00000100,
+ 0x240f, 0xffffffff, 0x00000100,
+ 0x2b60, 0xffffffff, 0x00000100,
+ 0x2b15, 0xffffffff, 0x00000100,
+ 0x225f, 0xffffffff, 0x06000100,
+ 0x261a, 0xffffffff, 0x00000100,
+ 0x2544, 0xffffffff, 0x00000100,
+ 0x2bc1, 0xffffffff, 0x00000100,
+ 0x2b81, 0xffffffff, 0x00000100,
+ 0x2527, 0xffffffff, 0x00000100,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2458, 0xffffffff, 0x00010000,
+ 0x2459, 0xffffffff, 0x00030002,
+ 0x245a, 0xffffffff, 0x00040007,
+ 0x245b, 0xffffffff, 0x00060005,
+ 0x245c, 0xffffffff, 0x00090008,
+ 0x245d, 0xffffffff, 0x00020001,
+ 0x245e, 0xffffffff, 0x00040003,
+ 0x245f, 0xffffffff, 0x00000007,
+ 0x2460, 0xffffffff, 0x00060005,
+ 0x2461, 0xffffffff, 0x00090008,
+ 0x2462, 0xffffffff, 0x00030002,
+ 0x2463, 0xffffffff, 0x00050004,
+ 0x2464, 0xffffffff, 0x00000008,
+ 0x2465, 0xffffffff, 0x00070006,
+ 0x2466, 0xffffffff, 0x000a0009,
+ 0x2467, 0xffffffff, 0x00040003,
+ 0x2468, 0xffffffff, 0x00060005,
+ 0x2469, 0xffffffff, 0x00000009,
+ 0x246a, 0xffffffff, 0x00080007,
+ 0x246b, 0xffffffff, 0x000b000a,
+ 0x246c, 0xffffffff, 0x00050004,
+ 0x246d, 0xffffffff, 0x00070006,
+ 0x246e, 0xffffffff, 0x0008000b,
+ 0x246f, 0xffffffff, 0x000a0009,
+ 0x2470, 0xffffffff, 0x000d000c,
+ 0x2471, 0xffffffff, 0x00060005,
+ 0x2472, 0xffffffff, 0x00080007,
+ 0x2473, 0xffffffff, 0x0000000b,
+ 0x2474, 0xffffffff, 0x000a0009,
+ 0x2475, 0xffffffff, 0x000d000c,
+ 0x2454, 0xffffffff, 0x96940200,
+ 0x21c2, 0xffffffff, 0x00900100,
+ 0x311e, 0xffffffff, 0x00000080,
+ 0x3101, 0xffffffff, 0x0020003f,
+ 0xc, 0xffffffff, 0x0000001c,
+ 0xd, 0x000f0000, 0x000f0000,
+ 0x583, 0xffffffff, 0x00000100,
+ 0x409, 0xffffffff, 0x00000100,
+ 0x82a, 0xffffffff, 0x00000104,
+ 0x993, 0x000c0000, 0x000c0000,
+ 0x992, 0x000c0000, 0x000c0000,
+ 0xbd4, 0x00000001, 0x00000001,
+ 0xc33, 0xc0000fff, 0x00000104,
+ 0x3079, 0x00000001, 0x00000001,
+ 0x3430, 0xfffffff0, 0x00000100,
+ 0x3630, 0xfffffff0, 0x00000100
+};
+
+static u32 si_pcie_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(AMDGPU_PCIE_INDEX, reg);
+ (void)RREG32(AMDGPU_PCIE_INDEX);
+ r = RREG32(AMDGPU_PCIE_DATA);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ return r;
+}
+
+static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(AMDGPU_PCIE_INDEX, reg);
+ (void)RREG32(AMDGPU_PCIE_INDEX);
+ WREG32(AMDGPU_PCIE_DATA, v);
+ (void)RREG32(AMDGPU_PCIE_DATA);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
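Both accessors use the classic indexed (INDEX/DATA) pattern: select the target register through the index port under a spinlock, then move the payload through the data port, with dummy read-backs to flush posted PCI writes. A user-space model of the mechanism, with an array standing in for the hidden register bank and the locking left as comments:

#include <stdint.h>

#define NREGS 256
static uint32_t bank[NREGS];	/* registers reachable only indirectly */
static uint32_t idx;		/* models AMDGPU_PCIE_INDEX */

static uint32_t pcie_rreg(uint32_t reg)
{
	/* spin_lock_irqsave(&adev->pcie_idx_lock, flags) in the driver */
	idx = reg % NREGS;	/* WREG32(AMDGPU_PCIE_INDEX, reg)          */
	/* dummy RREG32(AMDGPU_PCIE_INDEX) flushes the posted write here */
	return bank[idx];	/* RREG32(AMDGPU_PCIE_DATA)                */
}

static void pcie_wreg(uint32_t reg, uint32_t v)
{
	idx = reg % NREGS;
	bank[idx] = v;		/* WREG32(AMDGPU_PCIE_DATA, v)             */
	/* read DATA back, then spin_unlock_irqrestore(), in the driver   */
}

int main(void)
{
	pcie_wreg(0x12, 0xabcd0001);
	return pcie_rreg(0x12) == 0xabcd0001 ? 0 : 1;
}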
+
+u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+ (void)RREG32(PCIE_PORT_INDEX);
+ r = RREG32(PCIE_PORT_DATA);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ return r;
+}
+
+void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+ (void)RREG32(PCIE_PORT_INDEX);
+ WREG32(PCIE_PORT_DATA, (v));
+ (void)RREG32(PCIE_PORT_DATA);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(SMC_IND_INDEX_0, (reg));
+ r = RREG32(SMC_IND_DATA_0);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ return r;
+}
+
+static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(SMC_IND_INDEX_0, (reg));
+ WREG32(SMC_IND_DATA_0, (v));
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+}
+
+static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
+ {GRBM_STATUS, false},
+ {GB_ADDR_CONFIG, false},
+ {MC_ARB_RAMCFG, false},
+ {GB_TILE_MODE0, false},
+ {GB_TILE_MODE1, false},
+ {GB_TILE_MODE2, false},
+ {GB_TILE_MODE3, false},
+ {GB_TILE_MODE4, false},
+ {GB_TILE_MODE5, false},
+ {GB_TILE_MODE6, false},
+ {GB_TILE_MODE7, false},
+ {GB_TILE_MODE8, false},
+ {GB_TILE_MODE9, false},
+ {GB_TILE_MODE10, false},
+ {GB_TILE_MODE11, false},
+ {GB_TILE_MODE12, false},
+ {GB_TILE_MODE13, false},
+ {GB_TILE_MODE14, false},
+ {GB_TILE_MODE15, false},
+ {GB_TILE_MODE16, false},
+ {GB_TILE_MODE17, false},
+ {GB_TILE_MODE18, false},
+ {GB_TILE_MODE19, false},
+ {GB_TILE_MODE20, false},
+ {GB_TILE_MODE21, false},
+ {GB_TILE_MODE22, false},
+ {GB_TILE_MODE23, false},
+ {GB_TILE_MODE24, false},
+ {GB_TILE_MODE25, false},
+ {GB_TILE_MODE26, false},
+ {GB_TILE_MODE27, false},
+ {GB_TILE_MODE28, false},
+ {GB_TILE_MODE29, false},
+ {GB_TILE_MODE30, false},
+ {GB_TILE_MODE31, false},
+ {CC_RB_BACKEND_DISABLE, false, true},
+ {GC_USER_RB_BACKEND_DISABLE, false, true},
+ {PA_SC_RASTER_CONFIG, false, true},
+};
+
+static uint32_t si_read_indexed_register(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_num,
+ u32 reg_offset)
+{
+ uint32_t val;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+
+ val = RREG32(reg_offset);
+
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return val;
+}
+
+static int si_read_register(struct amdgpu_device *adev, u32 se_num,
+ u32 sh_num, u32 reg_offset, u32 *value)
+{
+ uint32_t i;
+
+ *value = 0;
+ for (i = 0; i < ARRAY_SIZE(si_allowed_read_registers); i++) {
+ if (reg_offset != si_allowed_read_registers[i].reg_offset)
+ continue;
+
+ if (!si_allowed_read_registers[i].untouched)
+ *value = si_allowed_read_registers[i].grbm_indexed ?
+ si_read_indexed_register(adev, se_num,
+ sh_num, reg_offset) :
+ RREG32(reg_offset);
+ return 0;
+ }
+ return -EINVAL;
+}
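si_read_register() only serves user-triggered reads for offsets present in the whitelist above, reporting zero for "untouched" entries and routing GRBM-indexed ones through the se/sh selector. A stripped-down, runnable model of the lookup; the table contents and the 0xcafef00d read value are stand-ins:

#include <stdint.h>
#include <stdio.h>

struct allowed_reg {
	uint32_t offset;
	int untouched;		/* report 0 instead of reading  */
	int grbm_indexed;	/* would need se/sh selection   */
};

static const struct allowed_reg table[] = {
	{ 0x2004, 0, 0 },	/* hypothetical readable entry  */
	{ 0x263e, 1, 0 },	/* hypothetical untouched entry */
};

static int read_allowed(uint32_t offset, uint32_t *value)
{
	unsigned i;

	*value = 0;
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].offset != offset)
			continue;
		if (!table[i].untouched)
			*value = 0xcafef00d;	/* stand-in for RREG32() */
		return 0;
	}
	return -22;				/* -EINVAL */
}

int main(void)
{
	uint32_t v;

	printf("%d 0x%08x\n", read_allowed(0x2004, &v), v);
	printf("%d 0x%08x\n", read_allowed(0x263e, &v), v);
	return 0;
}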
+
+static bool si_read_disabled_bios(struct amdgpu_device *adev)
+{
+ u32 bus_cntl;
+ u32 d1vga_control = 0;
+ u32 d2vga_control = 0;
+ u32 vga_render_control = 0;
+ u32 rom_cntl;
+ bool r;
+
+ bus_cntl = RREG32(R600_BUS_CNTL);
+ if (adev->mode_info.num_crtc) {
+ d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+ d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+ vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ }
+ rom_cntl = RREG32(R600_ROM_CNTL);
+
+ /* enable the rom */
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+ if (adev->mode_info.num_crtc) {
+ /* Disable VGA mode */
+ WREG32(AVIVO_D1VGA_CONTROL,
+ (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_D2VGA_CONTROL,
+ (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(VGA_RENDER_CONTROL,
+ (vga_render_control & C_000300_VGA_VSTATUS_CNTL));
+ }
+ WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
+
+ r = amdgpu_read_bios(adev);
+
+ /* restore regs */
+ WREG32(R600_BUS_CNTL, bus_cntl);
+ if (adev->mode_info.num_crtc) {
+ WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+ WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+ WREG32(VGA_RENDER_CONTROL, vga_render_control);
+ }
+ WREG32(R600_ROM_CNTL, rom_cntl);
+ return r;
+}
+
+//xxx: not implemented
+static int si_asic_reset(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+static void si_vga_set_state(struct amdgpu_device *adev, bool state)
+{
+ uint32_t temp;
+
+ temp = RREG32(CONFIG_CNTL);
+ if (!state) {
+ temp &= ~(1<<0);
+ temp |= (1<<1);
+ } else {
+ temp &= ~(1<<1);
+ }
+ WREG32(CONFIG_CNTL, temp);
+}
+
+static u32 si_get_xclk(struct amdgpu_device *adev)
+{
+ u32 reference_clock = adev->clock.spll.reference_freq;
+ u32 tmp;
+
+ tmp = RREG32(CG_CLKPIN_CNTL_2);
+ if (tmp & MUX_TCLK_TO_XCLK)
+ return TCLK;
+
+ tmp = RREG32(CG_CLKPIN_CNTL);
+ if (tmp & XTALIN_DIVIDE)
+ return reference_clock / 4;
+
+ return reference_clock;
+}
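The crystal clock derivation above has three outcomes: one strap muxes TCLK onto XCLK outright, another divides the reference clock by four, and otherwise the reference clock is used as-is. A runnable model of that selection; the TCLK constant here is a placeholder, not the sid.h value:

#include <stdbool.h>
#include <stdio.h>

#define TCLK 27000	/* placeholder frequency for this model */

static unsigned si_xclk_model(unsigned ref, bool mux_tclk, bool xtal_div)
{
	if (mux_tclk)		/* CG_CLKPIN_CNTL_2: MUX_TCLK_TO_XCLK */
		return TCLK;
	if (xtal_div)		/* CG_CLKPIN_CNTL: XTALIN_DIVIDE      */
		return ref / 4;
	return ref;
}

int main(void)
{
	printf("%u %u %u\n",
	       si_xclk_model(10000, true, false),
	       si_xclk_model(10000, false, true),
	       si_xclk_model(10000, false, false));
	return 0;
}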
+
+//xxx: not implemented
+static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
+{
+ return 0;
+}
+
+static void si_detect_hw_virtualization(struct amdgpu_device *adev)
+{
+ if (is_virtual_machine()) /* passthrough mode */
+ adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
+static const struct amdgpu_asic_funcs si_asic_funcs =
+{
+ .read_disabled_bios = &si_read_disabled_bios,
+ .detect_hw_virtualization = si_detect_hw_virtualization,
+ .read_register = &si_read_register,
+ .reset = &si_asic_reset,
+ .set_vga_state = &si_vga_set_state,
+ .get_xclk = &si_get_xclk,
+ .set_uvd_clocks = &si_set_uvd_clocks,
+ .set_vce_clocks = NULL,
+};
+
+static uint32_t si_get_rev_id(struct amdgpu_device *adev)
+{
+ return (RREG32(CC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
+ >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
+}
+
+static int si_common_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->smc_rreg = &si_smc_rreg;
+ adev->smc_wreg = &si_smc_wreg;
+ adev->pcie_rreg = &si_pcie_rreg;
+ adev->pcie_wreg = &si_pcie_wreg;
+ adev->pciep_rreg = &si_pciep_rreg;
+ adev->pciep_wreg = &si_pciep_wreg;
+ adev->uvd_ctx_rreg = NULL;
+ adev->uvd_ctx_wreg = NULL;
+ adev->didt_rreg = NULL;
+ adev->didt_wreg = NULL;
+
+ adev->asic_funcs = &si_asic_funcs;
+
+ adev->rev_id = si_get_rev_id(adev);
+ adev->external_rev_id = 0xFF;
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ /*AMD_CG_SUPPORT_GFX_CGCG |*/
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_VCE_MGCG |
+ AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG;
+ adev->pg_flags = 0;
+ break;
+ case CHIP_PITCAIRN:
+ adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ /*AMD_CG_SUPPORT_GFX_CGCG |*/
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_VCE_MGCG |
+ AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG;
+ adev->pg_flags = 0;
+ break;
+
+ case CHIP_VERDE:
+ adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CGTS_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_VCE_MGCG |
+ AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG;
+ adev->pg_flags = 0;
+ //???
+ adev->external_rev_id = adev->rev_id + 0x14;
+ break;
+ case CHIP_OLAND:
+ adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ /*AMD_CG_SUPPORT_GFX_CGCG |*/
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG;
+ adev->pg_flags = 0;
+ break;
+ case CHIP_HAINAN:
+ adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ /*AMD_CG_SUPPORT_GFX_CGCG |*/
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG;
+ adev->pg_flags = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int si_common_sw_init(void *handle)
+{
+ return 0;
+}
+
+static int si_common_sw_fini(void *handle)
+{
+ return 0;
+}
+
+
+static void si_init_golden_registers(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ amdgpu_program_register_sequence(adev,
+ tahiti_golden_registers,
+ (const u32)ARRAY_SIZE(tahiti_golden_registers));
+ amdgpu_program_register_sequence(adev,
+ tahiti_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
+ amdgpu_program_register_sequence(adev,
+ tahiti_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ tahiti_golden_registers2,
+ (const u32)ARRAY_SIZE(tahiti_golden_registers2));
+ break;
+ case CHIP_PITCAIRN:
+ amdgpu_program_register_sequence(adev,
+ pitcairn_golden_registers,
+ (const u32)ARRAY_SIZE(pitcairn_golden_registers));
+ amdgpu_program_register_sequence(adev,
+ pitcairn_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
+ amdgpu_program_register_sequence(adev,
+ pitcairn_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+ break;
+ case CHIP_VERDE:
+ amdgpu_program_register_sequence(adev,
+ verde_golden_registers,
+ (const u32)ARRAY_SIZE(verde_golden_registers));
+ amdgpu_program_register_sequence(adev,
+ verde_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
+ amdgpu_program_register_sequence(adev,
+ verde_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ verde_pg_init,
+ (const u32)ARRAY_SIZE(verde_pg_init));
+ break;
+ case CHIP_OLAND:
+ amdgpu_program_register_sequence(adev,
+ oland_golden_registers,
+ (const u32)ARRAY_SIZE(oland_golden_registers));
+ amdgpu_program_register_sequence(adev,
+ oland_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
+ amdgpu_program_register_sequence(adev,
+ oland_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+ break;
+ case CHIP_HAINAN:
+ amdgpu_program_register_sequence(adev,
+ hainan_golden_registers,
+ (const u32)ARRAY_SIZE(hainan_golden_registers));
+ amdgpu_program_register_sequence(adev,
+ hainan_golden_registers2,
+ (const u32)ARRAY_SIZE(hainan_golden_registers2));
+ amdgpu_program_register_sequence(adev,
+ hainan_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
+ break;
+
+ default:
+ BUG();
+ }
+}
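Every table handed to amdgpu_program_register_sequence() above is a flat list of {offset, and_mask, or_value} triplets: the masked bits are cleared, the new bits are ORed in, and a full 0xffffffff mask becomes a plain overwrite. A user-space model of one entry being applied:

#include <stdint.h>
#include <stdio.h>

static uint32_t apply_golden(uint32_t old, uint32_t and_mask, uint32_t or_val)
{
	if (and_mask == 0xffffffff)
		return or_val;			/* plain overwrite          */
	return (old & ~and_mask) | or_val;	/* clear masked bits, OR in */
}

int main(void)
{
	/* first tahiti_golden_registers entry: 0x2684, 0x00010000, 0x00018208 */
	printf("0x%08x\n", apply_golden(0xffffffff, 0x00010000, 0x00018208));
	return 0;
}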
+
+static void si_pcie_gen3_enable(struct amdgpu_device *adev)
+{
+ struct pci_dev *root = adev->pdev->bus->self;
+ int bridge_pos, gpu_pos;
+ u32 speed_cntl, mask, current_data_rate;
+ int ret, i;
+ u16 tmp16;
+
+ if (pci_is_root_bus(adev->pdev->bus))
+ return;
+
+ if (amdgpu_pcie_gen2 == 0)
+ return;
+
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+ if (ret != 0)
+ return;
+
+ if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+ return;
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
+ LC_CURRENT_DATA_RATE_SHIFT;
+ if (mask & DRM_PCIE_SPEED_80) {
+ if (current_data_rate == 2) {
+ DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+ return;
+ }
+ DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
+ } else if (mask & DRM_PCIE_SPEED_50) {
+ if (current_data_rate == 1) {
+ DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+ return;
+ }
+ DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
+ }
+
+ bridge_pos = pci_pcie_cap(root);
+ if (!bridge_pos)
+ return;
+
+ gpu_pos = pci_pcie_cap(adev->pdev);
+ if (!gpu_pos)
+ return;
+
+ if (mask & DRM_PCIE_SPEED_80) {
+ if (current_data_rate != 2) {
+ u16 bridge_cfg, gpu_cfg;
+ u16 bridge_cfg2, gpu_cfg2;
+ u32 max_lw, current_lw, tmp;
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+ tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+ tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+ pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+ tmp = RREG32_PCIE(PCIE_LC_STATUS1);
+ max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+ current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+
+ if (current_lw < max_lw) {
+ tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ if (tmp & LC_RENEGOTIATION_SUPPORT) {
+ tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
+ tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
+ tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+ }
+ }
+
+ for (i = 0; i < 10; i++) {
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ if (tmp16 & PCI_EXP_DEVSTA_TRPND)
+ break;
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+ tmp |= LC_SET_QUIESCE;
+ WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+ tmp |= LC_REDO_EQ;
+ WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+ mdelay(100);
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+ tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+ tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+ pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~((1 << 4) | (7 << 9));
+ tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~((1 << 4) | (7 << 9));
+ tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
+ pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+ tmp &= ~LC_SET_QUIESCE;
+ WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ }
+ }
+ }
+
+ speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
+ speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~0xf;
+ if (mask & DRM_PCIE_SPEED_80)
+ tmp16 |= 3;
+ else if (mask & DRM_PCIE_SPEED_50)
+ tmp16 |= 2;
+ else
+ tmp16 |= 1;
+ pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+ break;
+ udelay(1);
+ }
+}
+
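+/* The PIF PHY registers are indexed: the offset is written to the
+ * INDEX register and the value is accessed through DATA. pcie_idx_lock
+ * keeps the index/data pair atomic against concurrent users.
+ */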
+static inline u32 si_pif_phy0_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
+ r = RREG32(EVERGREEN_PIF_PHY0_DATA);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ return r;
+}
+
+static inline void si_pif_phy0_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
+ WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static inline u32 si_pif_phy1_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
+ r = RREG32(EVERGREEN_PIF_PHY1_DATA);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ return r;
+}
+
+static inline void si_pif_phy1_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
+ WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static void si_program_aspm(struct amdgpu_device *adev)
+{
+ u32 data, orig;
+ bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
+ bool disable_clkreq = false;
+
+ if (amdgpu_aspm == 0)
+ return;
+
+ if (adev->flags & AMD_IS_APU)
+ return;
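+
+ /* Override the advertised N_FTS (number of fast training sequences)
+ * value, which determines how long link retraining takes when
+ * exiting L0s.
+ */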
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+ data &= ~LC_XMIT_N_FTS_MASK;
+ data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
+ data |= LC_GO_TO_RECOVERY;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+
+ orig = data = RREG32_PCIE(PCIE_P_CNTL);
+ data |= P_IGNORE_EDB_ERR;
+ if (orig != data)
+ WREG32_PCIE(PCIE_P_CNTL, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+ data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
+ data |= LC_PMI_TO_L1_DIS;
+ if (!disable_l0s)
+ data |= LC_L0S_INACTIVITY(7);
+
+ if (!disable_l1) {
+ data |= LC_L1_INACTIVITY(7);
+ data &= ~LC_PMI_TO_L1_DIS;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+
+ if (!disable_plloff_in_l1) {
+ bool clk_req_support;
+
+ orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_0);
+ data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ if (orig != data)
+ si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_0, data);
+
+ orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_1);
+ data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ if (orig != data)
+ si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_1, data);
+
+ orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_0);
+ data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ if (orig != data)
+ si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_0, data);
+
+ orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_1);
+ data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ if (orig != data)
+ si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_1, data);
+
+ if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
+ orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_0);
+ data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ if (orig != data)
+ si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_0, data);
+
+ orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_1);
+ data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ if (orig != data)
+ si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_1, data);
+
+ orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_2);
+ data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ if (orig != data)
+ si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_2, data);
+
+ orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_3);
+ data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ if (orig != data)
+ si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_3, data);
+
+ orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_0);
+ data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ if (orig != data)
+ si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_0, data);
+
+ orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_1);
+ data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ if (orig != data)
+ si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_1, data);
+
+ orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_2);
+ data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ if (orig != data)
+ si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_2, data);
+
+ orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_3);
+ data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ if (orig != data)
+ si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_3, data);
+ }
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ data &= ~LC_DYN_LANES_PWR_STATE_MASK;
+ data |= LC_DYN_LANES_PWR_STATE(3);
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+
+ orig = data = si_pif_phy0_rreg(adev, PB0_PIF_CNTL);
+ data &= ~LS2_EXIT_TIME_MASK;
+ if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
+ data |= LS2_EXIT_TIME(5);
+ if (orig != data)
+ si_pif_phy0_wreg(adev, PB0_PIF_CNTL, data);
+
+ orig = data = si_pif_phy1_rreg(adev, PB1_PIF_CNTL);
+ data &= ~LS2_EXIT_TIME_MASK;
+ if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
+ data |= LS2_EXIT_TIME(5);
+ if (orig != data)
+ si_pif_phy1_wreg(adev, PB1_PIF_CNTL, data);
+
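+ /* CLKREQ# based clock management is only usable when the
+ * upstream bridge advertises the clock PM capability; otherwise
+ * the reference clock has to keep running.
+ */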
+ if (!disable_clkreq &&
+ !pci_is_root_bus(adev->pdev->bus)) {
+ struct pci_dev *root = adev->pdev->bus->self;
+ u32 lnkcap;
+
+ clk_req_support = false;
+ pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+ if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
+ clk_req_support = true;
+ } else {
+ clk_req_support = false;
+ }
+
+ if (clk_req_support) {
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
+ data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+
+ orig = data = RREG32(THM_CLK_CNTL);
+ data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
+ data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+ if (orig != data)
+ WREG32(THM_CLK_CNTL, data);
+
+ orig = data = RREG32(MISC_CLK_CNTL);
+ data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
+ data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+ if (orig != data)
+ WREG32(MISC_CLK_CNTL, data);
+
+ orig = data = RREG32(CG_CLKPIN_CNTL);
+ data &= ~BCLK_AS_XCLK;
+ if (orig != data)
+ WREG32(CG_CLKPIN_CNTL, data);
+
+ orig = data = RREG32(CG_CLKPIN_CNTL_2);
+ data &= ~FORCE_BIF_REFCLK_EN;
+ if (orig != data)
+ WREG32(CG_CLKPIN_CNTL_2, data);
+
+ orig = data = RREG32(MPLL_BYPASSCLK_SEL);
+ data &= ~MPLL_CLKOUT_SEL_MASK;
+ data |= MPLL_CLKOUT_SEL(4);
+ if (orig != data)
+ WREG32(MPLL_BYPASSCLK_SEL, data);
+
+ orig = data = RREG32(SPLL_CNTL_MODE);
+ data &= ~SPLL_REFCLK_SEL_MASK;
+ if (orig != data)
+ WREG32(SPLL_CNTL_MODE, data);
+ }
+ }
+ } else {
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ }
+
+ orig = data = RREG32_PCIE(PCIE_CNTL2);
+ data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+ if (orig != data)
+ WREG32_PCIE(PCIE_CNTL2, data);
+
+ if (!disable_l0s) {
+ data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+ if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
+ data = RREG32_PCIE(PCIE_LC_STATUS1);
+ if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+ data &= ~LC_L0S_INACTIVITY_MASK;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ }
+ }
+ }
+}
+
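+/* ffs(readrq) - 8 maps the max read request size in bytes (128..4096)
+ * onto its 3-bit configuration space encoding. A 128 byte MRRS (v == 0)
+ * or an out-of-range encoding (v == 6 or 7) is treated as unsupported
+ * here and replaced with a safe 512 bytes.
+ */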
+static void si_fix_pci_max_read_req_size(struct amdgpu_device *adev)
+{
+ int readrq;
+ u16 v;
+
+ readrq = pcie_get_readrq(adev->pdev);
+ v = ffs(readrq) - 8;
+ if ((v == 0) || (v == 6) || (v == 7))
+ pcie_set_readrq(adev->pdev, 512);
+}
+
+static int si_common_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ si_fix_pci_max_read_req_size(adev);
+ si_init_golden_registers(adev);
+ si_pcie_gen3_enable(adev);
+ si_program_aspm(adev);
+
+ return 0;
+}
+
+static int si_common_hw_fini(void *handle)
+{
+ return 0;
+}
+
+static int si_common_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_common_hw_fini(adev);
+}
+
+static int si_common_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_common_hw_init(adev);
+}
+
+static bool si_common_is_idle(void *handle)
+{
+ return true;
+}
+
+static int si_common_wait_for_idle(void *handle)
+{
+ return 0;
+}
+
+static int si_common_soft_reset(void *handle)
+{
+ return 0;
+}
+
+static int si_common_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int si_common_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs si_common_ip_funcs = {
+ .name = "si_common",
+ .early_init = si_common_early_init,
+ .late_init = NULL,
+ .sw_init = si_common_sw_init,
+ .sw_fini = si_common_sw_fini,
+ .hw_init = si_common_hw_init,
+ .hw_fini = si_common_hw_fini,
+ .suspend = si_common_suspend,
+ .resume = si_common_resume,
+ .is_idle = si_common_is_idle,
+ .wait_for_idle = si_common_wait_for_idle,
+ .soft_reset = si_common_soft_reset,
+ .set_clockgating_state = si_common_set_clockgating_state,
+ .set_powergating_state = si_common_set_powergating_state,
+};
+
+static const struct amdgpu_ip_block_version verde_ip_blocks[] =
+{
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dma_ip_funcs,
+ },
+/* {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &si_null_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_null_ip_funcs,
+ },
+ */
+};
+
+static const struct amdgpu_ip_block_version hainan_ip_blocks[] =
+{
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dma_ip_funcs,
+ },
+};
+
+int si_set_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VERDE:
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_OLAND:
+ adev->ip_blocks = verde_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks);
+ break;
+ case CHIP_HAINAN:
+ adev->ip_blocks = hainan_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks);
+ break;
+ default:
+ BUG();
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smum.h b/drivers/gpu/drm/amd/amdgpu/si.h
index 1cef03deeac3..959d7b63e0e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smum.h
+++ b/drivers/gpu/drm/amd/amdgpu/si.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,22 +21,13 @@
*
*/
-#ifndef FIJI_SMUMGR_H
-#define FIJI_SMUMGR_H
+#ifndef __SI_H__
+#define __SI_H__
-#include "fiji_ppsmc.h"
+extern const struct amd_ip_funcs si_common_ip_funcs;
-int fiji_smu_init(struct amdgpu_device *adev);
-int fiji_smu_fini(struct amdgpu_device *adev);
-int fiji_smu_start(struct amdgpu_device *adev);
-
-struct fiji_smu_private_data
-{
- uint8_t *header;
- uint32_t smu_buffer_addr_high;
- uint32_t smu_buffer_addr_low;
- uint32_t header_addr_high;
- uint32_t header_addr_low;
-};
+void si_srbm_select(struct amdgpu_device *adev,
+ u32 me, u32 pipe, u32 queue, u32 vmid);
+int si_set_ip_blocks(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
new file mode 100644
index 000000000000..de358193a8f9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -0,0 +1,915 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+#include "si/sid.h"
+
+const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
+{
+ DMA0_REGISTER_OFFSET,
+ DMA1_REGISTER_OFFSET
+};
+
+static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
+static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
+static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
+
+static uint32_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ return ring->adev->wb.wb[ring->rptr_offs >> 2];
+}
+
+static uint32_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+ return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+}
+
+static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+ WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+}
+
+static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
+{
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
+ amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+static void si_dma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL));
+ amdgpu_ring_write(ring, 1);
+}
+
+static void si_dma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, (0xf << 16) | (HDP_DEBUG0));
+ amdgpu_ring_write(ring, 1);
+}
+
+/**
+ * si_dma_ring_emit_fence - emit a fence on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ * @addr: GPU address of the fence
+ * @seq: sequence number to write
+ * @flags: fence flags
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (SI).
+ */
+static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags)
+{
+ bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+
+ /* write the fence */
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ amdgpu_ring_write(ring, seq);
+ /* optionally write high bits as well */
+ if (write64bit) {
+ addr += 4;
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ amdgpu_ring_write(ring, upper_32_bits(seq));
+ }
+ /* generate an interrupt */
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
+}
+
+static void si_dma_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ u32 rb_cntl;
+ unsigned i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+ /* dma0 */
+ rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+ ring->ready = false;
+ }
+}
+
+static int si_dma_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
+ int i, r;
+ uint64_t rptr_addr;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
+ WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);
+
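+ /* Point the engine's read pointer writeback at a slot in the
+ * writeback page so si_dma_ring_get_rptr() can fetch the rptr
+ * from system memory instead of doing an MMIO read.
+ */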
+ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+
+ WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
+ WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
+
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);
+
+ dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+ WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+ }
+
+ return 0;
+}
+
+/**
+ * si_dma_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (SI).
+ * Returns 0 for success, error for failure.
+ */
+static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned i;
+ unsigned index;
+ int r;
+ u32 tmp;
+ u64 gpu_addr;
+
+ r = amdgpu_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
+ }
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ tmp = 0xCAFEDEAD;
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
+ r = amdgpu_ring_alloc(ring, 4);
+ if (r) {
+ DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ amdgpu_wb_free(adev, index);
+ return r;
+ }
+
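+ /* Emit a one-dword DMA WRITE packet that stores 0xDEADBEEF in the
+ * writeback slot, then poll the slot until the value lands.
+ */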
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
+ amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < adev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ amdgpu_wb_free(adev, index);
+
+ return r;
+}
+
+/**
+ * si_dma_ring_test_ib - test an IB on the DMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @timeout: timeout to wait for the IB fence, in jiffies
+ *
+ * Test a simple IB in the DMA ring (SI).
+ * Returns 0 on success, error on failure.
+ */
+static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib ib;
+ struct fence *f = NULL;
+ unsigned index;
+ u32 tmp = 0;
+ u64 gpu_addr;
+ long r;
+
+ r = amdgpu_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ tmp = 0xCAFEDEAD;
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+ memset(&ib, 0, sizeof(ib));
+ r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
+ }
+
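+ /* Same one-dword write as the ring test, but packaged as a
+ * 4-dword indirect buffer so the IB submission path is exercised. */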
+ ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
+ ib.ptr[1] = lower_32_bits(gpu_addr);
+ ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
+ ib.ptr[3] = 0xDEADBEEF;
+ ib.length_dw = 4;
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+ if (r)
+ goto err1;
+
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
+ goto err1;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ goto err1;
+ }
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
+ } else {
+ DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+
+err1:
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
+err0:
+ amdgpu_wb_free(adev, index);
+ return r;
+}
+
+/**
+ * si_dma_vm_copy_pte - update PTEs by copying them from the GART
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using DMA (SI).
+ */
+static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ unsigned bytes = count * 8;
+
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+ 1, 0, 0, bytes);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+}
+
+/**
+ * si_dma_vm_write_pte - update PTEs by writing them manually
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @value: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ *
+ * Update PTEs by writing them manually using DMA (SI).
+ */
+static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr)
+{
+ unsigned ndw = count * 2;
+
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ for (; ndw > 0; ndw -= 2) {
+ ib->ptr[ib->length_dw++] = lower_32_bits(value);
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ value += incr;
+ }
+}
+
+/**
+ * si_dma_vm_set_pte_pde - update the page tables using the DMA engine
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the DMA engine (SI).
+ */
+static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
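+ /* The count field of a PTE/PDE packet is limited, so split large
+ * updates into chunks of at most 0xFFFFE dwords (two dwords per
+ * 64-bit entry).
+ */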
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & AMDGPU_PTE_VALID)
+ value = addr;
+ else
+ value = 0;
+
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
+}
+
+/**
+ * si_dma_ring_pad_ib - pad the IB to the required number of dw
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to fill with padding
+ *
+ * Pad the IB with NOPs to a multiple of 8 dwords (SI).
+ */
+static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+{
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+}
+
+/**
+ * si_dma_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed (SI).
+ */
+static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ /* wait for idle */
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
+ (1 << 27)); /* Poll memory */
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
+ amdgpu_ring_write(ring, seq); /* value */
+ amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
+}
+
+/**
+ * si_dma_ring_emit_vm_flush - vm flush using the DMA engine
+ *
+ * @ring: amdgpu_ring pointer
+ * @vm_id: VM context id to flush
+ * @pd_addr: page directory base address
+ *
+ * Update the page table base and flush the VM TLB
+ * using the DMA engine (SI).
+ */
+static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
+{
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ if (vm_id < 8)
+ amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+ else
+ amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
+ amdgpu_ring_write(ring, 1 << vm_id);
+
+ /* wait for invalidate to complete */
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, 0xff << 16); /* retry */
+ amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+ amdgpu_ring_write(ring, 0); /* value */
+ amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
+}
+
+static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 7 + 3; /* si_dma_ring_emit_ib */
+}
+
+static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 3 + /* si_dma_ring_emit_hdp_flush */
+ 3 + /* si_dma_ring_emit_hdp_invalidate */
+ 6 + /* si_dma_ring_emit_pipeline_sync */
+ 12 + /* si_dma_ring_emit_vm_flush */
+ 9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+}
+
+static int si_dma_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
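+ /* SI asics have two DMA engines (DMA0 and DMA1) */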
+ adev->sdma.num_instances = 2;
+
+ si_dma_set_ring_funcs(adev);
+ si_dma_set_buffer_funcs(adev);
+ si_dma_set_vm_pte_funcs(adev);
+ si_dma_set_irq_funcs(adev);
+
+ return 0;
+}
+
+static int si_dma_sw_init(void *handle)
+{
+ struct amdgpu_ring *ring;
+ int r, i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* DMA0 trap event */
+ r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+ if (r)
+ return r;
+
+ /* DMA1 trap event */
+ r = amdgpu_irq_add_id(adev, 244, &adev->sdma.trap_irq_1);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = false;
+ sprintf(ring->name, "sdma%d", i);
+ r = amdgpu_ring_init(adev, ring, 1024,
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
+ &adev->sdma.trap_irq,
+ (i == 0) ?
+ AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+ AMDGPU_RING_TYPE_SDMA);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+static int si_dma_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+
+ return 0;
+}
+
+static int si_dma_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_dma_start(adev);
+}
+
+static int si_dma_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ si_dma_stop(adev);
+
+ return 0;
+}
+
+static int si_dma_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_dma_hw_fini(adev);
+}
+
+static int si_dma_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_dma_hw_init(adev);
+}
+
+static bool si_dma_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 tmp = RREG32(SRBM_STATUS2);
+
+ if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
+ return false;
+
+ return true;
+}
+
+static int si_dma_wait_for_idle(void *handle)
+{
+ unsigned i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (si_dma_is_idle(handle))
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int si_dma_soft_reset(void *handle)
+{
+ DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
+ return 0;
+}
+
+static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 sdma_cntl;
+
+ switch (type) {
+ case AMDGPU_SDMA_IRQ_TRAP0:
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl &= ~TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl |= TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ break;
+ default:
+ break;
+ }
+ break;
+ case AMDGPU_SDMA_IRQ_TRAP1:
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl &= ~TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl |= TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int si_dma_process_trap_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ amdgpu_fence_process(&adev->sdma.instance[0].ring);
+
+ return 0;
+}
+
+static int si_dma_process_trap_irq_1(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ amdgpu_fence_process(&adev->sdma.instance[1].ring);
+
+ return 0;
+}
+
+static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal instruction in SDMA command stream\n");
+ schedule_work(&adev->reset_work);
+ return 0;
+}
+
+static int si_dma_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ u32 orig, data, offset;
+ int i;
+ bool enable;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ enable = (state == AMD_CG_STATE_GATE);
+
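+ /* Gating clears MEM_POWER_OVERRIDE so the engine memories may
+ * enter light sleep and lets the clocks gate dynamically; the
+ * ungate path forces the override and the clocks back on. The
+ * DMA_CLK_CTRL values are hardware-specific magic.
+ */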
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (i == 0)
+ offset = DMA0_REGISTER_OFFSET;
+ else
+ offset = DMA1_REGISTER_OFFSET;
+ orig = data = RREG32(DMA_POWER_CNTL + offset);
+ data &= ~MEM_POWER_OVERRIDE;
+ if (data != orig)
+ WREG32(DMA_POWER_CNTL + offset, data);
+ WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+ }
+ } else {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (i == 0)
+ offset = DMA0_REGISTER_OFFSET;
+ else
+ offset = DMA1_REGISTER_OFFSET;
+ orig = data = RREG32(DMA_POWER_CNTL + offset);
+ data |= MEM_POWER_OVERRIDE;
+ if (data != orig)
+ WREG32(DMA_POWER_CNTL + offset, data);
+
+ orig = data = RREG32(DMA_CLK_CTRL + offset);
+ data = 0xff000000;
+ if (data != orig)
+ WREG32(DMA_CLK_CTRL + offset, data);
+ }
+ }
+
+ return 0;
+}
+
+static int si_dma_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ u32 tmp;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
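+ /* Command/config write sequence for the DMA power-gating state
+ * machine; the values are hardware-specific magic.
+ */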
+ WREG32(DMA_PGFSM_WRITE, 0x00002000);
+ WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
+
+ for (tmp = 0; tmp < 5; tmp++)
+ WREG32(DMA_PGFSM_WRITE, 0);
+
+ return 0;
+}
+
+const struct amd_ip_funcs si_dma_ip_funcs = {
+ .name = "si_dma",
+ .early_init = si_dma_early_init,
+ .late_init = NULL,
+ .sw_init = si_dma_sw_init,
+ .sw_fini = si_dma_sw_fini,
+ .hw_init = si_dma_hw_init,
+ .hw_fini = si_dma_hw_fini,
+ .suspend = si_dma_suspend,
+ .resume = si_dma_resume,
+ .is_idle = si_dma_is_idle,
+ .wait_for_idle = si_dma_wait_for_idle,
+ .soft_reset = si_dma_soft_reset,
+ .set_clockgating_state = si_dma_set_clockgating_state,
+ .set_powergating_state = si_dma_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
+ .get_rptr = si_dma_ring_get_rptr,
+ .get_wptr = si_dma_ring_get_wptr,
+ .set_wptr = si_dma_ring_set_wptr,
+ .parse_cs = NULL,
+ .emit_ib = si_dma_ring_emit_ib,
+ .emit_fence = si_dma_ring_emit_fence,
+ .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
+ .emit_vm_flush = si_dma_ring_emit_vm_flush,
+ .emit_hdp_flush = si_dma_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = si_dma_ring_emit_hdp_invalidate,
+ .test_ring = si_dma_ring_test_ring,
+ .test_ib = si_dma_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = si_dma_ring_pad_ib,
+ .get_emit_ib_size = si_dma_ring_get_emit_ib_size,
+ .get_dma_frame_size = si_dma_ring_get_dma_frame_size,
+};
+
+static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
+ .set = si_dma_set_trap_irq_state,
+ .process = si_dma_process_trap_irq,
+};
+
+static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs_1 = {
+ .set = si_dma_set_trap_irq_state,
+ .process = si_dma_process_trap_irq_1,
+};
+
+static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
+ .process = si_dma_process_illegal_inst_irq,
+};
+
+static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
+ adev->sdma.trap_irq_1.funcs = &si_dma_trap_irq_funcs_1;
+ adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
+}
+
+/**
+ * si_dma_emit_copy_buffer - copy buffer using the DMA engine
+ *
+ * @ib: indirect buffer to fill with commands
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Copy GPU buffers using the DMA engine (SI).
+ * Used by the amdgpu ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ uint32_t byte_count)
+{
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+ 1, 0, 0, byte_count);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
+}
+
+/**
+ * si_dma_emit_fill_buffer - fill buffer using the DMA engine
+ *
+ * @ib: indirect buffer to fill with commands
+ * @src_data: value to write to buffer
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Fill GPU buffers using the DMA engine (SI).
+ */
+static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
+ uint32_t src_data,
+ uint64_t dst_offset,
+ uint32_t byte_count)
+{
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
+ 0, 0, 0, byte_count / 4);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = src_data;
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
+}
+
+
+static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
+ .copy_max_bytes = 0xffff8,
+ .copy_num_dw = 5,
+ .emit_copy_buffer = si_dma_emit_copy_buffer,
+
+ .fill_max_bytes = 0xffff8,
+ .fill_num_dw = 4,
+ .emit_fill_buffer = si_dma_emit_fill_buffer,
+};
+
+static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
+{
+ if (adev->mman.buffer_funcs == NULL) {
+ adev->mman.buffer_funcs = &si_dma_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+ }
+}
+
+static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
+ .copy_pte = si_dma_vm_copy_pte,
+ .write_pte = si_dma_vm_write_pte,
+ .set_pte_pde = si_dma_vm_set_pte_pde,
+};
+
+static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ if (adev->vm_manager.vm_pte_funcs == NULL) {
+ adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ adev->vm_manager.vm_pte_rings[i] =
+ &adev->sdma.instance[i].ring;
+
+ adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h
new file mode 100644
index 000000000000..3a3e0c78a54b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SI_DMA_H__
+#define __SI_DMA_H__
+
+extern const struct amd_ip_funcs si_dma_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
new file mode 100644
index 000000000000..8bd08925b370
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -0,0 +1,8006 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_atombios.h"
+#include "si/sid.h"
+#include "r600_dpm.h"
+#include "si_dpm.h"
+#include "atom.h"
+#include "../include/pptable.h"
+#include <linux/math64.h>
+#include <linux/seq_file.h>
+#include <linux/firmware.h>
+
+#define MC_CG_ARB_FREQ_F0 0x0a
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define MC_CG_ARB_FREQ_F2 0x0c
+#define MC_CG_ARB_FREQ_F3 0x0d
+
+#define SMC_RAM_END 0x20000
+
+#define SCLK_MIN_DEEPSLEEP_FREQ 1350
+
+
+/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
+
+#define BIOS_SCRATCH_4 0x5cd
+
+MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
+MODULE_FIRMWARE("radeon/verde_smc.bin");
+MODULE_FIRMWARE("radeon/verde_k_smc.bin");
+MODULE_FIRMWARE("radeon/oland_smc.bin");
+MODULE_FIRMWARE("radeon/oland_k_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
+ struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
+};
+
+union fan_info {
+ struct _ATOM_PPLIB_FANTABLE fan;
+ struct _ATOM_PPLIB_FANTABLE2 fan2;
+ struct _ATOM_PPLIB_FANTABLE3 fan3;
+};
+
+union pplib_clock_info {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+ struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+};
+
+static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
+{
+ R600_UTC_DFLT_00,
+ R600_UTC_DFLT_01,
+ R600_UTC_DFLT_02,
+ R600_UTC_DFLT_03,
+ R600_UTC_DFLT_04,
+ R600_UTC_DFLT_05,
+ R600_UTC_DFLT_06,
+ R600_UTC_DFLT_07,
+ R600_UTC_DFLT_08,
+ R600_UTC_DFLT_09,
+ R600_UTC_DFLT_10,
+ R600_UTC_DFLT_11,
+ R600_UTC_DFLT_12,
+ R600_UTC_DFLT_13,
+ R600_UTC_DFLT_14,
+};
+
+static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
+{
+ R600_DTC_DFLT_00,
+ R600_DTC_DFLT_01,
+ R600_DTC_DFLT_02,
+ R600_DTC_DFLT_03,
+ R600_DTC_DFLT_04,
+ R600_DTC_DFLT_05,
+ R600_DTC_DFLT_06,
+ R600_DTC_DFLT_07,
+ R600_DTC_DFLT_08,
+ R600_DTC_DFLT_09,
+ R600_DTC_DFLT_10,
+ R600_DTC_DFLT_11,
+ R600_DTC_DFLT_12,
+ R600_DTC_DFLT_13,
+ R600_DTC_DFLT_14,
+};
+
+static const struct si_cac_config_reg cac_weights_tahiti[] =
+{
+ { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_tahiti[] =
+{
+ { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_tahiti[] =
+{
+ { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_tahiti =
+{
+ ((1 << 16) | 27027),
+ 6,
+ 0,
+ 4,
+ 95,
+ {
+ 0UL,
+ 0UL,
+ 4521550UL,
+ 309631529UL,
+ -1270850L,
+ 4513710L,
+ 40
+ },
+ 595000000UL,
+ 12,
+ {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ },
+ true
+};
+
+static const struct si_dte_data dte_data_tahiti =
+{
+ { 1159409, 0, 0, 0, 0 },
+ { 777, 0, 0, 0, 0 },
+ 2,
+ 54000,
+ 127000,
+ 25,
+ 2,
+ 10,
+ 13,
+ { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
+ { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
+ { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
+ 85,
+ false
+};
+
+#if 0
+static const struct si_dte_data dte_data_tahiti_le =
+{
+ { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
+ { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
+ 0x5,
+ 0xAFC8,
+ 0x64,
+ 0x32,
+ 1,
+ 0,
+ 0x10,
+ { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
+ { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
+ { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
+ 85,
+ true
+};
+#endif
+
+static const struct si_dte_data dte_data_tahiti_pro =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 45000,
+ 100,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_dte_data dte_data_new_zealand =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
+ { 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
+ 0x5,
+ 0xAFC8,
+ 0x69,
+ 0x32,
+ 1,
+ 0,
+ 0x10,
+ { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
+ 85,
+ true
+};
+
+static const struct si_dte_data dte_data_aruba_pro =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 45000,
+ 100,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_dte_data dte_data_malta =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 45000,
+ 100,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_cac_config_reg cac_weights_pitcairn[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_pitcairn[] =
+{
+ { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_pitcairn[] =
+{
+ { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_pitcairn =
+{
+ ((1 << 16) | 27027),
+ 5,
+ 0,
+ 6,
+ 100,
+ {
+ 51600000UL,
+ 1800000UL,
+ 7194395UL,
+ 309631529UL,
+ -1270850L,
+ 4513710L,
+ 100
+ },
+ 117830498UL,
+ 12,
+ {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ },
+ true
+};
+
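+/*
+ * An all-zero DTE table with the trailing flag false leaves the
+ * temperature estimator disabled on this board; the same pattern recurs
+ * for Cape Verde and Oland below.
+ */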
+static const struct si_dte_data dte_data_pitcairn =
+{
+ { 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0 },
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ 0,
+ false
+};
+
+static const struct si_dte_data dte_data_curacao_xt =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 45000,
+ 100,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_dte_data dte_data_curacao_pro =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 45000,
+ 100,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_dte_data dte_data_neptune_xt =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 45000,
+ 100,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
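+/*
+ * Chelsea XT, Heathrow, Cape Verde Pro and Cape Verde reuse the Chelsea
+ * Pro weights except for the register 0x14 entry, the only value that
+ * varies across these five tables.
+ */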
+static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_heathrow[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_cape_verde[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_cape_verde[] =
+{
+ { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_cape_verde[] =
+{
+ { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_cape_verde =
+{
+ ((1 << 16) | 0x6993),
+ 5,
+ 0,
+ 7,
+ 105,
+ {
+ 0UL,
+ 0UL,
+ 7194395UL,
+ 309631529UL,
+ -1270850L,
+ 4513710L,
+ 100
+ },
+ 117830498UL,
+ 12,
+ {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ },
+ true
+};
+
+static const struct si_dte_data dte_data_cape_verde =
+{
+ { 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0 },
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ 0,
+ false
+};
+
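+/*
+ * The three Venus variants share the same filter structure and differ
+ * only in the second coefficient array and the temperature-dependent
+ * values in the third 16-entry table.
+ */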
+static const struct si_dte_data dte_data_venus_xtx =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
+ 5,
+ 55000,
+ 0x69,
+ 0xA,
+ 1,
+ 0,
+ 0x3,
+ { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_dte_data dte_data_venus_xt =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
+ 5,
+ 55000,
+ 0x69,
+ 0xA,
+ 1,
+ 0,
+ 0x3,
+ { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_dte_data dte_data_venus_pro =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
+ 5,
+ 55000,
+ 0x69,
+ 0xA,
+ 1,
+ 0,
+ 0x3,
+ { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_cac_config_reg cac_weights_oland[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_mars_pro[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
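+/*
+ * Mars XT, Oland Pro and Oland XT differ from the Mars Pro weights only
+ * in the register 0x14 entry (0x60, 0x90 and 0x120 versus 0x2).
+ */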
+static const struct si_cac_config_reg cac_weights_mars_xt[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_oland_pro[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_oland_xt[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_oland[] =
+{
+ { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_mars_pro[] =
+{
+ { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_oland[] =
+{
+ { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_oland =
+{
+ ((1 << 16) | 0x6993),
+ 5,
+ 0,
+ 7,
+ 105,
+ {
+ 0UL,
+ 0UL,
+ 7194395UL,
+ 309631529UL,
+ -1270850L,
+ 4513710L,
+ 100
+ },
+ 117830498UL,
+ 12,
+ {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ },
+ true
+};
+
+static const struct si_powertune_data powertune_data_mars_pro =
+{
+ ((1 << 16) | 0x6993),
+ 5,
+ 0,
+ 7,
+ 105,
+ {
+ 0UL,
+ 0UL,
+ 7194395UL,
+ 309631529UL,
+ -1270850L,
+ 4513710L,
+ 100
+ },
+ 117830498UL,
+ 12,
+ {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ },
+ true
+};
+
+static const struct si_dte_data dte_data_oland =
+{
+ { 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0 },
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ 0,
+ false
+};
+
+static const struct si_dte_data dte_data_mars_pro =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 55000,
+ 105,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_dte_data dte_data_sun_xt =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 55000,
+ 105,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_cac_config_reg cac_weights_hainan[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_hainan =
+{
+ ((1 << 16) | 0x6993),
+ 5,
+ 0,
+ 9,
+ 105,
+ {
+ 0UL,
+ 0UL,
+ 7194395UL,
+ 309631529UL,
+ -1270850L,
+ 4513710L,
+ 100
+ },
+ 117830498UL,
+ 12,
+ {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ },
+ true
+};
+
+static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev);
+static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev);
+static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev);
+static struct si_ps *si_get_ps(struct amdgpu_ps *rps);
+
+static int si_populate_voltage_value(struct amdgpu_device *adev,
+ const struct atom_voltage_table *table,
+ u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
+static int si_get_std_voltage_value(struct amdgpu_device *adev,
+ SISLANDS_SMC_VOLTAGE_VALUE *voltage,
+ u16 *std_voltage);
+static int si_write_smc_soft_register(struct amdgpu_device *adev,
+ u16 reg_offset, u32 value);
+static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
+ struct rv7xx_pl *pl,
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
+static int si_calculate_sclk_params(struct amdgpu_device *adev,
+ u32 engine_clock,
+ SISLANDS_SMC_SCLK_VALUE *sclk);
+
+static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
+static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
+static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev);
+static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
+
+static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
+{
+ struct si_power_info *pi = adev->pm.dpm.priv;
+ return pi;
+}
+
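+/*
+ * NI leakage model, evaluated in drm 32.32 fixed point:
+ *
+ *   kt = exp((t_slope * V + t_intercept) * T) /
+ *        exp((t_slope * V + t_intercept) * t_ref)
+ *   kv = av * exp(bv * V)
+ *   P  = I_leak * kt * kv * V
+ *
+ * v is in mV, t in 0.001 degrees, ileakage appears to be in 0.01 A
+ * steps, and the model coefficients carry a 10^8 scale; the result is
+ * returned scaled by 1000 (presumably mW).
+ */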
+static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
+ u16 v, s32 t, u32 ileakage, u32 *leakage)
+{
+ s64 kt, kv, leakage_w, i_leakage, vddc;
+ s64 temperature, t_slope, t_intercept, av, bv, t_ref;
+ s64 tmp;
+
+ i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
+ vddc = div64_s64(drm_int2fixp(v), 1000);
+ temperature = div64_s64(drm_int2fixp(t), 1000);
+
+ t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
+ t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
+ av = div64_s64(drm_int2fixp(coeff->av), 100000000);
+ bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
+ t_ref = drm_int2fixp(coeff->t_ref);
+
+ tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
+ kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
+ kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
+ kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
+
+ leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
+
+ *leakage = drm_fixp2int(leakage_w * 1000);
+}
+
+static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev,
+ const struct ni_leakage_coeffients *coeff,
+ u16 v,
+ s32 t,
+ u32 i_leakage,
+ u32 *leakage)
+{
+ si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
+}
+
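+/*
+ * Temperature-independent variant of the model above: the exponential
+ * temperature term is replaced by the constant fixed_kt (same 10^8
+ * scale), leaving only kv = av * exp(bv * V).
+ */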
+static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
+ const u32 fixed_kt, u16 v,
+ u32 ileakage, u32 *leakage)
+{
+ s64 kt, kv, leakage_w, i_leakage, vddc;
+
+ i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
+ vddc = div64_s64(drm_int2fixp(v), 1000);
+
+ kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
+ kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
+ drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
+
+ leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
+
+ *leakage = drm_fixp2int(leakage_w * 1000);
+}
+
+static void si_calculate_leakage_for_v(struct amdgpu_device *adev,
+ const struct ni_leakage_coeffients *coeff,
+ const u32 fixed_kt,
+ u16 v,
+ u32 i_leakage,
+ u32 *leakage)
+{
+ si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
+}
+
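+/*
+ * Rescale the DTE filter coefficients from the power limits: each r[i]
+ * spreads t_split[i] percent of the (max_t - t0) temperature headroom
+ * over the near-TDP limit (PL2), in 18.14 fixed point.  The
+ * temperature-dependent entries then all reuse r[4], with the first
+ * one doubled.
+ */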
+static void si_update_dte_from_pl2(struct amdgpu_device *adev,
+ struct si_dte_data *dte_data)
+{
+ u32 p_limit1 = adev->pm.dpm.tdp_limit;
+ u32 p_limit2 = adev->pm.dpm.near_tdp_limit;
+ u32 k = dte_data->k;
+ u32 t_max = dte_data->max_t;
+ u32 t_split[5] = { 10, 15, 20, 25, 30 };
+ u32 t_0 = dte_data->t0;
+ u32 i;
+
+ if (p_limit2 != 0 && p_limit2 <= p_limit1) {
+ dte_data->tdep_count = 3;
+
+ /* dte_data->r[] holds at most SMC_SISLANDS_DTE_MAX_FILTER_STAGES entries */
+ for (i = 0; i < min_t(u32, k, SMC_SISLANDS_DTE_MAX_FILTER_STAGES); i++) {
+ dte_data->r[i] =
+ (t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) /
+ (p_limit2 * (u32)100);
+ }
+
+ dte_data->tdep_r[1] = dte_data->r[4] * 2;
+
+ for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
+ dte_data->tdep_r[i] = dte_data->r[4];
+ }
+ } else {
+ DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
+ }
+}
+
+static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = adev->pm.dpm.priv;
+
+ return pi;
+}
+
+static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev)
+{
+ struct ni_power_info *pi = adev->pm.dpm.priv;
+
+ return pi;
+}
+
+static struct si_ps *si_get_ps(struct amdgpu_ps *aps)
+{
+ struct si_ps *ps = aps->ps_priv;
+
+ return ps;
+}
+
+static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ bool update_dte_from_pl2 = false;
+
+ if (adev->asic_type == CHIP_TAHITI) {
+ si_pi->cac_weights = cac_weights_tahiti;
+ si_pi->lcac_config = lcac_tahiti;
+ si_pi->cac_override = cac_override_tahiti;
+ si_pi->powertune_data = &powertune_data_tahiti;
+ si_pi->dte_data = dte_data_tahiti;
+
+ switch (adev->pdev->device) {
+ case 0x6798:
+ si_pi->dte_data.enable_dte_by_default = true;
+ break;
+ case 0x6799:
+ si_pi->dte_data = dte_data_new_zealand;
+ break;
+ case 0x6790:
+ case 0x6791:
+ case 0x6792:
+ case 0x679E:
+ si_pi->dte_data = dte_data_aruba_pro;
+ update_dte_from_pl2 = true;
+ break;
+ case 0x679B:
+ si_pi->dte_data = dte_data_malta;
+ update_dte_from_pl2 = true;
+ break;
+ case 0x679A:
+ si_pi->dte_data = dte_data_tahiti_pro;
+ update_dte_from_pl2 = true;
+ break;
+ default:
+ if (si_pi->dte_data.enable_dte_by_default)
+ DRM_ERROR("DTE is not enabled!\n");
+ break;
+ }
+ } else if (adev->asic_type == CHIP_PITCAIRN) {
+ si_pi->cac_weights = cac_weights_pitcairn;
+ si_pi->lcac_config = lcac_pitcairn;
+ si_pi->cac_override = cac_override_pitcairn;
+ si_pi->powertune_data = &powertune_data_pitcairn;
+
+ switch (adev->pdev->device) {
+ case 0x6810:
+ case 0x6818:
+ si_pi->dte_data = dte_data_curacao_xt;
+ update_dte_from_pl2 = true;
+ break;
+ case 0x6819:
+ case 0x6811:
+ si_pi->dte_data = dte_data_curacao_pro;
+ update_dte_from_pl2 = true;
+ break;
+ case 0x6800:
+ case 0x6806:
+ si_pi->dte_data = dte_data_neptune_xt;
+ update_dte_from_pl2 = true;
+ break;
+ default:
+ si_pi->dte_data = dte_data_pitcairn;
+ break;
+ }
+ } else if (adev->asic_type == CHIP_VERDE) {
+ si_pi->lcac_config = lcac_cape_verde;
+ si_pi->cac_override = cac_override_cape_verde;
+ si_pi->powertune_data = &powertune_data_cape_verde;
+
+ switch (adev->pdev->device) {
+ case 0x683B:
+ case 0x683F:
+ case 0x6829:
+ case 0x6835:
+ si_pi->cac_weights = cac_weights_cape_verde_pro;
+ si_pi->dte_data = dte_data_cape_verde;
+ break;
+ case 0x682C:
+ si_pi->cac_weights = cac_weights_cape_verde_pro;
+ si_pi->dte_data = dte_data_sun_xt;
+ break;
+ case 0x6825:
+ case 0x6827:
+ si_pi->cac_weights = cac_weights_heathrow;
+ si_pi->dte_data = dte_data_cape_verde;
+ break;
+ case 0x6824:
+ case 0x682D:
+ si_pi->cac_weights = cac_weights_chelsea_xt;
+ si_pi->dte_data = dte_data_cape_verde;
+ break;
+ case 0x682F:
+ si_pi->cac_weights = cac_weights_chelsea_pro;
+ si_pi->dte_data = dte_data_cape_verde;
+ break;
+ case 0x6820:
+ si_pi->cac_weights = cac_weights_heathrow;
+ si_pi->dte_data = dte_data_venus_xtx;
+ break;
+ case 0x6821:
+ si_pi->cac_weights = cac_weights_heathrow;
+ si_pi->dte_data = dte_data_venus_xt;
+ break;
+ case 0x6823:
+ case 0x682B:
+ case 0x6822:
+ case 0x682A:
+ si_pi->cac_weights = cac_weights_chelsea_pro;
+ si_pi->dte_data = dte_data_venus_pro;
+ break;
+ default:
+ si_pi->cac_weights = cac_weights_cape_verde;
+ si_pi->dte_data = dte_data_cape_verde;
+ break;
+ }
+ } else if (adev->asic_type == CHIP_OLAND) {
+ si_pi->lcac_config = lcac_mars_pro;
+ si_pi->cac_override = cac_override_oland;
+ si_pi->powertune_data = &powertune_data_mars_pro;
+ si_pi->dte_data = dte_data_mars_pro;
+
+ switch (adev->pdev->device) {
+ case 0x6601:
+ case 0x6621:
+ case 0x6603:
+ case 0x6605:
+ si_pi->cac_weights = cac_weights_mars_pro;
+ update_dte_from_pl2 = true;
+ break;
+ case 0x6600:
+ case 0x6606:
+ case 0x6620:
+ case 0x6604:
+ si_pi->cac_weights = cac_weights_mars_xt;
+ update_dte_from_pl2 = true;
+ break;
+ case 0x6611:
+ case 0x6613:
+ case 0x6608:
+ si_pi->cac_weights = cac_weights_oland_pro;
+ update_dte_from_pl2 = true;
+ break;
+ case 0x6610:
+ si_pi->cac_weights = cac_weights_oland_xt;
+ update_dte_from_pl2 = true;
+ break;
+ default:
+ si_pi->cac_weights = cac_weights_oland;
+ si_pi->lcac_config = lcac_oland;
+ si_pi->cac_override = cac_override_oland;
+ si_pi->powertune_data = &powertune_data_oland;
+ si_pi->dte_data = dte_data_oland;
+ break;
+ }
+ } else if (adev->asic_type == CHIP_HAINAN) {
+ si_pi->cac_weights = cac_weights_hainan;
+ si_pi->lcac_config = lcac_oland;
+ si_pi->cac_override = cac_override_oland;
+ si_pi->powertune_data = &powertune_data_hainan;
+ si_pi->dte_data = dte_data_sun_xt;
+ update_dte_from_pl2 = true;
+ } else {
+ DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
+ return;
+ }
+
+ ni_pi->enable_power_containment = false;
+ ni_pi->enable_cac = false;
+ ni_pi->enable_sq_ramping = false;
+ si_pi->enable_dte = false;
+
+ if (si_pi->powertune_data->enable_powertune_by_default) {
+ ni_pi->enable_power_containment = true;
+ ni_pi->enable_cac = true;
+ if (si_pi->dte_data.enable_dte_by_default) {
+ si_pi->enable_dte = true;
+ if (update_dte_from_pl2)
+ si_update_dte_from_pl2(adev, &si_pi->dte_data);
+
+ }
+ ni_pi->enable_sq_ramping = true;
+ }
+
+ ni_pi->driver_calculate_cac_leakage = true;
+ ni_pi->cac_configuration_required = true;
+
+ if (ni_pi->cac_configuration_required) {
+ ni_pi->support_cac_long_term_average = true;
+ si_pi->dyn_powertune_data.l2_lta_window_size =
+ si_pi->powertune_data->l2_lta_window_size_default;
+ si_pi->dyn_powertune_data.lts_truncate =
+ si_pi->powertune_data->lts_truncate_default;
+ } else {
+ ni_pi->support_cac_long_term_average = false;
+ si_pi->dyn_powertune_data.l2_lta_window_size = 0;
+ si_pi->dyn_powertune_data.lts_truncate = 0;
+ }
+
+ si_pi->dyn_powertune_data.disable_uvd_powertune = false;
+}
+
+static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev)
+{
+ return 1;
+}
+
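+/*
+ * CAC sampling window in microseconds: CG_CAC_CTRL holds the window
+ * as two 16-bit factors whose product is a cycle count, and xclk is
+ * in 10 kHz units, so (cycles * 100) / xclk is cycles / MHz.
+ */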
+static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
+{
+ u32 xclk;
+ u32 wintime;
+ u32 cac_window;
+ u32 cac_window_size;
+
+ xclk = amdgpu_asic_get_xclk(adev);
+
+ if (xclk == 0)
+ return 0;
+
+ cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
+ cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
+
+ wintime = (cac_window_size * 100) / xclk;
+
+ return wintime;
+}
+
+static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
+{
+ return power_in_watts;
+}
+
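+/*
+ * Apply the user TDP override (tdp_adjustment, in percent) to both
+ * limits.  The near-TDP limit follows the TDP limit by the same
+ * absolute delta (clamped at zero), and the result is rejected if it
+ * falls outside (0, 2 * default TDP].
+ */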
+static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev,
+ bool adjust_polarity,
+ u32 tdp_adjustment,
+ u32 *tdp_limit,
+ u32 *near_tdp_limit)
+{
+ u32 adjustment_delta, max_tdp_limit;
+
+ if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit)
+ return -EINVAL;
+
+ max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100;
+
+ if (adjust_polarity) {
+ *tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
+ *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit);
+ } else {
+ *tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
+ adjustment_delta = adev->pm.dpm.tdp_limit - *tdp_limit;
+ if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted)
+ *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
+ else
+ *near_tdp_limit = 0;
+ }
+
+ if ((*tdp_limit == 0) || (*tdp_limit > max_tdp_limit))
+ return -EINVAL;
+ if ((*near_tdp_limit == 0) || (*near_tdp_limit > *tdp_limit))
+ return -EINVAL;
+
+ return 0;
+}
+
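+/*
+ * Push the adjusted TDP, near-TDP and safe power limits into the SMC
+ * state table as big-endian values scaled by 1000.  The single copy
+ * of sizeof(u32) * 3 relies on TDPLimit, NearTDPLimit and
+ * SafePowerLimit being consecutive fields; with PPM enabled the PAPM
+ * parameters are uploaded as well.
+ */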
+static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ if (ni_pi->enable_power_containment) {
+ SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
+ PP_SIslands_PAPMParameters *papm_parm;
+ struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
+ u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
+ u32 tdp_limit;
+ u32 near_tdp_limit;
+ int ret;
+
+ if (scaling_factor == 0)
+ return -EINVAL;
+
+ memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
+
+ ret = si_calculate_adjusted_tdp_limits(adev,
+ false, /* adjust_polarity: false selects the decrease branch */
+ adev->pm.dpm.tdp_adjustment,
+ &tdp_limit,
+ &near_tdp_limit);
+ if (ret)
+ return ret;
+
+ smc_table->dpm2Params.TDPLimit =
+ cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
+ smc_table->dpm2Params.NearTDPLimit =
+ cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
+ smc_table->dpm2Params.SafePowerLimit =
+ cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
+
+ ret = amdgpu_si_copy_bytes_to_smc(adev,
+ (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
+ offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
+ (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
+ sizeof(u32) * 3,
+ si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (si_pi->enable_ppm) {
+ papm_parm = &si_pi->papm_parm;
+ memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
+ papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
+ papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
+ papm_parm->dGPU_T_Warning = cpu_to_be32(95);
+ papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
+ papm_parm->PlatformPowerLimit = 0xffffffff;
+ papm_parm->NearTDPLimitPAPM = 0xffffffff;
+
+ ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start,
+ (u8 *)papm_parm,
+ sizeof(PP_SIslands_PAPMParameters),
+ si_pi->sram_end);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ if (ni_pi->enable_power_containment) {
+ SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
+ u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
+ int ret;
+
+ memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
+
+ smc_table->dpm2Params.NearTDPLimit =
+ cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
+ smc_table->dpm2Params.SafePowerLimit =
+ cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
+
+ ret = amdgpu_si_copy_bytes_to_smc(adev,
+ (si_pi->state_table_start +
+ offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
+ offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
+ (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
+ sizeof(u32) * 2,
+ si_pi->sram_end);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
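+/*
+ * Dynamic power ratio between two standardized voltages, P ~ V^2,
+ * returned in 6.10 fixed point with a safety margin:
+ *
+ *   ratio = 1024 * curr^2 * (1000 + margin) / (1000 * prev^2)
+ *
+ * 0 is returned on a zero input or a ratio that overflows 16 bits.
+ */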
+static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev,
+ const u16 prev_std_vddc,
+ const u16 curr_std_vddc)
+{
+ u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
+ u64 prev_vddc = (u64)prev_std_vddc;
+ u64 curr_vddc = (u64)curr_std_vddc;
+ u64 pwr_efficiency_ratio, n, d;
+
+ if ((prev_vddc == 0) || (curr_vddc == 0))
+ return 0;
+
+ n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
+ d = prev_vddc * prev_vddc;
+ pwr_efficiency_ratio = div64_u64(n, d);
+
+ if (pwr_efficiency_ratio > (u64)0xFFFF)
+ return 0;
+
+ return (u16)pwr_efficiency_ratio;
+}
+
+static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
+ amdgpu_state->vclk && amdgpu_state->dclk)
+ return true;
+
+ return false;
+}
+
+static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev)
+{
+ struct evergreen_power_info *pi = adev->pm.dpm.priv;
+
+ return pi;
+}
+
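+/*
+ * Fill the per-level DPM2 containment parameters.  Level 0 is the
+ * safe state and stays all-zero; for higher levels MaxPS is the
+ * allowed pulse-skip span derived from (max_sclk - min_sclk) /
+ * max_sclk, and PwrEfficiencyRatio relates the standardized VDDC of
+ * adjacent levels.
+ */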
+static int si_populate_power_containment_values(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state,
+ SISLANDS_SMC_SWSTATE *smc_state)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_ps *state = si_get_ps(amdgpu_state);
+ SISLANDS_SMC_VOLTAGE_VALUE vddc;
+ u32 prev_sclk;
+ u32 max_sclk;
+ u32 min_sclk;
+ u16 prev_std_vddc;
+ u16 curr_std_vddc;
+ int i;
+ u16 pwr_efficiency_ratio;
+ u8 max_ps_percent;
+ bool disable_uvd_power_tune;
+ int ret;
+
+ if (!ni_pi->enable_power_containment)
+ return 0;
+
+ if (state->performance_level_count == 0)
+ return -EINVAL;
+
+ if (smc_state->levelCount != state->performance_level_count)
+ return -EINVAL;
+
+ disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state);
+
+ smc_state->levels[0].dpm2.MaxPS = 0;
+ smc_state->levels[0].dpm2.NearTDPDec = 0;
+ smc_state->levels[0].dpm2.AboveSafeInc = 0;
+ smc_state->levels[0].dpm2.BelowSafeInc = 0;
+ smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+ for (i = 1; i < state->performance_level_count; i++) {
+ prev_sclk = state->performance_levels[i-1].sclk;
+ max_sclk = state->performance_levels[i].sclk;
+ if (i == 1)
+ max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
+ else
+ max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;
+
+ if (prev_sclk > max_sclk)
+ return -EINVAL;
+
+ if ((max_ps_percent == 0) ||
+ (prev_sclk == max_sclk) ||
+ disable_uvd_power_tune)
+ min_sclk = max_sclk;
+ else if (i == 1)
+ min_sclk = prev_sclk;
+ else
+ min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
+
+ if (min_sclk < state->performance_levels[0].sclk)
+ min_sclk = state->performance_levels[0].sclk;
+
+ if (min_sclk == 0)
+ return -EINVAL;
+
+ ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+ state->performance_levels[i-1].vddc, &vddc);
+ if (ret)
+ return ret;
+
+ ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc);
+ if (ret)
+ return ret;
+
+ ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+ state->performance_levels[i].vddc, &vddc);
+ if (ret)
+ return ret;
+
+ ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc);
+ if (ret)
+ return ret;
+
+ pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev,
+ prev_std_vddc, curr_std_vddc);
+
+ smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
+ smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
+ smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
+ smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
+ smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
+ }
+
+ return 0;
+}
+
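+/*
+ * Program per-level SQ power throttling.  Ramping is dropped entirely
+ * if any SISLANDS_DPM2_SQ_RAMP_* constant does not fit its register
+ * field, and only engages for levels clocked at or above
+ * sq_ramping_threshold; all other levels get the MIN/MAX fields set
+ * to their full masks.
+ */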
+static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state,
+ SISLANDS_SMC_SWSTATE *smc_state)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_ps *state = si_get_ps(amdgpu_state);
+ u32 sq_power_throttle, sq_power_throttle2;
+ bool enable_sq_ramping = ni_pi->enable_sq_ramping;
+ int i;
+
+ if (state->performance_level_count == 0)
+ return -EINVAL;
+
+ if (smc_state->levelCount != state->performance_level_count)
+ return -EINVAL;
+
+ if (adev->pm.dpm.sq_ramping_threshold == 0)
+ return -EINVAL;
+
+ if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
+ enable_sq_ramping = false;
+
+ if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
+ enable_sq_ramping = false;
+
+ if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
+ enable_sq_ramping = false;
+
+ if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+ enable_sq_ramping = false;
+
+ if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ enable_sq_ramping = false;
+
+ for (i = 0; i < state->performance_level_count; i++) {
+ sq_power_throttle = 0;
+ sq_power_throttle2 = 0;
+
+ if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
+ enable_sq_ramping) {
+ sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
+ sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
+ sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
+ sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
+ sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
+ } else {
+ sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
+ sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ }
+
+ smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
+ smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
+ }
+
+ return 0;
+}
+
+static int si_enable_power_containment(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state,
+ bool enable)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ PPSMC_Result smc_result;
+ int ret = 0;
+
+ if (ni_pi->enable_power_containment) {
+ if (enable) {
+ if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
+ if (smc_result != PPSMC_Result_OK) {
+ ret = -EINVAL;
+ ni_pi->pc_enabled = false;
+ } else {
+ ni_pi->pc_enabled = true;
+ }
+ }
+ } else {
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
+ if (smc_result != PPSMC_Result_OK)
+ ret = -EINVAL;
+ ni_pi->pc_enabled = false;
+ }
+ }
+
+ return ret;
+}
+
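+/*
+ * Convert si_dte_data into the big-endian Smc_SIslands_DTE_Configuration
+ * layout and upload it.  The filter stage count (k) and the
+ * temperature-dependent entry count are clamped to the SMC array
+ * sizes, and the last filter stage is dropped when temperature
+ * dependent entries are present.
+ */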
+static int si_initialize_smc_dte_tables(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ int ret = 0;
+ struct si_dte_data *dte_data = &si_pi->dte_data;
+ Smc_SIslands_DTE_Configuration *dte_tables = NULL;
+ u32 table_size;
+ u8 tdep_count;
+ u32 i;
+
+ if (dte_data == NULL)
+ si_pi->enable_dte = false;
+
+ if (!si_pi->enable_dte)
+ return 0;
+
+ if (dte_data->k <= 0)
+ return -EINVAL;
+
+ dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
+ if (dte_tables == NULL) {
+ si_pi->enable_dte = false;
+ return -ENOMEM;
+ }
+
+ table_size = dte_data->k;
+
+ if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
+ table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;
+
+ tdep_count = dte_data->tdep_count;
+ if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
+ tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;
+
+ dte_tables->K = cpu_to_be32(table_size);
+ dte_tables->T0 = cpu_to_be32(dte_data->t0);
+ dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
+ dte_tables->WindowSize = dte_data->window_size;
+ dte_tables->temp_select = dte_data->temp_select;
+ dte_tables->DTE_mode = dte_data->dte_mode;
+ dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);
+
+ if (tdep_count > 0)
+ table_size--;
+
+ for (i = 0; i < table_size; i++) {
+ dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
+ dte_tables->R[i] = cpu_to_be32(dte_data->r[i]);
+ }
+
+ dte_tables->Tdep_count = tdep_count;
+
+ for (i = 0; i < (u32)tdep_count; i++) {
+ dte_tables->T_limits[i] = dte_data->t_limits[i];
+ dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
+ dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
+ }
+
+ ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start,
+ (u8 *)dte_tables,
+ sizeof(Smc_SIslands_DTE_Configuration),
+ si_pi->sram_end);
+ kfree(dte_tables);
+
+ return ret;
+}
+
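+/*
+ * Find the VDDC range covered by the CAC leakage table, then pull the
+ * lower bound down by lkge_lut_v0_percent to form the load-line
+ * adjusted V0 of the leakage LUT.
+ */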
+static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev,
+ u16 *max, u16 *min)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct amdgpu_cac_leakage_table *table =
+ &adev->pm.dpm.dyn_state.cac_leakage_table;
+ u32 i;
+ u32 v0_loadline;
+
+ if (table == NULL)
+ return -EINVAL;
+
+ *max = 0;
+ *min = 0xFFFF;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->entries[i].vddc > *max)
+ *max = table->entries[i].vddc;
+ if (table->entries[i].vddc < *min)
+ *min = table->entries[i].vddc;
+ }
+
+ if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
+ return -EINVAL;
+
+ v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;
+
+ if (v0_loadline > 0xFFFFUL)
+ return -EINVAL;
+
+ *min = (u16)v0_loadline;
+
+ if ((*min > *max) || (*max == 0) || (*min == 0))
+ return -EINVAL;
+
+ return 0;
+}
+
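+/* LUT voltage step, rounded up so the fixed entry count spans [min, max]. */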
+static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
+{
+ return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
+ SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
+}
+
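+/*
+ * Build the temperature/voltage leakage LUT: rows run from t0 in
+ * t_step increments, columns from vddc_max downwards in vddc_step
+ * decrements.  Entries are scaled for the SMC, divided by 4 and
+ * saturated to 16 bits; the voltage axis is stored reversed so the
+ * lowest voltage lands in column 0.
+ */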
+static int si_init_dte_leakage_table(struct amdgpu_device *adev,
+ PP_SIslands_CacConfig *cac_tables,
+ u16 vddc_max, u16 vddc_min, u16 vddc_step,
+ u16 t0, u16 t_step)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 leakage;
+ unsigned int i, j;
+ s32 t;
+ u32 smc_leakage;
+ u32 scaling_factor;
+ u16 voltage;
+
+ scaling_factor = si_get_smc_power_scaling_factor(adev);
+
+ for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
+ t = (1000 * (i * t_step + t0));
+
+ for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+ voltage = vddc_max - (vddc_step * j);
+
+ si_calculate_leakage_for_v_and_t(adev,
+ &si_pi->powertune_data->leakage_coefficients,
+ voltage,
+ t,
+ si_pi->dyn_powertune_data.cac_leakage,
+ &leakage);
+
+ smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
+
+ if (smc_leakage > 0xFFFF)
+ smc_leakage = 0xFFFF;
+
+ cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
+ cpu_to_be16((u16)smc_leakage);
+ }
+ }
+ return 0;
+}
+
+static int si_init_simplified_leakage_table(struct amdgpu_device *adev,
+ PP_SIslands_CacConfig *cac_tables,
+ u16 vddc_max, u16 vddc_min, u16 vddc_step)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 leakage;
+ unsigned int i, j;
+ u32 smc_leakage;
+ u32 scaling_factor;
+ u16 voltage;
+
+ scaling_factor = si_get_smc_power_scaling_factor(adev);
+
+ for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+ voltage = vddc_max - (vddc_step * j);
+
+ si_calculate_leakage_for_v(adev,
+ &si_pi->powertune_data->leakage_coefficients,
+ si_pi->powertune_data->fixed_kt,
+ voltage,
+ si_pi->dyn_powertune_data.cac_leakage,
+ &leakage);
+
+ smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
+
+ if (smc_leakage > 0xFFFF)
+ smc_leakage = 0xFFFF;
+
+ for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
+ cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
+ cpu_to_be16((u16)smc_leakage);
+ }
+ return 0;
+}
+
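+/*
+ * Program the CAC window into CG_CAC_CTRL and upload the CAC
+ * configuration (leakage LUT, load line, window time and scaling
+ * constants) to SMC RAM.  The LUT uses a 60 C base with 4 C steps;
+ * any failure disables both CAC and power containment.
+ */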
+static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ PP_SIslands_CacConfig *cac_tables = NULL;
+ u16 vddc_max, vddc_min, vddc_step;
+ u16 t0, t_step;
+ u32 load_line_slope, reg;
+ int ret = 0;
+ u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100;
+
+ if (!ni_pi->enable_cac)
+ return 0;
+
+ cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
+ if (!cac_tables)
+ return -ENOMEM;
+
+ reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
+ reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
+ WREG32(CG_CAC_CTRL, reg);
+
+ si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
+ si_pi->dyn_powertune_data.dc_pwr_value =
+ si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
+ si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev);
+ si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;
+
+ si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;
+
+ ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min);
+ if (ret)
+ goto done_free;
+
+ vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
+ vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
+ t_step = 4;
+ t0 = 60;
+
+ if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
+ ret = si_init_dte_leakage_table(adev, cac_tables,
+ vddc_max, vddc_min, vddc_step,
+ t0, t_step);
+ else
+ ret = si_init_simplified_leakage_table(adev, cac_tables,
+ vddc_max, vddc_min, vddc_step);
+ if (ret)
+ goto done_free;
+
+ load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;
+
+ cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
+ cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
+ cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
+ cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
+ cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
+ cac_tables->R_LL = cpu_to_be32(load_line_slope);
+ cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
+ cac_tables->calculation_repeats = cpu_to_be32(2);
+ cac_tables->dc_cac = cpu_to_be32(0);
+ cac_tables->log2_PG_LKG_SCALE = 12;
+ cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
+ cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
+ cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);
+
+ ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->cac_table_start,
+ (u8 *)cac_tables,
+ sizeof(PP_SIslands_CacConfig),
+ si_pi->sram_end);
+
+ if (ret)
+ goto done_free;
+
+ ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);
+
+done_free:
+ if (ret) {
+ ni_pi->enable_cac = false;
+ ni_pi->enable_power_containment = false;
+ }
+
+ kfree(cac_tables);
+
+ return ret;
+}
+
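+/*
+ * Apply a 0xFFFFFFFF-terminated list of read-modify-write updates.
+ * SISLANDS_CACCONFIG_CGIND entries address the SMC clock-gating
+ * indirect space (offset from SMC_CG_IND_START); anything else is a
+ * plain MMIO register.
+ */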
+static int si_program_cac_config_registers(struct amdgpu_device *adev,
+ const struct si_cac_config_reg *cac_config_regs)
+{
+ const struct si_cac_config_reg *config_regs = cac_config_regs;
+ u32 data = 0, offset;
+
+ if (!config_regs)
+ return -EINVAL;
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ switch (config_regs->type) {
+ case SISLANDS_CACCONFIG_CGIND:
+ offset = SMC_CG_IND_START + config_regs->offset;
+ if (offset < SMC_CG_IND_END)
+ data = RREG32_SMC(offset);
+ break;
+ default:
+ data = RREG32(config_regs->offset);
+ break;
+ }
+
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+
+ switch (config_regs->type) {
+ case SISLANDS_CACCONFIG_CGIND:
+ offset = SMC_CG_IND_START + config_regs->offset;
+ if (offset < SMC_CG_IND_END)
+ WREG32_SMC(offset, data);
+ break;
+ default:
+ WREG32(config_regs->offset, data);
+ break;
+ }
+ config_regs++;
+ }
+ return 0;
+}
+
+static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ int ret;
+
+ if (!ni_pi->enable_cac ||
+ !ni_pi->cac_configuration_required)
+ return 0;
+
+ ret = si_program_cac_config_registers(adev, si_pi->lcac_config);
+ if (ret)
+ return ret;
+ ret = si_program_cac_config_registers(adev, si_pi->cac_override);
+ if (ret)
+ return ret;
+ ret = si_program_cac_config_registers(adev, si_pi->cac_weights);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
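+/*
+ * Toggle CAC, long-term averaging and DTE on the SMC.  A refused
+ * long-term-average enable is absorbed by clearing
+ * support_cac_long_term_average rather than failing the call.
+ */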
+static int si_enable_smc_cac(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state,
+ bool enable)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ PPSMC_Result smc_result;
+ int ret = 0;
+
+ if (ni_pi->enable_cac) {
+ if (enable) {
+ if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
+ if (ni_pi->support_cac_long_term_average) {
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable);
+ if (smc_result != PPSMC_Result_OK)
+ ni_pi->support_cac_long_term_average = false;
+ }
+
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
+ if (smc_result != PPSMC_Result_OK) {
+ ret = -EINVAL;
+ ni_pi->cac_enabled = false;
+ } else {
+ ni_pi->cac_enabled = true;
+ }
+
+ if (si_pi->enable_dte) {
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
+ if (smc_result != PPSMC_Result_OK)
+ ret = -EINVAL;
+ }
+ }
+ } else if (ni_pi->cac_enabled) {
+ if (si_pi->enable_dte)
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
+
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
+
+ ni_pi->cac_enabled = false;
+
+ if (ni_pi->support_cac_long_term_average)
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable);
+ }
+ }
+ return ret;
+}
+
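+/*
+ * Precompute SPLL divider settings for 256 engine clocks in 512-unit
+ * (10 kHz, i.e. 5.12 MHz) steps and upload them to SMC RAM.  The post
+ * divider, feedback divider and spread spectrum CLK_S/CLK_V values
+ * are extracted from the computed register images, range-checked
+ * against the SMC field masks and repacked.
+ */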
+static int si_init_smc_spll_table(struct amdgpu_device *adev)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
+ SISLANDS_SMC_SCLK_VALUE sclk_params;
+ u32 fb_div, p_div;
+ u32 clk_s, clk_v;
+ u32 sclk = 0;
+ int ret = 0;
+ u32 tmp;
+ int i;
+
+ if (si_pi->spll_table_start == 0)
+ return -EINVAL;
+
+ spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
+ if (spll_table == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < 256; i++) {
+ ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
+ if (ret)
+ break;
+ p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
+ fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
+ clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
+ clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
+
+ fb_div &= ~0x00001FFF;
+ fb_div >>= 1;
+ clk_v >>= 6;
+
+ if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
+ ret = -EINVAL;
+ if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
+ ret = -EINVAL;
+ if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
+ ret = -EINVAL;
+ if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
+ ret = -EINVAL;
+
+ if (ret)
+ break;
+
+ tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
+ ((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
+ spll_table->freq[i] = cpu_to_be32(tmp);
+
+ tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
+ ((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
+ spll_table->ss[i] = cpu_to_be32(tmp);
+
+ sclk += 512;
+ }
+
+ if (!ret)
+ ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start,
+ (u8 *)spll_table,
+ sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
+ si_pi->sram_end);
+
+ if (ret)
+ ni_pi->enable_power_containment = false;
+
+ kfree(spll_table);
+
+ return ret;
+}
+
+struct si_dpm_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 max_sclk;
+ u32 max_mclk;
+};
+
+/* cards with dpm stability problems */
+static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
+ { 0, 0, 0, 0 },
+};
+
+static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
+ u16 vce_voltage)
+{
+ u16 highest_leakage = 0;
+ struct si_power_info *si_pi = si_get_pi(adev);
+ int i;
+
+ for (i = 0; i < si_pi->leakage_voltage.count; i++) {
+ if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage)
+ highest_leakage = si_pi->leakage_voltage.entries[i].voltage;
+ }
+
+ if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage))
+ return highest_leakage;
+
+ return vce_voltage;
+}
+
+static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
+ u32 evclk, u32 ecclk, u16 *voltage)
+{
+ u32 i;
+ int ret = -EINVAL;
+ struct amdgpu_vce_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+ if (((evclk == 0) && (ecclk == 0)) ||
+ (table && (table->count == 0))) {
+ *voltage = 0;
+ return 0;
+ }
+
+ for (i = 0; i < table->count; i++) {
+ if ((evclk <= table->entries[i].evclk) &&
+ (ecclk <= table->entries[i].ecclk)) {
+ *voltage = table->entries[i].v;
+ ret = 0;
+ break;
+ }
+ }
+
+ /* if no match return the highest voltage */
+ if (ret)
+ *voltage = table->entries[table->count - 1].v;
+
+ *voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage);
+
+ return ret;
+}
+
+static bool si_dpm_vblank_too_short(struct amdgpu_device *adev)
+{
+ u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ /* we never hit the non-gddr5 limit so disable it */
+ u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
+
+ return vblank_time < switch_limit;
+}
+
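+/*
+ * Copy the MC arbiter DRAM timing registers and burst time from one
+ * of the F0-F3 register sets to another, then request the destination
+ * set through MC_ARB_CG so arbitration follows the memory clock
+ * switch.
+ */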
+static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev,
+ u32 arb_freq_src, u32 arb_freq_dest)
+{
+ u32 mc_arb_dram_timing;
+ u32 mc_arb_dram_timing2;
+ u32 burst_time;
+ u32 mc_cg_config;
+
+ switch (arb_freq_src) {
+ case MC_CG_ARB_FREQ_F0:
+ mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
+ mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+ burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
+ break;
+ case MC_CG_ARB_FREQ_F1:
+ mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1);
+ mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
+ burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
+ break;
+ case MC_CG_ARB_FREQ_F2:
+ mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2);
+ mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
+ burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
+ break;
+ case MC_CG_ARB_FREQ_F3:
+ mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3);
+ mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
+ burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (arb_freq_dest) {
+ case MC_CG_ARB_FREQ_F0:
+ WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+ WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+ WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
+ break;
+ case MC_CG_ARB_FREQ_F1:
+ WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+ WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+ WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
+ break;
+ case MC_CG_ARB_FREQ_F2:
+ WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
+ WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
+ WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
+ break;
+ case MC_CG_ARB_FREQ_F3:
+ WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
+ WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
+ WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
+ WREG32(MC_CG_CONFIG, mc_cg_config);
+ WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);
+
+ return 0;
+}
+
+static void ni_update_current_ps(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ struct si_ps *new_ps = si_get_ps(rps);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+
+ eg_pi->current_rps = *rps;
+ ni_pi->current_ps = *new_ps;
+ eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
+}
+
+static void ni_update_requested_ps(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ struct si_ps *new_ps = si_get_ps(rps);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+
+ eg_pi->requested_rps = *rps;
+ ni_pi->requested_ps = *new_ps;
+ eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
+}
+
+static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
+ struct amdgpu_ps *new_ps,
+ struct amdgpu_ps *old_ps)
+{
+ struct si_ps *new_state = si_get_ps(new_ps);
+ struct si_ps *current_state = si_get_ps(old_ps);
+
+ if ((new_ps->vclk == old_ps->vclk) &&
+ (new_ps->dclk == old_ps->dclk))
+ return;
+
+ if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
+ current_state->performance_levels[current_state->performance_level_count - 1].sclk)
+ return;
+
+ amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
+}
+
+static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
+ struct amdgpu_ps *new_ps,
+ struct amdgpu_ps *old_ps)
+{
+ struct si_ps *new_state = si_get_ps(new_ps);
+ struct si_ps *current_state = si_get_ps(old_ps);
+
+ if ((new_ps->vclk == old_ps->vclk) &&
+ (new_ps->dclk == old_ps->dclk))
+ return;
+
+ if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
+ current_state->performance_levels[current_state->performance_level_count - 1].sclk)
+ return;
+
+ amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
+}
+
+static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
+{
+ unsigned int i;
+
+ for (i = 0; i < table->count; i++)
+ if (voltage <= table->entries[i].value)
+ return table->entries[i].value;
+
+ return table->entries[table->count - 1].value;
+}
+
+static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
+ u32 max_clock, u32 requested_clock)
+{
+ unsigned int i;
+
+ if ((clocks == NULL) || (clocks->count == 0))
+ return (requested_clock < max_clock) ? requested_clock : max_clock;
+
+ for (i = 0; i < clocks->count; i++) {
+ if (clocks->values[i] >= requested_clock)
+ return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
+ }
+
+ return (clocks->values[clocks->count - 1] < max_clock) ?
+ clocks->values[clocks->count - 1] : max_clock;
+}
+
+static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
+ u32 max_mclk, u32 requested_mclk)
+{
+ return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
+ max_mclk, requested_mclk);
+}
+
+static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
+ u32 max_sclk, u32 requested_sclk)
+{
+ return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
+ max_sclk, requested_sclk);
+}
+
+static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
+ u32 *max_clock)
+{
+ u32 i, clock = 0;
+
+ if ((table == NULL) || (table->count == 0)) {
+ *max_clock = clock;
+ return;
+ }
+
+ for (i = 0; i < table->count; i++) {
+ if (clock < table->entries[i].clk)
+ clock = table->entries[i].clk;
+ }
+ *max_clock = clock;
+}
+
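+/*
+ * Raise *voltage to what the dependency table requires for the given
+ * clock, clamped to max_voltage.  A clock above every table entry is
+ * assumed to need at least max_voltage.
+ */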
+static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
+ u32 clock, u16 max_voltage, u16 *voltage)
+{
+ u32 i;
+
+ if ((table == NULL) || (table->count == 0))
+ return;
+
+ for (i = 0; i < table->count; i++) {
+ if (clock <= table->entries[i].clk) {
+ if (*voltage < table->entries[i].v)
+ *voltage = (u16)((table->entries[i].v < max_voltage) ?
+ table->entries[i].v : max_voltage);
+ return;
+ }
+ }
+
+ *voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
+}
+
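+/*
+ * Keep mclk and sclk within the platform limits: raise sclk when
+ * mclk/sclk would exceed mclk_sclk_ratio, and raise mclk when sclk
+ * leads it by more than sclk_mclk_delta.
+ */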
+static void btc_adjust_clock_combinations(struct amdgpu_device *adev,
+ const struct amdgpu_clock_and_voltage_limits *max_limits,
+ struct rv7xx_pl *pl)
+{
+ if ((pl->mclk == 0) || (pl->sclk == 0))
+ return;
+
+ if (pl->mclk == pl->sclk)
+ return;
+
+ if (pl->mclk > pl->sclk) {
+ if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
+ pl->sclk = btc_get_valid_sclk(adev,
+ max_limits->sclk,
+ (pl->mclk +
+ (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
+ adev->pm.dpm.dyn_state.mclk_sclk_ratio);
+ } else {
+ if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
+ pl->mclk = btc_get_valid_mclk(adev,
+ max_limits->mclk,
+ pl->sclk -
+ adev->pm.dpm.dyn_state.sclk_mclk_delta);
+ }
+}
+
+static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
+ u16 max_vddc, u16 max_vddci,
+ u16 *vddc, u16 *vddci)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ u16 new_voltage;
+
+ if ((*vddc == 0) || (*vddci == 0))
+ return;
+
+ if (*vddc > *vddci) {
+ if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
+ new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
+ (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
+ *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
+ }
+ } else {
+ if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
+ new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
+ (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
+ *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
+ }
+ }
+}
+
+static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
+ u32 sys_mask,
+ enum amdgpu_pcie_gen asic_gen,
+ enum amdgpu_pcie_gen default_gen)
+{
+ switch (asic_gen) {
+ case AMDGPU_PCIE_GEN1:
+ return AMDGPU_PCIE_GEN1;
+ case AMDGPU_PCIE_GEN2:
+ return AMDGPU_PCIE_GEN2;
+ case AMDGPU_PCIE_GEN3:
+ return AMDGPU_PCIE_GEN3;
+ default:
+ if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
+ return AMDGPU_PCIE_GEN3;
+ else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
+ return AMDGPU_PCIE_GEN2;
+ else
+ return AMDGPU_PCIE_GEN1;
+ }
+ return AMDGPU_PCIE_GEN1;
+}
+
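+/*
+ * Derive the p and u timing parameters from interval i, given as a
+ * percentage of the reference count r_c: u is half the significant
+ * bit count of (i * r_c / 100) >> p_b, rounded up, and p is the
+ * scaled count divided by 4^u.
+ */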
+static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
+ u32 *p, u32 *u)
+{
+ u32 b_c = 0;
+ u32 i_c;
+ u32 tmp;
+
+ i_c = (i * r_c) / 100;
+ tmp = i_c >> p_b;
+
+ while (tmp) {
+ b_c++;
+ tmp >>= 1;
+ }
+
+ *u = (b_c + 1) / 2;
+ *p = i_c / (1 << (2 * (*u)));
+}
+
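+/*
+ * Split the hysteresis h around target temperature t using the
+ * high/low clock ratio k = 100 * fh / fl: the asymmetric offsets ah
+ * and al (ah + al = a) yield th = t - ah and tl = t + al.
+ */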
+static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
+{
+ u32 k, a, ah, al;
+ u32 t1;
+
+ if ((fl == 0) || (fh == 0) || (fl > fh))
+ return -EINVAL;
+
+ k = (100 * fh) / fl;
+ t1 = (t * (k - 100));
+ a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
+ a = (a + 5) / 10;
+ ah = ((a * t) + 5000) / 10000;
+ al = a - ah;
+
+ *th = t - ah;
+ *tl = t + al;
+
+ return 0;
+}
+
+static bool r600_is_uvd_state(u32 class, u32 class2)
+{
+ if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+ return true;
+ if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+ return true;
+ if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+ return true;
+ if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+ return true;
+ if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+ return true;
+ return false;
+}
+
+static u8 rv770_get_memory_module_index(struct amdgpu_device *adev)
+{
+ return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
+}
+
+static void rv770_get_max_vddc(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ u16 vddc;
+
+ if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc))
+ pi->max_vddc = 0;
+ else
+ pi->max_vddc = vddc;
+}
+
+static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct amdgpu_atom_ss ss;
+
+ pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, 0);
+ pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, 0);
+
+ if (pi->sclk_ss || pi->mclk_ss)
+ pi->dynamic_ss = true;
+ else
+ pi->dynamic_ss = false;
+}
+
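+/*
+ * Clamp and massage a requested power state before programming it:
+ * apply the per-board quirk limits, the AC/DC clock and voltage
+ * caps and the voltage dependency tables, pin mclk (and sclk while
+ * UVD is active) to the highest level when switching is unsafe, and
+ * make clocks and voltages monotonic across performance levels.
+ */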
+static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ struct si_ps *ps = si_get_ps(rps);
+ struct amdgpu_clock_and_voltage_limits *max_limits;
+ bool disable_mclk_switching = false;
+ bool disable_sclk_switching = false;
+ u32 mclk, sclk;
+ u16 vddc, vddci, min_vce_voltage = 0;
+ u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
+ u32 max_sclk = 0, max_mclk = 0;
+ int i;
+ struct si_dpm_quirk *p = si_dpm_quirk_list;
+
+ /* Apply dpm quirks */
+ while (p && p->chip_device != 0) {
+ if (adev->pdev->vendor == p->chip_vendor &&
+ adev->pdev->device == p->chip_device &&
+ adev->pdev->subsystem_vendor == p->subsys_vendor &&
+ adev->pdev->subsystem_device == p->subsys_device) {
+ max_sclk = p->max_sclk;
+ max_mclk = p->max_mclk;
+ break;
+ }
+ ++p;
+ }
+ /* limit mclk on all R7 370 parts for stability */
+ if (adev->pdev->device == 0x6811 &&
+ adev->pdev->revision == 0x81)
+ max_mclk = 120000;
+ /* limit sclk/mclk on Jet parts for stability */
+ if (adev->pdev->device == 0x6665 &&
+ adev->pdev->revision == 0xc3) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+
+ if (rps->vce_active) {
+ rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
+ rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
+ si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk,
+ &min_vce_voltage);
+ } else {
+ rps->evclk = 0;
+ rps->ecclk = 0;
+ }
+
+ if ((adev->pm.dpm.new_active_crtc_count > 1) ||
+ si_dpm_vblank_too_short(adev))
+ disable_mclk_switching = true;
+
+ if (rps->vclk || rps->dclk) {
+ disable_mclk_switching = true;
+ disable_sclk_switching = true;
+ }
+
+ if (adev->pm.dpm.ac_power)
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ for (i = ps->performance_level_count - 2; i >= 0; i--) {
+ if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
+ ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
+ }
+ if (!adev->pm.dpm.ac_power) {
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].mclk > max_limits->mclk)
+ ps->performance_levels[i].mclk = max_limits->mclk;
+ if (ps->performance_levels[i].sclk > max_limits->sclk)
+ ps->performance_levels[i].sclk = max_limits->sclk;
+ if (ps->performance_levels[i].vddc > max_limits->vddc)
+ ps->performance_levels[i].vddc = max_limits->vddc;
+ if (ps->performance_levels[i].vddci > max_limits->vddci)
+ ps->performance_levels[i].vddci = max_limits->vddci;
+ }
+ }
+
+ /* limit clocks to max supported clocks based on voltage dependency tables */
+ btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ &max_sclk_vddc);
+ btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &max_mclk_vddci);
+ btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &max_mclk_vddc);
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (max_sclk_vddc) {
+ if (ps->performance_levels[i].sclk > max_sclk_vddc)
+ ps->performance_levels[i].sclk = max_sclk_vddc;
+ }
+ if (max_mclk_vddci) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddci)
+ ps->performance_levels[i].mclk = max_mclk_vddci;
+ }
+ if (max_mclk_vddc) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddc)
+ ps->performance_levels[i].mclk = max_mclk_vddc;
+ }
+ if (max_mclk) {
+ if (ps->performance_levels[i].mclk > max_mclk)
+ ps->performance_levels[i].mclk = max_mclk;
+ }
+ if (max_sclk) {
+ if (ps->performance_levels[i].sclk > max_sclk)
+ ps->performance_levels[i].sclk = max_sclk;
+ }
+ }
+
+ /* XXX validate the min clocks required for display */
+
+ if (disable_mclk_switching) {
+ mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
+ vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
+ } else {
+ mclk = ps->performance_levels[0].mclk;
+ vddci = ps->performance_levels[0].vddci;
+ }
+
+ if (disable_sclk_switching) {
+ sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
+ vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
+ } else {
+ sclk = ps->performance_levels[0].sclk;
+ vddc = ps->performance_levels[0].vddc;
+ }
+
+ if (rps->vce_active) {
+ if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
+ sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
+ if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
+ mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
+ }
+
+ /* adjusted low state */
+ ps->performance_levels[0].sclk = sclk;
+ ps->performance_levels[0].mclk = mclk;
+ ps->performance_levels[0].vddc = vddc;
+ ps->performance_levels[0].vddci = vddci;
+
+ if (disable_sclk_switching) {
+ sclk = ps->performance_levels[0].sclk;
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (sclk < ps->performance_levels[i].sclk)
+ sclk = ps->performance_levels[i].sclk;
+ }
+ for (i = 0; i < ps->performance_level_count; i++) {
+ ps->performance_levels[i].sclk = sclk;
+ ps->performance_levels[i].vddc = vddc;
+ }
+ } else {
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
+ ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
+ if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
+ ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+ }
+ }
+
+ if (disable_mclk_switching) {
+ mclk = ps->performance_levels[0].mclk;
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (mclk < ps->performance_levels[i].mclk)
+ mclk = ps->performance_levels[i].mclk;
+ }
+ for (i = 0; i < ps->performance_level_count; i++) {
+ ps->performance_levels[i].mclk = mclk;
+ ps->performance_levels[i].vddci = vddci;
+ }
+ } else {
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
+ ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
+ if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
+ ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
+ }
+ }
+
+ for (i = 0; i < ps->performance_level_count; i++)
+ btc_adjust_clock_combinations(adev, max_limits,
+ &ps->performance_levels[i]);
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].vddc < min_vce_voltage)
+ ps->performance_levels[i].vddc = min_vce_voltage;
+ btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ ps->performance_levels[i].sclk,
+ max_limits->vddc, &ps->performance_levels[i].vddc);
+ btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ ps->performance_levels[i].mclk,
+ max_limits->vddci, &ps->performance_levels[i].vddci);
+ btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ ps->performance_levels[i].mclk,
+ max_limits->vddc, &ps->performance_levels[i].vddc);
+ btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
+ adev->clock.current_dispclk,
+ max_limits->vddc, &ps->performance_levels[i].vddc);
+ }
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ btc_apply_voltage_delta_rules(adev,
+ max_limits->vddc, max_limits->vddci,
+ &ps->performance_levels[i].vddc,
+ &ps->performance_levels[i].vddci);
+ }
+
+ ps->dc_compatible = true;
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
+ ps->dc_compatible = false;
+ }
+}
+
+#if 0
+static int si_read_smc_soft_register(struct amdgpu_device *adev,
+ u16 reg_offset, u32 *value)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ return amdgpu_si_read_smc_sram_dword(adev,
+ si_pi->soft_regs_start + reg_offset, value,
+ si_pi->sram_end);
+}
+#endif
+
+static int si_write_smc_soft_register(struct amdgpu_device *adev,
+ u16 reg_offset, u32 value)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ return amdgpu_si_write_smc_sram_dword(adev,
+ si_pi->soft_regs_start + reg_offset,
+ value, si_pi->sram_end);
+}
+
+static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
+{
+ bool ret = false;
+ u32 tmp, width, row, column, bank, density;
+ bool is_memory_gddr5, is_special;
+
+ tmp = RREG32(MC_SEQ_MISC0);
+ is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
+ is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT)) &&
+ (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));
+
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
+ width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
+
+ tmp = RREG32(MC_ARB_RAMCFG);
+ row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
+ column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
+ bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
+
+ density = (1 << (row + column - 20 + bank)) * width;
+
+ if ((adev->pdev->device == 0x6819) &&
+ is_memory_gddr5 && is_special && (density == 0x400))
+ ret = true;
+
+ return ret;
+}
+
+static void si_get_leakage_vddc(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u16 vddc, count = 0;
+ int i, ret;
+
+ for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
+ ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);
+
+ if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
+ si_pi->leakage_voltage.entries[count].voltage = vddc;
+ si_pi->leakage_voltage.entries[count].leakage_index =
+ SISLANDS_LEAKAGE_INDEX0 + i;
+ count++;
+ }
+ }
+ si_pi->leakage_voltage.count = count;
+}
+
+static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev,
+ u32 index, u16 *leakage_voltage)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ int i;
+
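+ /* Leakage voltages are referenced through virtual indices with a
+  * 0xff00 prefix rather than by real voltage values, hence the
+  * range checks on the prefix below.
+  */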
+ if (leakage_voltage == NULL)
+ return -EINVAL;
+
+ if ((index & 0xff00) != 0xff00)
+ return -EINVAL;
+
+ if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
+ return -EINVAL;
+
+ if (index < SISLANDS_LEAKAGE_INDEX0)
+ return -EINVAL;
+
+ for (i = 0; i < si_pi->leakage_voltage.count; i++) {
+ if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
+ *leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
+ return 0;
+ }
+ }
+ return -EAGAIN;
+}
+
+static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ bool want_thermal_protection;
+ enum amdgpu_dpm_event_src dpm_event_src;
+
+ switch (sources) {
+ case 0:
+ default:
+ want_thermal_protection = false;
+ break;
+ case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
+ want_thermal_protection = true;
+ dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
+ break;
+ case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+ want_thermal_protection = true;
+ dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
+ break;
+ case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+ (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+ want_thermal_protection = true;
+ dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+ break;
+ }
+
+ if (want_thermal_protection) {
+ WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
+ if (pi->thermal_protection)
+ WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+ } else {
+ WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+ }
+}
+
+static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
+ enum amdgpu_dpm_auto_throttle_src source,
+ bool enable)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+ if (enable) {
+ if (!(pi->active_auto_throttle_sources & (1 << source))) {
+ pi->active_auto_throttle_sources |= 1 << source;
+ si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+ }
+ } else {
+ if (pi->active_auto_throttle_sources & (1 << source)) {
+ pi->active_auto_throttle_sources &= ~(1 << source);
+ si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+ }
+ }
+}
+
+static void si_start_dpm(struct amdgpu_device *adev)
+{
+ WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+}
+
+static void si_stop_dpm(struct amdgpu_device *adev)
+{
+ WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+}
+
+static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
+{
+ if (enable)
+ WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+ else
+ WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+}
+
+#if 0
+static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev,
+ u32 thermal_level)
+{
+ PPSMC_Result ret;
+
+ if (thermal_level == 0) {
+ ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
+ if (ret == PPSMC_Result_OK)
+ return 0;
+ else
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev)
+{
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
+}
+#endif
+
+#if 0
+static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power)
+{
+ if (ac_power)
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+
+ return 0;
+}
+#endif
+
+static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+ PPSMC_Msg msg, u32 parameter)
+{
+ WREG32(SMC_SCRATCH0, parameter);
+ return amdgpu_si_send_msg_to_smc(adev, msg);
+}
+
+static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev)
+{
+ if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
+ return -EINVAL;
+
+ return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int si_dpm_force_performance_level(struct amdgpu_device *adev,
+ enum amdgpu_dpm_forced_level level)
+{
+ struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
+ struct si_ps *ps = si_get_ps(rps);
+ u32 levels = ps->performance_level_count;
+
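+ /* HIGH enables every performance level and asks the SMC to force
+  * one; LOW clears the forcing and leaves only the lowest level
+  * enabled; AUTO clears the forcing and re-enables all levels.
+  */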
+ if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
+ if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+ return -EINVAL;
+
+ if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
+ return -EINVAL;
+ } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
+ if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+ return -EINVAL;
+
+ if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
+ return -EINVAL;
+ } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
+ if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+ return -EINVAL;
+
+ if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ adev->pm.dpm.forced_level = level;
+
+ return 0;
+}
+
+#if 0
+static int si_set_boot_state(struct amdgpu_device *adev)
+{
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+#endif
+
+static int si_set_sw_state(struct amdgpu_device *adev)
+{
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int si_halt_smc(struct amdgpu_device *adev)
+{
+ if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
+ return -EINVAL;
+
+ return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int si_resume_smc(struct amdgpu_device *adev)
+{
+ if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
+ return -EINVAL;
+
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static void si_dpm_start_smc(struct amdgpu_device *adev)
+{
+ amdgpu_si_program_jump_on_start(adev);
+ amdgpu_si_start_smc(adev);
+ amdgpu_si_smc_clock(adev, true);
+}
+
+static void si_dpm_stop_smc(struct amdgpu_device *adev)
+{
+ amdgpu_si_reset_smc(adev);
+ amdgpu_si_smc_clock(adev, false);
+}
+
+static int si_process_firmware_header(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 tmp;
+ int ret;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->state_table_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->soft_regs_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->mc_reg_table_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->fan_table_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->arb_table_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->cac_table_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->dte_table_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->spll_table_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->papm_cfg_table_start = tmp;
+
+ return ret;
+}
+
+static void si_read_clock_registers(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
+ si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
+ si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
+ si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
+ si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
+ si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+ si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
+ si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
+ si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
+ si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
+ si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
+ si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
+ si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
+ si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
+ si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
+}
+
+static void si_enable_thermal_protection(struct amdgpu_device *adev,
+ bool enable)
+{
+ if (enable)
+ WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+ else
+ WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+}
+
+static void si_enable_acpi_power_management(struct amdgpu_device *adev)
+{
+ WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
+}
+
+#if 0
+static int si_enter_ulp_state(struct amdgpu_device *adev)
+{
+ WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
+
+ udelay(25000);
+
+ return 0;
+}
+
+static int si_exit_ulp_state(struct amdgpu_device *adev)
+{
+ int i;
+
+ WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
+
+ udelay(7000);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(SMC_RESP_0) == 1)
+ break;
+ udelay(1000);
+ }
+
+ return 0;
+}
+#endif
+
+static int si_notify_smc_display_change(struct amdgpu_device *adev,
+ bool has_display)
+{
+ PPSMC_Msg msg = has_display ?
+ PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
+
+ return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static void si_program_response_times(struct amdgpu_device *adev)
+{
+ u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
+ u32 vddc_dly, acpi_dly, vbi_dly;
+ u32 reference_clock;
+
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
+
+ voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
+ backbias_response_time = (u32)adev->pm.dpm.backbias_response_time;
+
+ if (voltage_response_time == 0)
+ voltage_response_time = 1000;
+
+ acpi_delay_time = 15000;
+ vbi_time_out = 100000;
+
+ reference_clock = amdgpu_asic_get_xclk(adev);
+
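+ /* xclk is reported in 10 kHz units, so with the intervals above in
+  * microseconds, t * xclk / 100 converts them into reference clock
+  * ticks for the SMC.
+  */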
+ vddc_dly = (voltage_response_time * reference_clock) / 100;
+ acpi_dly = (acpi_delay_time * reference_clock) / 100;
+ vbi_dly = (vbi_time_out * reference_clock) / 100;
+
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
+}
+
+static void si_program_ds_registers(struct amdgpu_device *adev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ u32 tmp;
+
+ /* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
+ if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0)
+ tmp = 0x10;
+ else
+ tmp = 0x1;
+
+ if (eg_pi->sclk_deep_sleep) {
+ WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
+ WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
+ ~AUTOSCALE_ON_SS_CLEAR);
+ }
+}
+
+static void si_program_display_gap(struct amdgpu_device *adev)
+{
+ u32 tmp, pipe;
+ int i;
+
+ tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+ if (adev->pm.dpm.new_active_crtc_count > 0)
+ tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ else
+ tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+ if (adev->pm.dpm.new_active_crtc_count > 1)
+ tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ else
+ tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+ WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+
+ tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
+ pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
+
+ if ((adev->pm.dpm.new_active_crtc_count > 0) &&
+ (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
+ /* find the first active crtc */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->pm.dpm.new_active_crtcs & (1 << i))
+ break;
+ }
+ if (i == adev->mode_info.num_crtc)
+ pipe = 0;
+ else
+ pipe = i;
+
+ tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
+ tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
+ WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
+ }
+
+ /* Setting this to false forces the performance state to low if the
+  * CRTCs are disabled. This can be a problem on PowerXpress systems
+  * or when the card is used for offscreen rendering or compute with
+  * no CRTCs enabled.
+  */
+ si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
+}
+
+static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+ if (enable) {
+ if (pi->sclk_ss)
+ WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
+ } else {
+ WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
+ WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
+ }
+}
+
+static void si_setup_bsp(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ u32 xclk = amdgpu_asic_get_xclk(adev);
+
+ r600_calculate_u_and_p(pi->asi,
+ xclk,
+ 16,
+ &pi->bsp,
+ &pi->bsu);
+
+ r600_calculate_u_and_p(pi->pasi,
+ xclk,
+ 16,
+ &pi->pbsp,
+ &pi->pbsu);
+
+ pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
+ pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
+
+ WREG32(CG_BSP, pi->dsp);
+}
+
+static void si_program_git(struct amdgpu_device *adev)
+{
+ WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
+}
+
+static void si_program_tp(struct amdgpu_device *adev)
+{
+ int i;
+ enum r600_td td = R600_TD_DFLT;
+
+ for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
+ WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
+
+ if (td == R600_TD_AUTO)
+ WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
+ else
+ WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
+
+ if (td == R600_TD_UP)
+ WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
+
+ if (td == R600_TD_DOWN)
+ WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
+}
+
+static void si_program_tpp(struct amdgpu_device *adev)
+{
+ WREG32(CG_TPC, R600_TPC_DFLT);
+}
+
+static void si_program_sstp(struct amdgpu_device *adev)
+{
+ WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+}
+
+static void si_enable_display_gap(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+
+ tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+ tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+ DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+
+ tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
+ tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
+ DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
+ WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+}
+
+static void si_program_vc(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+ WREG32(CG_FTV, pi->vrc);
+}
+
+static void si_clear_vc(struct amdgpu_device *adev)
+{
+ WREG32(CG_FTV, 0);
+}
+
+static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
+{
+ u8 mc_para_index;
+
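+ /* memory_clock is in 10 kHz units; e.g. 40000 (400 MHz) maps to
+  * (40000 - 10000) / 5000 + 1 = 7.
+  */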
+ if (memory_clock < 10000)
+ mc_para_index = 0;
+ else if (memory_clock >= 80000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
+ return mc_para_index;
+}
+
+static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
+{
+ u8 mc_para_index;
+
+ if (strobe_mode) {
+ if (memory_clock < 12500)
+ mc_para_index = 0x00;
+ else if (memory_clock > 47500)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (u8)((memory_clock - 10000) / 2500);
+ } else {
+ if (memory_clock < 65000)
+ mc_para_index = 0x00;
+ else if (memory_clock > 135000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (u8)((memory_clock - 60000) / 5000);
+ }
+ return mc_para_index;
+}
+
+static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ bool strobe_mode = false;
+ u8 result = 0;
+
+ if (mclk <= pi->mclk_strobe_mode_threshold)
+ strobe_mode = true;
+
+ if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+ result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
+ else
+ result = si_get_ddr3_mclk_frequency_ratio(mclk);
+
+ if (strobe_mode)
+ result |= SISLANDS_SMC_STROBE_ENABLE;
+
+ return result;
+}
+
+static int si_upload_firmware(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ amdgpu_si_reset_smc(adev);
+ amdgpu_si_smc_clock(adev, false);
+
+ return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end);
+}
+
+static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev,
+ const struct atom_voltage_table *table,
+ const struct amdgpu_phase_shedding_limits_table *limits)
+{
+ u32 data, num_bits, num_levels;
+
+ if ((table == NULL) || (limits == NULL))
+ return false;
+
+ data = table->mask_low;
+
+ num_bits = hweight32(data);
+
+ if (num_bits == 0)
+ return false;
+
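+ /* Each bit in the SMIO mask doubles the number of selectable phase
+  * levels, and the limits table describes the transitions between
+  * them, so it is expected to hold exactly num_levels - 1 entries.
+  */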
+ num_levels = (1 << num_bits);
+
+ if (table->count != num_levels)
+ return false;
+
+ if (limits->count != (num_levels - 1))
+ return false;
+
+ return true;
+}
+
+static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
+ u32 max_voltage_steps,
+ struct atom_voltage_table *voltage_table)
+{
+ unsigned int i, diff;
+
+ if (voltage_table->count <= max_voltage_steps)
+ return;
+
+ diff = voltage_table->count - max_voltage_steps;
+
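+ /* Keep the highest max_voltage_steps entries by shifting the tail
+  * of the table down over the lowest ones.
+  */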
+ for (i = 0; i < max_voltage_steps; i++)
+ voltage_table->entries[i] = voltage_table->entries[i + diff];
+
+ voltage_table->count = max_voltage_steps;
+}
+
+static int si_get_svi2_voltage_table(struct amdgpu_device *adev,
+ struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
+ struct atom_voltage_table *voltage_table)
+{
+ u32 i;
+
+ if (voltage_dependency_table == NULL)
+ return -EINVAL;
+
+ voltage_table->mask_low = 0;
+ voltage_table->phase_delay = 0;
+
+ voltage_table->count = voltage_dependency_table->count;
+ for (i = 0; i < voltage_table->count; i++) {
+ voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+ voltage_table->entries[i].smio_low = 0;
+ }
+
+ return 0;
+}
+
+static int si_construct_voltage_tables(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ int ret;
+
+ if (pi->voltage_control) {
+ ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+
+ if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+ si_trim_voltage_table_to_fit_state_table(adev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &eg_pi->vddc_voltage_table);
+ } else if (si_pi->voltage_control_svi2) {
+ ret = si_get_svi2_voltage_table(adev,
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &eg_pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+
+ if (eg_pi->vddci_control) {
+ ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
+ VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+
+ if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+ si_trim_voltage_table_to_fit_state_table(adev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &eg_pi->vddci_voltage_table);
+ }
+ if (si_pi->vddci_control_svi2) {
+ ret = si_get_svi2_voltage_table(adev,
+ &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &eg_pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->mvdd_control) {
+ ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
+ VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table);
+
+ if (ret) {
+ pi->mvdd_control = false;
+ return ret;
+ }
+
+ if (si_pi->mvdd_voltage_table.count == 0) {
+ pi->mvdd_control = false;
+ return -EINVAL;
+ }
+
+ if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+ si_trim_voltage_table_to_fit_state_table(adev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &si_pi->mvdd_voltage_table);
+ }
+
+ if (si_pi->vddc_phase_shed_control) {
+ ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
+ VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table);
+ if (ret)
+ si_pi->vddc_phase_shed_control = false;
+
+ if ((si_pi->vddc_phase_shed_table.count == 0) ||
+ (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
+ si_pi->vddc_phase_shed_control = false;
+ }
+
+ return 0;
+}
+
+static void si_populate_smc_voltage_table(struct amdgpu_device *adev,
+ const struct atom_voltage_table *voltage_table,
+ SISLANDS_SMC_STATETABLE *table)
+{
+ unsigned int i;
+
+ for (i = 0; i < voltage_table->count; i++)
+ table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
+}
+
+static int si_populate_smc_voltage_tables(struct amdgpu_device *adev,
+ SISLANDS_SMC_STATETABLE *table)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u8 i;
+
+ if (si_pi->voltage_control_svi2) {
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
+ si_pi->svc_gpio_id);
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
+ si_pi->svd_gpio_id);
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
+ 2);
+ } else {
+ if (eg_pi->vddc_voltage_table.count) {
+ si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table);
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+ cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+
+ for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
+ if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
+ table->maxVDDCIndexInPPTable = i;
+ break;
+ }
+ }
+ }
+
+ if (eg_pi->vddci_voltage_table.count) {
+ si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table);
+
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+ }
+
+ if (si_pi->mvdd_voltage_table.count) {
+ si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table);
+
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
+ cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
+ }
+
+ if (si_pi->vddc_phase_shed_control) {
+ if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table,
+ &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
+ si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);
+
+ table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
+ cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
+
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
+ (u32)si_pi->vddc_phase_shed_table.phase_delay);
+ } else {
+ si_pi->vddc_phase_shed_control = false;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int si_populate_voltage_value(struct amdgpu_device *adev,
+ const struct atom_voltage_table *table,
+ u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+ unsigned int i;
+
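+ /* Pick the first (lowest) table entry at or above the requested
+  * voltage; fail if the table cannot supply it.
+  */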
+ for (i = 0; i < table->count; i++) {
+ if (value <= table->entries[i].value) {
+ voltage->index = (u8)i;
+ voltage->value = cpu_to_be16(table->entries[i].value);
+ break;
+ }
+ }
+
+ if (i >= table->count)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
+ SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ if (pi->mvdd_control) {
+ if (mclk <= pi->mvdd_split_frequency)
+ voltage->index = 0;
+ else
+ voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;
+
+ voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
+ }
+ return 0;
+}
+
+static int si_get_std_voltage_value(struct amdgpu_device *adev,
+ SISLANDS_SMC_VOLTAGE_VALUE *voltage,
+ u16 *std_voltage)
+{
+ u16 v_index;
+ bool voltage_found = false;
+ *std_voltage = be16_to_cpu(voltage->value);
+
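+ /* Map the requested voltage to its "standard" (leakage corrected)
+  * value via the CAC leakage table: first look for an exact match in
+  * the sclk dependency table, then fall back to the first entry at
+  * or above the requested value.
+  */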
+ if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
+ if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
+ return -EINVAL;
+
+ for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+ if (be16_to_cpu(voltage->value) ==
+ (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+ voltage_found = true;
+ if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+ *std_voltage =
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
+ else
+ *std_voltage =
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
+ break;
+ }
+ }
+
+ if (!voltage_found) {
+ for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+ if (be16_to_cpu(voltage->value) <=
+ (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+ voltage_found = true;
+ if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+ *std_voltage =
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
+ else
+ *std_voltage =
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
+ break;
+ }
+ }
+ }
+ } else {
+ if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+ *std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
+ }
+ }
+
+ return 0;
+}
+
+static int si_populate_std_voltage_value(struct amdgpu_device *adev,
+ u16 value, u8 index,
+ SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+ voltage->index = index;
+ voltage->value = cpu_to_be16(value);
+
+ return 0;
+}
+
+static int si_populate_phase_shedding_value(struct amdgpu_device *adev,
+ const struct amdgpu_phase_shedding_limits_table *limits,
+ u16 voltage, u32 sclk, u32 mclk,
+ SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
+{
+ unsigned int i;
+
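+ /* Use the first (lowest) limit that covers the requested voltage
+  * and clocks; if none does, the index saturates at limits->count.
+  */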
+ for (i = 0; i < limits->count; i++) {
+ if ((voltage <= limits->entries[i].voltage) &&
+ (sclk <= limits->entries[i].sclk) &&
+ (mclk <= limits->entries[i].mclk))
+ break;
+ }
+
+ smc_voltage->phase_settings = (u8)i;
+
+ return 0;
+}
+
+static int si_init_arb_table_index(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 tmp;
+ int ret;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
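+ /* The top byte of the first ARB table dword selects the active MC
+  * arbitration register set; start out on set F1.
+  */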
+ tmp &= 0x00FFFFFF;
+ tmp |= MC_CG_ARB_FREQ_F1 << 24;
+
+ return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start,
+ tmp, si_pi->sram_end);
+}
+
+static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
+{
+ return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int si_reset_to_default(struct amdgpu_device *adev)
+{
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int si_force_switch_to_arb_f0(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 tmp;
+ int ret;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ tmp = (tmp >> 24) & 0xff;
+
+ if (tmp == MC_CG_ARB_FREQ_F0)
+ return 0;
+
+ return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
+}
+
+static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
+ u32 engine_clock)
+{
+ u32 dram_rows;
+ u32 dram_refresh_rate;
+ u32 mc_arb_rfsh_rate;
+ u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+
+ if (tmp >= 4)
+ dram_rows = 16384;
+ else
+ dram_rows = 1 << (tmp + 10);
+
+ dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
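+ /* Illustrative example: engine_clock = 60000 (600 MHz in 10 kHz
+  * units) with dram_refresh_rate = 64 and dram_rows = 16384 gives
+  * (600000 * 64 / 16384 - 32) / 64 = 36.
+  */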
+ mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
+
+ return mc_arb_rfsh_rate;
+}
+
+static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
+ struct rv7xx_pl *pl,
+ SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
+{
+ u32 dram_timing;
+ u32 dram_timing2;
+ u32 burst_time;
+
+ arb_regs->mc_arb_rfsh_rate =
+ (u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
+
+ amdgpu_atombios_set_engine_dram_timings(adev,
+ pl->sclk,
+ pl->mclk);
+
+ dram_timing = RREG32(MC_ARB_DRAM_TIMING);
+ dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+ burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
+
+ arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing);
+ arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
+ arb_regs->mc_arb_burst_time = (u8)burst_time;
+
+ return 0;
+}
+
+static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state,
+ unsigned int first_arb_set)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ps *state = si_get_ps(amdgpu_state);
+ SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
+ int i, ret = 0;
+
+ for (i = 0; i < state->performance_level_count; i++) {
+ ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs);
+ if (ret)
+ break;
+ ret = amdgpu_si_copy_bytes_to_smc(adev,
+ si_pi->arb_table_start +
+ offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
+ sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
+ (u8 *)&arb_regs,
+ sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
+ si_pi->sram_end);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int si_program_memory_timing_parameters(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state)
+{
+ return si_do_program_memory_timing_parameters(adev, amdgpu_new_state,
+ SISLANDS_DRIVER_STATE_ARB_INDEX);
+}
+
+static int si_populate_initial_mvdd_value(struct amdgpu_device *adev,
+ struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ if (pi->mvdd_control)
+ return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table,
+ si_pi->mvdd_bootup_value, voltage);
+
+ return 0;
+}
+
+static int si_populate_smc_initial_state(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_initial_state,
+ SISLANDS_SMC_STATETABLE *table)
+{
+ struct si_ps *initial_state = si_get_ps(amdgpu_initial_state);
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 reg;
+ int ret;
+
+ table->initialState.levels[0].mclk.vDLL_CNTL =
+ cpu_to_be32(si_pi->clock_registers.dll_cntl);
+ table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+ cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
+ table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+ cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
+ table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+ cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
+ table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
+ cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
+ table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+ cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
+ table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+ cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
+ table->initialState.levels[0].mclk.vMPLL_SS =
+ cpu_to_be32(si_pi->clock_registers.mpll_ss1);
+ table->initialState.levels[0].mclk.vMPLL_SS2 =
+ cpu_to_be32(si_pi->clock_registers.mpll_ss2);
+
+ table->initialState.levels[0].mclk.mclk_value =
+ cpu_to_be32(initial_state->performance_levels[0].mclk);
+
+ table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+ cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
+ table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+ cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
+ table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+ cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
+ table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+ cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
+ table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+ cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
+ table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
+ cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
+
+ table->initialState.levels[0].sclk.sclk_value =
+ cpu_to_be32(initial_state->performance_levels[0].sclk);
+
+ table->initialState.levels[0].arbRefreshState =
+ SISLANDS_INITIAL_STATE_ARB_INDEX;
+
+ table->initialState.levels[0].ACIndex = 0;
+
+ ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+ initial_state->performance_levels[0].vddc,
+ &table->initialState.levels[0].vddc);
+
+ if (!ret) {
+ u16 std_vddc;
+
+ ret = si_get_std_voltage_value(adev,
+ &table->initialState.levels[0].vddc,
+ &std_vddc);
+ if (!ret)
+ si_populate_std_voltage_value(adev, std_vddc,
+ table->initialState.levels[0].vddc.index,
+ &table->initialState.levels[0].std_vddc);
+ }
+
+ if (eg_pi->vddci_control)
+ si_populate_voltage_value(adev,
+ &eg_pi->vddci_voltage_table,
+ initial_state->performance_levels[0].vddci,
+ &table->initialState.levels[0].vddci);
+
+ if (si_pi->vddc_phase_shed_control)
+ si_populate_phase_shedding_value(adev,
+ &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ initial_state->performance_levels[0].vddc,
+ initial_state->performance_levels[0].sclk,
+ initial_state->performance_levels[0].mclk,
+ &table->initialState.levels[0].vddc);
+
+ si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);
+
+ reg = CG_R(0xffff) | CG_L(0);
+ table->initialState.levels[0].aT = cpu_to_be32(reg);
+ table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+ table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
+
+ if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+ table->initialState.levels[0].strobeMode =
+ si_get_strobe_mode_settings(adev,
+ initial_state->performance_levels[0].mclk);
+
+ if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
+ table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
+ else
+ table->initialState.levels[0].mcFlags = 0;
+ }
+
+ table->initialState.levelCount = 1;
+
+ table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
+
+ table->initialState.levels[0].dpm2.MaxPS = 0;
+ table->initialState.levels[0].dpm2.NearTDPDec = 0;
+ table->initialState.levels[0].dpm2.AboveSafeInc = 0;
+ table->initialState.levels[0].dpm2.BelowSafeInc = 0;
+ table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+ reg = MIN_POWER_MASK | MAX_POWER_MASK;
+ table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+ reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+ return 0;
+}
+
+static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
+ SISLANDS_SMC_STATETABLE *table)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
+ u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
+ u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
+ u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
+ u32 dll_cntl = si_pi->clock_registers.dll_cntl;
+ u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
+ u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
+ u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
+ u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
+ u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
+ u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
+ u32 reg;
+ int ret;
+
+ table->ACPIState = table->initialState;
+
+ table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (pi->acpi_vddc) {
+ ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+ pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+ if (!ret) {
+ u16 std_vddc;
+
+ ret = si_get_std_voltage_value(adev,
+ &table->ACPIState.levels[0].vddc, &std_vddc);
+ if (!ret)
+ si_populate_std_voltage_value(adev, std_vddc,
+ table->ACPIState.levels[0].vddc.index,
+ &table->ACPIState.levels[0].std_vddc);
+ }
+ table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
+
+ if (si_pi->vddc_phase_shed_control) {
+ si_populate_phase_shedding_value(adev,
+ &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ pi->acpi_vddc,
+ 0,
+ 0,
+ &table->ACPIState.levels[0].vddc);
+ }
+ } else {
+ ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+ pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
+ if (!ret) {
+ u16 std_vddc;
+
+ ret = si_get_std_voltage_value(adev,
+ &table->ACPIState.levels[0].vddc, &std_vddc);
+
+ if (!ret)
+ si_populate_std_voltage_value(adev, std_vddc,
+ table->ACPIState.levels[0].vddc.index,
+ &table->ACPIState.levels[0].std_vddc);
+ }
+ table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
+ si_pi->sys_pcie_mask,
+ si_pi->boot_pcie_gen,
+ AMDGPU_PCIE_GEN1);
+
+ if (si_pi->vddc_phase_shed_control)
+ si_populate_phase_shedding_value(adev,
+ &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ pi->min_vddc_in_table,
+ 0,
+ 0,
+ &table->ACPIState.levels[0].vddc);
+ }
+
+ if (pi->acpi_vddc) {
+ if (eg_pi->acpi_vddci)
+ si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
+ eg_pi->acpi_vddci,
+ &table->ACPIState.levels[0].vddci);
+ }
+
+ mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
+ mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+ dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
+
+ spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+ table->ACPIState.levels[0].mclk.vDLL_CNTL =
+ cpu_to_be32(dll_cntl);
+ table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+ cpu_to_be32(mclk_pwrmgt_cntl);
+ table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+ cpu_to_be32(mpll_ad_func_cntl);
+ table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+ cpu_to_be32(mpll_dq_func_cntl);
+ table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
+ cpu_to_be32(mpll_func_cntl);
+ table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+ cpu_to_be32(mpll_func_cntl_1);
+ table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+ cpu_to_be32(mpll_func_cntl_2);
+ table->ACPIState.levels[0].mclk.vMPLL_SS =
+ cpu_to_be32(si_pi->clock_registers.mpll_ss1);
+ table->ACPIState.levels[0].mclk.vMPLL_SS2 =
+ cpu_to_be32(si_pi->clock_registers.mpll_ss2);
+
+ table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+ cpu_to_be32(spll_func_cntl);
+ table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+ cpu_to_be32(spll_func_cntl_2);
+ table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+ cpu_to_be32(spll_func_cntl_3);
+ table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+ cpu_to_be32(spll_func_cntl_4);
+
+ table->ACPIState.levels[0].mclk.mclk_value = 0;
+ table->ACPIState.levels[0].sclk.sclk_value = 0;
+
+ si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);
+
+ if (eg_pi->dynamic_ac_timing)
+ table->ACPIState.levels[0].ACIndex = 0;
+
+ table->ACPIState.levels[0].dpm2.MaxPS = 0;
+ table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
+ table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
+ table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
+ table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+ reg = MIN_POWER_MASK | MAX_POWER_MASK;
+ table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+ reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+ return 0;
+}
+
+static int si_populate_ulv_state(struct amdgpu_device *adev,
+ SISLANDS_SMC_SWSTATE *state)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ulv_param *ulv = &si_pi->ulv;
+ u32 sclk_in_sr = 1350; /* ??? */
+ int ret;
+
+ ret = si_convert_power_level_to_smc(adev, &ulv->pl,
+ &state->levels[0]);
+ if (!ret) {
+ if (eg_pi->sclk_deep_sleep) {
+ if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
+ state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+ else
+ state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+ }
+ if (ulv->one_pcie_lane_in_ulv)
+ state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
+ state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
+ state->levels[0].ACIndex = 1;
+ state->levels[0].std_vddc = state->levels[0].vddc;
+ state->levelCount = 1;
+
+ state->flags |= PPSMC_SWSTATE_FLAG_DC;
+ }
+
+ return ret;
+}
+
+static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ulv_param *ulv = &si_pi->ulv;
+ SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
+ int ret;
+
+ ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
+ &arb_regs);
+ if (ret)
+ return ret;
+
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
+ ulv->volt_change_delay);
+
+ ret = amdgpu_si_copy_bytes_to_smc(adev,
+ si_pi->arb_table_start +
+ offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
+ sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
+ (u8 *)&arb_regs,
+ sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
+ si_pi->sram_end);
+
+ return ret;
+}
+
+static void si_get_mvdd_configuration(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+ pi->mvdd_split_frequency = 30000;
+}
+
+static int si_init_smc_table(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
+ const struct si_ulv_param *ulv = &si_pi->ulv;
+ SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable;
+ int ret;
+ u32 lane_width;
+ u32 vr_hot_gpio;
+
+ si_populate_smc_voltage_tables(adev, table);
+
+ switch (adev->pm.int_thermal_type) {
+ case THERMAL_TYPE_SI:
+ case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+ table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
+ break;
+ case THERMAL_TYPE_NONE:
+ table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
+ break;
+ default:
+ table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
+ break;
+ }
+
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+ table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
+ if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
+ table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
+ }
+
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+ table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+ table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
+ table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
+
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
+ table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
+ vr_hot_gpio = adev->pm.dpm.backbias_response_time;
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
+ vr_hot_gpio);
+ }
+
+ ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
+ if (ret)
+ return ret;
+
+ ret = si_populate_smc_acpi_state(adev, table);
+ if (ret)
+ return ret;
+
+ table->driverState = table->initialState;
+
+ ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
+ SISLANDS_INITIAL_STATE_ARB_INDEX);
+ if (ret)
+ return ret;
+
+ if (ulv->supported && ulv->pl.vddc) {
+ ret = si_populate_ulv_state(adev, &table->ULVState);
+ if (ret)
+ return ret;
+
+ ret = si_program_ulv_memory_timing_parameters(adev);
+ if (ret)
+ return ret;
+
+ WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
+ WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+
+ lane_width = amdgpu_get_pcie_lanes(adev);
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
+ } else {
+ table->ULVState = table->initialState;
+ }
+
+ return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
+ (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
+ si_pi->sram_end);
+}
+
+static int si_calculate_sclk_params(struct amdgpu_device *adev,
+ u32 engine_clock,
+ SISLANDS_SMC_SCLK_VALUE *sclk)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct atom_clock_dividers dividers;
+ u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
+ u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
+ u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
+ u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
+ u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
+ u64 tmp;
+ u32 reference_clock = adev->clock.spll.reference_freq;
+ u32 reference_divider;
+ u32 fbdiv;
+ int ret;
+
+ ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+ engine_clock, false, &dividers);
+ if (ret)
+ return ret;
+
+ reference_divider = 1 + dividers.ref_div;
+
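+ /* Feedback divider scaled by 2^14 (16384):
+  * fbdiv = sclk * refdiv * postdiv * 16384 / refclk, so that
+  * sclk = refclk * fbdiv / (16384 * refdiv * postdiv).
+  */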
+ tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
+ do_div(tmp, reference_clock);
+ fbdiv = (u32) tmp;
+
+ spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
+ spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
+ spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
+
+ spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+
+ spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+ spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+ spll_func_cntl_3 |= SPLL_DITHEN;
+
+ if (pi->sclk_ss) {
+ struct amdgpu_atom_ss ss;
+ u32 vco_freq = engine_clock * dividers.post_div;
+
+ if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+ u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+ u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
+
+ cg_spll_spread_spectrum &= ~CLK_S_MASK;
+ cg_spll_spread_spectrum |= CLK_S(clk_s);
+ cg_spll_spread_spectrum |= SSEN;
+
+ cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
+ cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+ }
+ }
+
+ sclk->sclk_value = engine_clock;
+ sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
+ sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
+ sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
+ sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
+ sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
+ sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
+
+ return 0;
+}
+
+static int si_populate_sclk_value(struct amdgpu_device *adev,
+ u32 engine_clock,
+ SISLANDS_SMC_SCLK_VALUE *sclk)
+{
+ SISLANDS_SMC_SCLK_VALUE sclk_tmp;
+ int ret;
+
+ ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
+ if (!ret) {
+ sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
+ sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
+ sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
+ sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
+ sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
+ sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
+ sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
+ }
+
+ return ret;
+}
+
+static int si_populate_mclk_value(struct amdgpu_device *adev,
+ u32 engine_clock,
+ u32 memory_clock,
+ SISLANDS_SMC_MCLK_VALUE *mclk,
+ bool strobe_mode,
+ bool dll_state_on)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 dll_cntl = si_pi->clock_registers.dll_cntl;
+ u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
+ u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
+ u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
+ u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
+ u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
+ u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
+ u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1;
+ u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2;
+ struct atom_mpll_param mpll_param;
+ int ret;
+
+ ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
+ if (ret)
+ return ret;
+
+ mpll_func_cntl &= ~BWCTRL_MASK;
+ mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
+
+ mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
+ mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
+ CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
+
+ mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
+ mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
+
+ if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+ mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
+ mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
+ YCLK_POST_DIV(mpll_param.post_div);
+ }
+
+ if (pi->mclk_ss) {
+ struct amdgpu_atom_ss ss;
+ u32 freq_nom;
+ u32 tmp;
+ u32 reference_clock = adev->clock.mpll.reference_freq;
+
+ if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+ freq_nom = memory_clock * 4;
+ else
+ freq_nom = memory_clock * 2;
+
+ tmp = freq_nom / reference_clock;
+ tmp = tmp * tmp;
+ if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+ u32 clks = reference_clock * 5 / ss.rate;
+ u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 &= ~CLKV_MASK;
+ mpll_ss1 |= CLKV(clkv);
+
+ mpll_ss2 &= ~CLKS_MASK;
+ mpll_ss2 |= CLKS(clks);
+ }
+ }
+
+ mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
+ mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
+
+ if (dll_state_on)
+ mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
+ else
+ mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+ mclk->mclk_value = cpu_to_be32(memory_clock);
+ mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
+ mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
+ mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
+ mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+ mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+ mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+ mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
+ mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
+ mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
+
+ return 0;
+}
+
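+/* All levels but the highest use the default switch parameter (dsp); the top level uses psp. */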
+static void si_populate_smc_sp(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state,
+ SISLANDS_SMC_SWSTATE *smc_state)
+{
+ struct si_ps *ps = si_get_ps(amdgpu_state);
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ int i;
+
+ for (i = 0; i < ps->performance_level_count - 1; i++)
+ smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
+
+ smc_state->levels[ps->performance_level_count - 1].bSP =
+ cpu_to_be32(pi->psp);
+}
+
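+/* Translate one driver performance level into the SMC hardware performance level layout. */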
+static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
+ struct rv7xx_pl *pl,
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ int ret;
+ bool dll_state_on;
+ u16 std_vddc;
+ bool gmc_pg = false;
+
+ if (eg_pi->pcie_performance_request &&
+ (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
+ level->gen2PCIE = (u8)si_pi->force_pcie_gen;
+ else
+ level->gen2PCIE = (u8)pl->pcie_gen;
+
+ ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
+ if (ret)
+ return ret;
+
+ level->mcFlags = 0;
+
+ if (pi->mclk_stutter_mode_threshold &&
+ (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
+ !eg_pi->uvd_enabled &&
+ (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+ (adev->pm.dpm.new_active_crtc_count <= 2)) {
+ level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
+
+ if (gmc_pg)
+ level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
+ }
+
+ if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+ if (pl->mclk > pi->mclk_edc_enable_threshold)
+ level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
+
+ if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
+ level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;
+
+ level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);
+
+ if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
+ if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
+ ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
+ dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+ else
+ dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
+ } else {
+ dll_state_on = false;
+ }
+ } else {
+ level->strobeMode = si_get_strobe_mode_settings(adev,
+ pl->mclk);
+
+ dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+ }
+
+ ret = si_populate_mclk_value(adev,
+ pl->sclk,
+ pl->mclk,
+ &level->mclk,
+ (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
+ if (ret)
+ return ret;
+
+ ret = si_populate_voltage_value(adev,
+ &eg_pi->vddc_voltage_table,
+ pl->vddc, &level->vddc);
+ if (ret)
+ return ret;
+
+ ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
+ if (ret)
+ return ret;
+
+ ret = si_populate_std_voltage_value(adev, std_vddc,
+ level->vddc.index, &level->std_vddc);
+ if (ret)
+ return ret;
+
+ if (eg_pi->vddci_control) {
+ ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
+ pl->vddci, &level->vddci);
+ if (ret)
+ return ret;
+ }
+
+ if (si_pi->vddc_phase_shed_control) {
+ ret = si_populate_phase_shedding_value(adev,
+ &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ pl->vddc,
+ pl->sclk,
+ pl->mclk,
+ &level->vddc);
+ if (ret)
+ return ret;
+ }
+
+ level->MaxPoweredUpCU = si_pi->max_cu;
+
+ ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);
+
+ return ret;
+}
+
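+/* Fill in the aT transition thresholds between adjacent performance levels. */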
+static int si_populate_smc_t(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state,
+ SISLANDS_SMC_SWSTATE *smc_state)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct si_ps *state = si_get_ps(amdgpu_state);
+ u32 a_t;
+ u32 t_l, t_h;
+ u32 high_bsp;
+ int i, ret;
+
+ if (state->performance_level_count >= 9)
+ return -EINVAL;
+
+ if (state->performance_level_count < 2) {
+ a_t = CG_R(0xffff) | CG_L(0);
+ smc_state->levels[0].aT = cpu_to_be32(a_t);
+ return 0;
+ }
+
+ smc_state->levels[0].aT = cpu_to_be32(0);
+
+ for (i = 0; i <= state->performance_level_count - 2; i++) {
+ ret = r600_calculate_at(
+ (50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
+ 100 * R600_AH_DFLT,
+ state->performance_levels[i + 1].sclk,
+ state->performance_levels[i].sclk,
+ &t_l,
+ &t_h);
+
+ if (ret) {
+ t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
+ t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
+ }
+
+ a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
+ a_t |= CG_R(t_l * pi->bsp / 20000);
+ smc_state->levels[i].aT = cpu_to_be32(a_t);
+
+ high_bsp = (i == state->performance_level_count - 2) ?
+ pi->pbsp : pi->bsp;
+ a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
+ smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
+ }
+
+ return 0;
+}
+
+static int si_disable_ulv(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ulv_param *ulv = &si_pi->ulv;
+
+ if (ulv->supported)
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+
+ return 0;
+}
+
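+/*
+ * A state is ULV compatible only if it matches the ULV level's mclk, its
+ * vddc satisfies the dispclk dependency table and UVD is idle.
+ */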
+static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
+{
+ const struct si_power_info *si_pi = si_get_pi(adev);
+ const struct si_ulv_param *ulv = &si_pi->ulv;
+ const struct si_ps *state = si_get_ps(amdgpu_state);
+ int i;
+
+ if (state->performance_levels[0].mclk != ulv->pl.mclk)
+ return false;
+
+ /* XXX validate against display requirements! */
+
+ for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
+ if (adev->clock.current_dispclk <=
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
+ if (ulv->pl.vddc <
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
+ return false;
+ }
+ }
+
+ if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
+ return false;
+
+ return true;
+}
+
+static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state)
+{
+ const struct si_power_info *si_pi = si_get_pi(adev);
+ const struct si_ulv_param *ulv = &si_pi->ulv;
+
+ if (ulv->supported) {
+ if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ }
+ return 0;
+}
+
+static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state,
+ SISLANDS_SMC_SWSTATE *smc_state)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ps *state = si_get_ps(amdgpu_state);
+ int i, ret;
+ u32 threshold;
+ u32 sclk_in_sr = 1350; /* ??? */
+
+ if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
+ return -EINVAL;
+
+ threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;
+
+ if (amdgpu_state->vclk && amdgpu_state->dclk) {
+ eg_pi->uvd_enabled = true;
+ if (eg_pi->smu_uvd_hs)
+ smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
+ } else {
+ eg_pi->uvd_enabled = false;
+ }
+
+ if (state->dc_compatible)
+ smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
+
+ smc_state->levelCount = 0;
+ for (i = 0; i < state->performance_level_count; i++) {
+ if (eg_pi->sclk_deep_sleep) {
+ if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
+ if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
+ smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+ else
+ smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+ }
+ }
+
+ ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
+ &smc_state->levels[i]);
+ if (ret)
+ return ret;
+
+ smc_state->levels[i].arbRefreshState =
+ (u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);
+
+ if (ni_pi->enable_power_containment)
+ smc_state->levels[i].displayWatermark =
+ (state->performance_levels[i].sclk < threshold) ?
+ PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+ else
+ smc_state->levels[i].displayWatermark = (i < 2) ?
+ PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ if (eg_pi->dynamic_ac_timing)
+ smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
+ else
+ smc_state->levels[i].ACIndex = 0;
+
+ smc_state->levelCount++;
+ }
+
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_watermark_threshold,
+ threshold / 512);
+
+ si_populate_smc_sp(adev, amdgpu_state, smc_state);
+
+ ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
+ if (ret)
+ ni_pi->enable_power_containment = false;
+
+ ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
+ if (ret)
+ ni_pi->enable_sq_ramping = false;
+
+ return si_populate_smc_t(adev, amdgpu_state, smc_state);
+}
+
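+/* Convert the requested power state and write it into the SMC driver state table. */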
+static int si_upload_sw_state(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ps *new_state = si_get_ps(amdgpu_new_state);
+ int ret;
+ u32 address = si_pi->state_table_start +
+ offsetof(SISLANDS_SMC_STATETABLE, driverState);
+ u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
+ ((new_state->performance_level_count - 1) *
+ sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
+ SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
+
+ memset(smc_state, 0, state_size);
+
+ ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
+ if (ret)
+ return ret;
+
+ return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
+ state_size, si_pi->sram_end);
+}
+
+static int si_upload_ulv_state(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ulv_param *ulv = &si_pi->ulv;
+ int ret = 0;
+
+ if (ulv->supported && ulv->pl.vddc) {
+ u32 address = si_pi->state_table_start +
+ offsetof(SISLANDS_SMC_STATETABLE, ULVState);
+ SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
+ u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
+
+ memset(smc_state, 0, state_size);
+
+ ret = si_populate_ulv_state(adev, smc_state);
+ if (!ret)
+ ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
+ state_size, si_pi->sram_end);
+ }
+
+ return ret;
+}
+
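+/*
+ * Pass the first active crtc's timing to the SMC so mclk switches can be
+ * aligned with it; failures here are treated as non-fatal.
+ */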
+static int si_upload_smc_data(struct amdgpu_device *adev)
+{
+ struct amdgpu_crtc *amdgpu_crtc = NULL;
+ int i;
+
+ if (adev->pm.dpm.new_active_crtc_count == 0)
+ return 0;
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
+ amdgpu_crtc = adev->mode_info.crtcs[i];
+ break;
+ }
+ }
+
+ if (amdgpu_crtc == NULL)
+ return 0;
+
+ if (amdgpu_crtc->line_time <= 0)
+ return 0;
+
+ if (si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_crtc_index,
+ amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
+ return 0;
+
+ if (si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
+ amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
+ return 0;
+
+ if (si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
+ amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
+ return 0;
+
+ return 0;
+}
+
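+/*
+ * Append shadow entries for registers that need special handling
+ * (EMRS/MRS/MRS1 and, for non-GDDR5, MC_PMG_AUTO_CMD).
+ */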
+static int si_set_mc_special_registers(struct amdgpu_device *adev,
+ struct si_mc_reg_table *table)
+{
+ u8 i, j, k;
+ u32 temp_reg;
+
+ for (i = 0, j = table->last; i < table->last; i++) {
+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ switch (table->mc_reg_address[i].s1) {
+ case MC_SEQ_MISC1:
+ temp_reg = RREG32(MC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++)
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) |
+ ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ j++;
+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+
+ temp_reg = RREG32(MC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ j++;
+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+
+ if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+ table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++)
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ j++;
+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ }
+ break;
+ case MC_SEQ_RESERVE_M:
+ temp_reg = RREG32(MC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
+ for (k = 0; k < table->num_entries; k++)
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ j++;
+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
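+/* Map an MC register to its low-power (LP) shadow; returns false if there is none. */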
+static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
+{
+ bool result = true;
+ switch (in_reg) {
+ case MC_SEQ_RAS_TIMING:
+ *out_reg = MC_SEQ_RAS_TIMING_LP;
+ break;
+ case MC_SEQ_CAS_TIMING:
+ *out_reg = MC_SEQ_CAS_TIMING_LP;
+ break;
+ case MC_SEQ_MISC_TIMING:
+ *out_reg = MC_SEQ_MISC_TIMING_LP;
+ break;
+ case MC_SEQ_MISC_TIMING2:
+ *out_reg = MC_SEQ_MISC_TIMING2_LP;
+ break;
+ case MC_SEQ_RD_CTL_D0:
+ *out_reg = MC_SEQ_RD_CTL_D0_LP;
+ break;
+ case MC_SEQ_RD_CTL_D1:
+ *out_reg = MC_SEQ_RD_CTL_D1_LP;
+ break;
+ case MC_SEQ_WR_CTL_D0:
+ *out_reg = MC_SEQ_WR_CTL_D0_LP;
+ break;
+ case MC_SEQ_WR_CTL_D1:
+ *out_reg = MC_SEQ_WR_CTL_D1_LP;
+ break;
+ case MC_PMG_CMD_EMRS:
+ *out_reg = MC_SEQ_PMG_CMD_EMRS_LP;
+ break;
+ case MC_PMG_CMD_MRS:
+ *out_reg = MC_SEQ_PMG_CMD_MRS_LP;
+ break;
+ case MC_PMG_CMD_MRS1:
+ *out_reg = MC_SEQ_PMG_CMD_MRS1_LP;
+ break;
+ case MC_SEQ_PMG_TIMING:
+ *out_reg = MC_SEQ_PMG_TIMING_LP;
+ break;
+ case MC_PMG_CMD_MRS2:
+ *out_reg = MC_SEQ_PMG_CMD_MRS2_LP;
+ break;
+ case MC_SEQ_WR_CTL_2:
+ *out_reg = MC_SEQ_WR_CTL_2_LP;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
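+/* Mark register columns whose values vary between entries; constant columns are skipped later. */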
+static void si_set_valid_flag(struct si_mc_reg_table *table)
+{
+ u8 i, j;
+
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
+ table->valid_flag |= 1 << i;
+ break;
+ }
+ }
+ }
+}
+
+static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
+{
+ u32 i;
+ u16 address;
+
+ for (i = 0; i < table->last; i++)
+ table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
+ address : table->mc_reg_address[i].s1;
+}
+
+static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
+ struct si_mc_reg_table *si_table)
+{
+ u8 i, j;
+
+ if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ if (table->num_entries > MAX_AC_TIMING_ENTRIES)
+ return -EINVAL;
+
+ for (i = 0; i < table->last; i++)
+ si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+ si_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ si_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++) {
+ si_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ }
+ si_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
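+/*
+ * Seed the LP shadow registers from the live MC state, then build the
+ * driver MC register table from the vbios table.
+ */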
+static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct atom_mc_reg_table *table;
+ struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
+ u8 module_index = rv770_get_memory_module_index(adev);
+ int ret;
+
+ table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
+ WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
+ WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
+ WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
+ WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
+ WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
+ WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
+ WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
+ WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
+ WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
+ WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
+ WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
+ WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
+ WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
+
+ ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
+ if (ret)
+ goto init_mc_done;
+
+ ret = si_copy_vbios_mc_reg_table(table, si_table);
+ if (ret)
+ goto init_mc_done;
+
+ si_set_s0_mc_reg_index(si_table);
+
+ ret = si_set_mc_special_registers(adev, si_table);
+ if (ret)
+ goto init_mc_done;
+
+ si_set_valid_flag(si_table);
+
+init_mc_done:
+ kfree(table);
+
+ return ret;
+}
+
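+/* Copy only the addresses of registers flagged valid into the SMC table. */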
+static void si_populate_mc_reg_addresses(struct amdgpu_device *adev,
+ SMC_SIslands_MCRegisters *mc_reg_table)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 i, j;
+
+ for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
+ if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
+ if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ break;
+ mc_reg_table->address[i].s0 =
+ cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 =
+ cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+ mc_reg_table->last = (u8)i;
+}
+
+static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
+ SMC_SIslands_MCRegisterSet *data,
+ u32 num_entries, u32 valid_flag)
+{
+ u32 i, j;
+
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & (1 << j)) {
+ data->value[i] = cpu_to_be32(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
+
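+/* Pick the MC register set whose mclk_max covers this level's mclk. */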
+static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
+ struct rv7xx_pl *pl,
+ SMC_SIslands_MCRegisterSet *mc_reg_table_data)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 i = 0;
+
+ for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
+ if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
+ break;
+ }
+
+ if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, si_pi->mc_reg_table.last,
+ si_pi->mc_reg_table.valid_flag);
+}
+
+static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state,
+ SMC_SIslands_MCRegisters *mc_reg_table)
+{
+ struct si_ps *state = si_get_ps(amdgpu_state);
+ int i;
+
+ for (i = 0; i < state->performance_level_count; i++) {
+ si_convert_mc_reg_table_entry_to_smc(adev,
+ &state->performance_levels[i],
+ &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
+ }
+}
+
+static int si_populate_mc_reg_table(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_boot_state)
+{
+ struct si_ps *boot_state = si_get_ps(amdgpu_boot_state);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ulv_param *ulv = &si_pi->ulv;
+ SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
+
+ memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
+
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1);
+
+ si_populate_mc_reg_addresses(adev, smc_mc_reg_table);
+
+ si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0],
+ &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);
+
+ si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
+ &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
+ si_pi->mc_reg_table.last,
+ si_pi->mc_reg_table.valid_flag);
+
+ if (ulv->supported && ulv->pl.vddc != 0)
+ si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl,
+ &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
+ else
+ si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
+ &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
+ si_pi->mc_reg_table.last,
+ si_pi->mc_reg_table.valid_flag);
+
+ si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table);
+
+ return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start,
+ (u8 *)smc_mc_reg_table,
+ sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
+}
+
+static int si_upload_mc_reg_table(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state)
+{
+ struct si_ps *new_state = si_get_ps(amdgpu_new_state);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 address = si_pi->mc_reg_table_start +
+ offsetof(SMC_SIslands_MCRegisters,
+ data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
+ SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
+
+ memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
+
+ si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table);
+
+ return amdgpu_si_copy_bytes_to_smc(adev, address,
+ (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
+ sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
+ si_pi->sram_end);
+}
+
+static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
+{
+ if (enable)
+ WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+ else
+ WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+}
+
+static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
+{
+ struct si_ps *state = si_get_ps(amdgpu_state);
+ int i;
+ u16 pcie_speed, max_speed = 0;
+
+ for (i = 0; i < state->performance_level_count; i++) {
+ pcie_speed = state->performance_levels[i].pcie_gen;
+ if (max_speed < pcie_speed)
+ max_speed = pcie_speed;
+ }
+ return max_speed;
+}
+
+static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
+{
+ u32 speed_cntl;
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
+ speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+
+ return (u16)speed_cntl;
+}
+
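+/*
+ * Request a higher PCIe link speed via ACPI before the state switch;
+ * speed reductions are deferred until after the switch.
+ */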
+static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state,
+ struct amdgpu_ps *amdgpu_current_state)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+ enum amdgpu_pcie_gen current_link_speed;
+
+ if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
+ current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
+ else
+ current_link_speed = si_pi->force_pcie_gen;
+
+ si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+ si_pi->pspp_notify_required = false;
+ if (target_link_speed > current_link_speed) {
+ switch (target_link_speed) {
+#if defined(CONFIG_ACPI)
+ case AMDGPU_PCIE_GEN3:
+ if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
+ break;
+ si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
+ if (current_link_speed == AMDGPU_PCIE_GEN2)
+ break;
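+ /* fall through */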
+ case AMDGPU_PCIE_GEN2:
+ if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
+ break;
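+ /* fall through */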
+#endif
+ default:
+ si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
+ break;
+ }
+ } else {
+ if (target_link_speed < current_link_speed)
+ si_pi->pspp_notify_required = true;
+ }
+}
+
+static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state,
+ struct amdgpu_ps *amdgpu_current_state)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+ u8 request;
+
+ if (si_pi->pspp_notify_required) {
+ if (target_link_speed == AMDGPU_PCIE_GEN3)
+ request = PCIE_PERF_REQ_PECI_GEN3;
+ else if (target_link_speed == AMDGPU_PCIE_GEN2)
+ request = PCIE_PERF_REQ_PECI_GEN2;
+ else
+ request = PCIE_PERF_REQ_PECI_GEN1;
+
+ if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
+ (si_get_current_pcie_speed(adev) > 0))
+ return;
+
+#if defined(CONFIG_ACPI)
+ amdgpu_acpi_pcie_performance_request(adev, request, false);
+#endif
+ }
+}
+
+#if 0
+static int si_ds_request(struct amdgpu_device *adev,
+ bool ds_status_on, u32 count_write)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+
+ if (eg_pi->sclk_deep_sleep) {
+ if (ds_status_on)
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
+ PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ else
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
+ PPSMC_Result_OK) ? 0 : -EINVAL;
+ }
+ return 0;
+}
+#endif
+
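+/* Cap the number of powered-up CUs on certain Verde variants, keyed by PCI device ID. */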
+static void si_set_max_cu_value(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ if (adev->asic_type == CHIP_VERDE) {
+ switch (adev->pdev->device) {
+ case 0x6820:
+ case 0x6825:
+ case 0x6821:
+ case 0x6823:
+ case 0x6827:
+ si_pi->max_cu = 10;
+ break;
+ case 0x682D:
+ case 0x6824:
+ case 0x682F:
+ case 0x6826:
+ si_pi->max_cu = 8;
+ break;
+ case 0x6828:
+ case 0x6830:
+ case 0x6831:
+ case 0x6838:
+ case 0x6839:
+ case 0x683D:
+ si_pi->max_cu = 10;
+ break;
+ case 0x683B:
+ case 0x683F:
+ case 0x6829:
+ si_pi->max_cu = 8;
+ break;
+ default:
+ si_pi->max_cu = 0;
+ break;
+ }
+ } else {
+ si_pi->max_cu = 0;
+ }
+}
+
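+/* Replace leakage voltage indices with real voltages, then clamp so voltage is non-decreasing with clock. */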
+static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev,
+ struct amdgpu_clock_voltage_dependency_table *table)
+{
+ u32 i;
+ int j;
+ u16 leakage_voltage;
+
+ if (table) {
+ for (i = 0; i < table->count; i++) {
+ switch (si_get_leakage_voltage_from_leakage_index(adev,
+ table->entries[i].v,
+ &leakage_voltage)) {
+ case 0:
+ table->entries[i].v = leakage_voltage;
+ break;
+ case -EAGAIN:
+ return -EINVAL;
+ case -EINVAL:
+ default:
+ break;
+ }
+ }
+
+ for (j = (table->count - 2); j >= 0; j--) {
+ table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
+ table->entries[j].v : table->entries[j + 1].v;
+ }
+ }
+ return 0;
+}
+
+static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ ret = si_patch_single_dependency_table_based_on_leakage(adev,
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
+ if (ret)
+ DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
+ ret = si_patch_single_dependency_table_based_on_leakage(adev,
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
+ if (ret)
+ DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
+ ret = si_patch_single_dependency_table_based_on_leakage(adev,
+ &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
+ if (ret)
+ DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
+ return ret;
+}
+
+static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state,
+ struct amdgpu_ps *amdgpu_current_state)
+{
+ u32 lane_width;
+ u32 new_lane_width =
+ (amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ u32 current_lane_width =
+ (amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+
+ if (new_lane_width != current_lane_width) {
+ amdgpu_set_pcie_lanes(adev, new_lane_width);
+ lane_width = amdgpu_get_pcie_lanes(adev);
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
+ }
+}
+
+static void si_dpm_setup_asic(struct amdgpu_device *adev)
+{
+ si_read_clock_registers(adev);
+ si_enable_acpi_power_management(adev);
+}
+
+static int si_thermal_enable_alert(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 thermal_int = RREG32(CG_THERMAL_INT);
+
+ if (enable) {
+ PPSMC_Result result;
+
+ thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+ WREG32(CG_THERMAL_INT, thermal_int);
+ result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
+ if (result != PPSMC_Result_OK) {
+ DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+ return -EINVAL;
+ }
+ } else {
+ thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+ WREG32(CG_THERMAL_INT, thermal_int);
+ }
+
+ return 0;
+}
+
+static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
+ int min_temp, int max_temp)
+{
+ int low_temp = 0 * 1000;
+ int high_temp = 255 * 1000;
+
+ if (low_temp < min_temp)
+ low_temp = min_temp;
+ if (high_temp > max_temp)
+ high_temp = max_temp;
+ if (high_temp < low_temp) {
+ DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+ return -EINVAL;
+ }
+
+ WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
+ WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
+ WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
+
+ adev->pm.dpm.thermal.min_temp = low_temp;
+ adev->pm.dpm.thermal.max_temp = high_temp;
+
+ return 0;
+}
+
+static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 tmp;
+
+ if (si_pi->fan_ctrl_is_in_default_mode) {
+ tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+ si_pi->fan_ctrl_default_mode = tmp;
+ tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+ si_pi->t_min = tmp;
+ si_pi->fan_ctrl_is_in_default_mode = false;
+ }
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+ tmp |= TMIN(0);
+ WREG32(CG_FDO_CTRL2, tmp);
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+ tmp |= FDO_PWM_MODE(mode);
+ WREG32(CG_FDO_CTRL2, tmp);
+}
+
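+/*
+ * Build the SMC fan table from the pplib fan profile: two PWM-vs-temperature
+ * slopes plus hysteresis, scaled to the hardware duty cycle range.
+ */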
+static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
+ u32 duty100;
+ u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ u16 fdo_min, slope1, slope2;
+ u32 reference_clock, tmp;
+ int ret;
+ u64 tmp64;
+
+ if (!si_pi->fan_table_start) {
+ adev->pm.dpm.fan.ucode_fan_control = false;
+ return 0;
+ }
+
+ duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+ if (duty100 == 0) {
+ adev->pm.dpm.fan.ucode_fan_control = false;
+ return 0;
+ }
+
+ tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (u16)tmp64;
+
+ t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
+ t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
+
+ pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
+ pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
+
+ slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
+ fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
+ fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
+ fan_table.slope1 = cpu_to_be16(slope1);
+ fan_table.slope2 = cpu_to_be16(slope2);
+ fan_table.fdo_min = cpu_to_be16(fdo_min);
+ fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
+ fan_table.hys_up = cpu_to_be16(1);
+ fan_table.hys_slope = cpu_to_be16(1);
+ fan_table.temp_resp_lim = cpu_to_be16(5);
+ reference_clock = amdgpu_asic_get_xclk(adev);
+
+ fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
+ reference_clock) / 1600);
+ fan_table.fdo_max = cpu_to_be16((u16)duty100);
+
+ tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+ fan_table.temp_src = (uint8_t)tmp;
+
+ ret = amdgpu_si_copy_bytes_to_smc(adev,
+ si_pi->fan_table_start,
+ (u8 *)(&fan_table),
+ sizeof(fan_table),
+ si_pi->sram_end);
+
+ if (ret) {
+ DRM_ERROR("Failed to load fan table to the SMC.");
+ adev->pm.dpm.fan.ucode_fan_control = false;
+ }
+
+ return ret;
+}
+
+static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ PPSMC_Result ret;
+
+ ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl);
+ if (ret == PPSMC_Result_OK) {
+ si_pi->fan_is_controlled_by_smc = true;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ PPSMC_Result ret;
+
+ ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl);
+
+ if (ret == PPSMC_Result_OK) {
+ si_pi->fan_is_controlled_by_smc = false;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+ u32 *speed)
+{
+ u32 duty, duty100;
+ u64 tmp64;
+
+ if (adev->pm.no_fan)
+ return -ENOENT;
+
+ duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (u64)duty * 100;
+ do_div(tmp64, duty100);
+ *speed = (u32)tmp64;
+
+ if (*speed > 100)
+ *speed = 100;
+
+ return 0;
+}
+
+static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+ u32 speed)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 tmp;
+ u32 duty, duty100;
+ u64 tmp64;
+
+ if (adev->pm.no_fan)
+ return -ENOENT;
+
+ if (si_pi->fan_is_controlled_by_smc)
+ return -EINVAL;
+
+ if (speed > 100)
+ return -EINVAL;
+
+ duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (u64)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (u32)tmp64;
+
+ tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
+ tmp |= FDO_STATIC_DUTY(duty);
+ WREG32(CG_FDO_CTRL0, tmp);
+
+ return 0;
+}
+
+static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+{
+ if (mode) {
+ /* stop auto-manage */
+ if (adev->pm.dpm.fan.ucode_fan_control)
+ si_fan_ctrl_stop_smc_fan_control(adev);
+ si_fan_ctrl_set_static_mode(adev, mode);
+ } else {
+ /* restart auto-manage */
+ if (adev->pm.dpm.fan.ucode_fan_control)
+ si_thermal_start_smc_fan_control(adev);
+ else
+ si_fan_ctrl_set_default_mode(adev);
+ }
+}
+
+static u32 si_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 tmp;
+
+ if (si_pi->fan_is_controlled_by_smc)
+ return 0;
+
+ tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+ return (tmp >> FDO_PWM_MODE_SHIFT);
+}
+
+#if 0
+static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
+ u32 *speed)
+{
+ u32 tach_period;
+ u32 xclk = amdgpu_asic_get_xclk(adev);
+
+ if (adev->pm.no_fan)
+ return -ENOENT;
+
+ if (adev->pm.fan_pulses_per_revolution == 0)
+ return -ENOENT;
+
+ tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+ if (tach_period == 0)
+ return -ENOENT;
+
+ *speed = 60 * xclk * 10000 / tach_period;
+
+ return 0;
+}
+
+static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
+ u32 speed)
+{
+ u32 tach_period, tmp;
+ u32 xclk = amdgpu_asic_get_xclk(adev);
+
+ if (adev->pm.no_fan)
+ return -ENOENT;
+
+ if (adev->pm.fan_pulses_per_revolution == 0)
+ return -ENOENT;
+
+ if ((speed < adev->pm.fan_min_rpm) ||
+ (speed > adev->pm.fan_max_rpm))
+ return -EINVAL;
+
+ if (adev->pm.dpm.fan.ucode_fan_control)
+ si_fan_ctrl_stop_smc_fan_control(adev);
+
+ tach_period = 60 * xclk * 10000 / (8 * speed);
+ tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
+ tmp |= TARGET_PERIOD(tach_period);
+ WREG32(CG_TACH_CTRL, tmp);
+
+ si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
+
+ return 0;
+}
+#endif
+
+static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 tmp;
+
+ if (!si_pi->fan_ctrl_is_in_default_mode) {
+ tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+ tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
+ WREG32(CG_FDO_CTRL2, tmp);
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+ tmp |= TMIN(si_pi->t_min);
+ WREG32(CG_FDO_CTRL2, tmp);
+ si_pi->fan_ctrl_is_in_default_mode = true;
+ }
+}
+
+static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev)
+{
+ if (adev->pm.dpm.fan.ucode_fan_control) {
+ si_fan_ctrl_start_smc_fan_control(adev);
+ si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
+ }
+}
+
+static void si_thermal_initialize(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ if (adev->pm.fan_pulses_per_revolution) {
+ tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
+ tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution - 1);
+ WREG32(CG_TACH_CTRL, tmp);
+ }
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
+ tmp |= TACH_PWM_RESP_RATE(0x28);
+ WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
+{
+ int ret;
+
+ si_thermal_initialize(adev);
+ ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = si_thermal_enable_alert(adev, true);
+ if (ret)
+ return ret;
+ if (adev->pm.dpm.fan.ucode_fan_control) {
+ ret = si_halt_smc(adev);
+ if (ret)
+ return ret;
+ ret = si_thermal_setup_fan_table(adev);
+ if (ret)
+ return ret;
+ ret = si_resume_smc(adev);
+ if (ret)
+ return ret;
+ si_thermal_start_smc_fan_control(adev);
+ }
+
+ return 0;
+}
+
+static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev)
+{
+ if (!adev->pm.no_fan) {
+ si_fan_ctrl_set_default_mode(adev);
+ si_fan_ctrl_stop_smc_fan_control(adev);
+ }
+}
+
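+/*
+ * Full DPM bring-up: voltage and spread-spectrum setup, SMC firmware upload,
+ * state/SPLL/arbiter/MC tables, CAC and DTE init, then start DPM itself.
+ */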
+static int si_dpm_enable(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+ int ret;
+
+ if (amdgpu_si_is_smc_running(adev))
+ return -EINVAL;
+ if (pi->voltage_control || si_pi->voltage_control_svi2)
+ si_enable_voltage_control(adev, true);
+ if (pi->mvdd_control)
+ si_get_mvdd_configuration(adev);
+ if (pi->voltage_control || si_pi->voltage_control_svi2) {
+ ret = si_construct_voltage_tables(adev);
+ if (ret) {
+ DRM_ERROR("si_construct_voltage_tables failed\n");
+ return ret;
+ }
+ }
+ if (eg_pi->dynamic_ac_timing) {
+ ret = si_initialize_mc_reg_table(adev);
+ if (ret)
+ eg_pi->dynamic_ac_timing = false;
+ }
+ if (pi->dynamic_ss)
+ si_enable_spread_spectrum(adev, true);
+ if (pi->thermal_protection)
+ si_enable_thermal_protection(adev, true);
+ si_setup_bsp(adev);
+ si_program_git(adev);
+ si_program_tp(adev);
+ si_program_tpp(adev);
+ si_program_sstp(adev);
+ si_enable_display_gap(adev);
+ si_program_vc(adev);
+ ret = si_upload_firmware(adev);
+ if (ret) {
+ DRM_ERROR("si_upload_firmware failed\n");
+ return ret;
+ }
+ ret = si_process_firmware_header(adev);
+ if (ret) {
+ DRM_ERROR("si_process_firmware_header failed\n");
+ return ret;
+ }
+ ret = si_initial_switch_from_arb_f0_to_f1(adev);
+ if (ret) {
+ DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
+ return ret;
+ }
+ ret = si_init_smc_table(adev);
+ if (ret) {
+ DRM_ERROR("si_init_smc_table failed\n");
+ return ret;
+ }
+ ret = si_init_smc_spll_table(adev);
+ if (ret) {
+ DRM_ERROR("si_init_smc_spll_table failed\n");
+ return ret;
+ }
+ ret = si_init_arb_table_index(adev);
+ if (ret) {
+ DRM_ERROR("si_init_arb_table_index failed\n");
+ return ret;
+ }
+ if (eg_pi->dynamic_ac_timing) {
+ ret = si_populate_mc_reg_table(adev, boot_ps);
+ if (ret) {
+ DRM_ERROR("si_populate_mc_reg_table failed\n");
+ return ret;
+ }
+ }
+ ret = si_initialize_smc_cac_tables(adev);
+ if (ret) {
+ DRM_ERROR("si_initialize_smc_cac_tables failed\n");
+ return ret;
+ }
+ ret = si_initialize_hardware_cac_manager(adev);
+ if (ret) {
+ DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
+ return ret;
+ }
+ ret = si_initialize_smc_dte_tables(adev);
+ if (ret) {
+ DRM_ERROR("si_initialize_smc_dte_tables failed\n");
+ return ret;
+ }
+ ret = si_populate_smc_tdp_limits(adev, boot_ps);
+ if (ret) {
+ DRM_ERROR("si_populate_smc_tdp_limits failed\n");
+ return ret;
+ }
+ ret = si_populate_smc_tdp_limits_2(adev, boot_ps);
+ if (ret) {
+ DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
+ return ret;
+ }
+ si_program_response_times(adev);
+ si_program_ds_registers(adev);
+ si_dpm_start_smc(adev);
+ ret = si_notify_smc_display_change(adev, false);
+ if (ret) {
+ DRM_ERROR("si_notify_smc_display_change failed\n");
+ return ret;
+ }
+ si_enable_sclk_control(adev, true);
+ si_start_dpm(adev);
+
+ si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+ si_thermal_start_thermal_controller(adev);
+ ni_update_current_ps(adev, boot_ps);
+
+ return 0;
+}
+
+static int si_set_temperature_range(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = si_thermal_enable_alert(adev, false);
+ if (ret)
+ return ret;
+ ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = si_thermal_enable_alert(adev, true);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static void si_dpm_disable(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+
+ if (!amdgpu_si_is_smc_running(adev))
+ return;
+ si_thermal_stop_thermal_controller(adev);
+ si_disable_ulv(adev);
+ si_clear_vc(adev);
+ if (pi->thermal_protection)
+ si_enable_thermal_protection(adev, false);
+ si_enable_power_containment(adev, boot_ps, false);
+ si_enable_smc_cac(adev, boot_ps, false);
+ si_enable_spread_spectrum(adev, false);
+ si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+ si_stop_dpm(adev);
+ si_reset_to_default(adev);
+ si_dpm_stop_smc(adev);
+ si_force_switch_to_arb_f0(adev);
+
+ ni_update_current_ps(adev, boot_ps);
+}
+
+static int si_dpm_pre_set_power_state(struct amdgpu_device *adev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
+ struct amdgpu_ps *new_ps = &requested_ps;
+
+ ni_update_requested_ps(adev, new_ps);
+ si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);
+
+ return 0;
+}
+
+static int si_power_control_set_level(struct amdgpu_device *adev)
+{
+ struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps;
+ int ret;
+
+ ret = si_restrict_performance_levels_before_switch(adev);
+ if (ret)
+ return ret;
+ ret = si_halt_smc(adev);
+ if (ret)
+ return ret;
+ ret = si_populate_smc_tdp_limits(adev, new_ps);
+ if (ret)
+ return ret;
+ ret = si_populate_smc_tdp_limits_2(adev, new_ps);
+ if (ret)
+ return ret;
+ ret = si_resume_smc(adev);
+ if (ret)
+ return ret;
+ ret = si_set_sw_state(adev);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static int si_dpm_set_power_state(struct amdgpu_device *adev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
+ struct amdgpu_ps *old_ps = &eg_pi->current_rps;
+ int ret;
+
+ ret = si_disable_ulv(adev);
+ if (ret) {
+ DRM_ERROR("si_disable_ulv failed\n");
+ return ret;
+ }
+ ret = si_restrict_performance_levels_before_switch(adev);
+ if (ret) {
+ DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
+ return ret;
+ }
+ if (eg_pi->pcie_performance_request)
+ si_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
+ ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps);
+ ret = si_enable_power_containment(adev, new_ps, false);
+ if (ret) {
+ DRM_ERROR("si_enable_power_containment failed\n");
+ return ret;
+ }
+ ret = si_enable_smc_cac(adev, new_ps, false);
+ if (ret) {
+ DRM_ERROR("si_enable_smc_cac failed\n");
+ return ret;
+ }
+ ret = si_halt_smc(adev);
+ if (ret) {
+ DRM_ERROR("si_halt_smc failed\n");
+ return ret;
+ }
+ ret = si_upload_sw_state(adev, new_ps);
+ if (ret) {
+ DRM_ERROR("si_upload_sw_state failed\n");
+ return ret;
+ }
+ ret = si_upload_smc_data(adev);
+ if (ret) {
+ DRM_ERROR("si_upload_smc_data failed\n");
+ return ret;
+ }
+ ret = si_upload_ulv_state(adev);
+ if (ret) {
+ DRM_ERROR("si_upload_ulv_state failed\n");
+ return ret;
+ }
+ if (eg_pi->dynamic_ac_timing) {
+ ret = si_upload_mc_reg_table(adev, new_ps);
+ if (ret) {
+ DRM_ERROR("si_upload_mc_reg_table failed\n");
+ return ret;
+ }
+ }
+ ret = si_program_memory_timing_parameters(adev, new_ps);
+ if (ret) {
+ DRM_ERROR("si_program_memory_timing_parameters failed\n");
+ return ret;
+ }
+ si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps);
+
+ ret = si_resume_smc(adev);
+ if (ret) {
+ DRM_ERROR("si_resume_smc failed\n");
+ return ret;
+ }
+ ret = si_set_sw_state(adev);
+ if (ret) {
+ DRM_ERROR("si_set_sw_state failed\n");
+ return ret;
+ }
+ ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps);
+ if (eg_pi->pcie_performance_request)
+ si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
+ ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps);
+ if (ret) {
+ DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
+ return ret;
+ }
+ ret = si_enable_smc_cac(adev, new_ps, true);
+ if (ret) {
+ DRM_ERROR("si_enable_smc_cac failed\n");
+ return ret;
+ }
+ ret = si_enable_power_containment(adev, new_ps, true);
+ if (ret) {
+ DRM_ERROR("si_enable_power_containment failed\n");
+ return ret;
+ }
+
+ ret = si_power_control_set_level(adev);
+ if (ret) {
+ DRM_ERROR("si_power_control_set_level failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void si_dpm_post_set_power_state(struct amdgpu_device *adev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
+
+ ni_update_current_ps(adev, new_ps);
+}
+
+#if 0
+void si_dpm_reset_asic(struct amdgpu_device *adev)
+{
+ si_restrict_performance_levels_before_switch(adev);
+ si_disable_ulv(adev);
+ si_set_boot_state(adev);
+}
+#endif
+
+static void si_dpm_display_configuration_changed(struct amdgpu_device *adev)
+{
+ si_program_display_gap(adev);
+}
+
+static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps,
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+ u8 table_rev)
+{
+ rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ rps->class = le16_to_cpu(non_clock_info->usClassification);
+ rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+ if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+ rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+ rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+ } else if (r600_is_uvd_state(rps->class, rps->class2)) {
+ rps->vclk = RV770_DEFAULT_VCLK_FREQ;
+ rps->dclk = RV770_DEFAULT_DCLK_FREQ;
+ } else {
+ rps->vclk = 0;
+ rps->dclk = 0;
+ }
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+ adev->pm.dpm.boot_ps = rps;
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+ adev->pm.dpm.uvd_ps = rps;
+}
+
+static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps, int index,
+ union pplib_clock_info *clock_info)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ps *ps = si_get_ps(rps);
+ u16 leakage_voltage;
+ struct rv7xx_pl *pl = &ps->performance_levels[index];
+ int ret;
+
+ ps->performance_level_count = index + 1;
+
+ pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+ pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
+ pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+ pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;
+
+ pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
+ pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
+ pl->flags = le32_to_cpu(clock_info->si.ulFlags);
+ pl->pcie_gen = r600_get_pcie_gen_support(adev,
+ si_pi->sys_pcie_mask,
+ si_pi->boot_pcie_gen,
+ clock_info->si.ucPCIEGen);
+
+ /* patch up vddc if necessary */
+ ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
+ &leakage_voltage);
+ if (ret == 0)
+ pl->vddc = leakage_voltage;
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
+ pi->acpi_vddc = pl->vddc;
+ eg_pi->acpi_vddci = pl->vddci;
+ si_pi->acpi_pcie_gen = pl->pcie_gen;
+ }
+
+ if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
+ index == 0) {
+ /* XXX disable for A0 tahiti */
+ si_pi->ulv.supported = false;
+ si_pi->ulv.pl = *pl;
+ si_pi->ulv.one_pcie_lane_in_ulv = false;
+ si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
+ si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
+ si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
+ }
+
+ if (pi->min_vddc_in_table > pl->vddc)
+ pi->min_vddc_in_table = pl->vddc;
+
+ if (pi->max_vddc_in_table < pl->vddc)
+ pi->max_vddc_in_table = pl->vddc;
+
+ /* patch up boot state */
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ u16 vddc, vddci, mvdd;
+ amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd);
+ pl->mclk = adev->clock.default_mclk;
+ pl->sclk = adev->clock.default_sclk;
+ pl->vddc = vddc;
+ pl->vddci = vddci;
+ si_pi->mvdd_bootup_value = mvdd;
+ }
+
+ if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+ ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
+ }
+}
+
+union pplib_power_state {
+ struct _ATOM_PPLIB_STATE v1;
+ struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
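+/*
+ * Walk the pplib state array in the vbios, building a driver power state
+ * (with up to SISLANDS_MAX_HARDWARE_POWERLEVELS levels) for each entry.
+ */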
+static int si_parse_power_table(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j, k, non_clock_array_index, clock_array_index;
+ union pplib_clock_info *clock_info;
+ struct _StateArray *state_array;
+ struct _ClockInfoArray *clock_info_array;
+ struct _NonClockInfoArray *non_clock_info_array;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ u8 *power_state_offset;
+ struct si_ps *ps;
+
+ if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ amdgpu_add_thermal_controller(adev);
+
+ state_array = (struct _StateArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset));
+ clock_info_array = (struct _ClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+ non_clock_info_array = (struct _NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+ adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+ sizeof(struct amdgpu_ps), GFP_KERNEL);
+ if (!adev->pm.dpm.ps)
+ return -ENOMEM;
+ power_state_offset = (u8 *)state_array->states;
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
+ if (ps == NULL) {
+ while (--i >= 0)
+ kfree(adev->pm.dpm.ps[i].ps_priv);
+ kfree(adev->pm.dpm.ps);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.ps[i].ps_priv = ps;
+ si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
+ non_clock_info,
+ non_clock_info_array->ucEntrySize);
+ k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = idx[j];
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
+ break;
+ clock_info = (union pplib_clock_info *)
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
+ si_parse_pplib_clock_info(adev,
+ &adev->pm.dpm.ps[i], k,
+ clock_info);
+ k++;
+ }
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ }
+ adev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+ /* fill in the vce power states */
+ for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ u32 sclk, mclk;
+ clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+ sclk |= clock_info->si.ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+ mclk |= clock_info->si.ucMemoryClockHigh << 16;
+ adev->pm.dpm.vce_states[i].sclk = sclk;
+ adev->pm.dpm.vce_states[i].mclk = mclk;
+ }
+
+ return 0;
+}
+
+static int si_dpm_init(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi;
+ struct evergreen_power_info *eg_pi;
+ struct ni_power_info *ni_pi;
+ struct si_power_info *si_pi;
+ struct atom_clock_dividers dividers;
+ int ret;
+ u32 mask;
+
+ si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
+ if (si_pi == NULL)
+ return -ENOMEM;
+ adev->pm.dpm.priv = si_pi;
+ ni_pi = &si_pi->ni;
+ eg_pi = &ni_pi->eg;
+ pi = &eg_pi->rv7xx;
+
+ ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+ if (ret)
+ si_pi->sys_pcie_mask = 0;
+ else
+ si_pi->sys_pcie_mask = mask;
+ si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+ si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
+
+ si_set_max_cu_value(adev);
+
+ rv770_get_max_vddc(adev);
+ si_get_leakage_vddc(adev);
+ si_patch_dependency_tables_based_on_leakage(adev);
+
+ pi->acpi_vddc = 0;
+ eg_pi->acpi_vddci = 0;
+ pi->min_vddc_in_table = 0;
+ pi->max_vddc_in_table = 0;
+
+ ret = amdgpu_get_platform_caps(adev);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_parse_extended_power_table(adev);
+ if (ret)
+ return ret;
+
+ ret = si_parse_power_table(adev);
+ if (ret)
+ return ret;
+
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
+ kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
+
+ if (adev->pm.dpm.voltage_response_time == 0)
+ adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
+ if (adev->pm.dpm.backbias_response_time == 0)
+ adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
+
+ ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+ 0, false, &dividers);
+ if (ret)
+ pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
+ else
+ pi->ref_div = dividers.ref_div + 1;
+
+ eg_pi->smu_uvd_hs = false;
+
+ pi->mclk_strobe_mode_threshold = 40000;
+ if (si_is_special_1gb_platform(adev))
+ pi->mclk_stutter_mode_threshold = 0;
+ else
+ pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
+ pi->mclk_edc_enable_threshold = 40000;
+ eg_pi->mclk_edc_wr_enable_threshold = 40000;
+
+ ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
+
+ pi->voltage_control =
+ amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT);
+ if (!pi->voltage_control) {
+ si_pi->voltage_control_svi2 =
+ amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_SVID2);
+ if (si_pi->voltage_control_svi2)
+ amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
+ }
+
+ pi->mvdd_control =
+ amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
+ VOLTAGE_OBJ_GPIO_LUT);
+
+ eg_pi->vddci_control =
+ amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+ VOLTAGE_OBJ_GPIO_LUT);
+ if (!eg_pi->vddci_control)
+ si_pi->vddci_control_svi2 =
+ amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+ VOLTAGE_OBJ_SVID2);
+
+ si_pi->vddc_phase_shed_control =
+ amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_PHASE_LUT);
+
+ rv770_get_engine_memory_ss(adev);
+
+ pi->asi = RV770_ASI_DFLT;
+ pi->pasi = CYPRESS_HASI_DFLT;
+ pi->vrc = SISLANDS_VRC_DFLT;
+
+ pi->gfx_clock_gating = true;
+
+ eg_pi->sclk_deep_sleep = true;
+ si_pi->sclk_deep_sleep_above_low = false;
+
+ if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
+ pi->thermal_protection = true;
+ else
+ pi->thermal_protection = false;
+
+ eg_pi->dynamic_ac_timing = true;
+
+ eg_pi->light_sleep = true;
+#if defined(CONFIG_ACPI)
+ eg_pi->pcie_performance_request =
+ amdgpu_acpi_is_pcie_performance_request_supported(adev);
+#else
+ eg_pi->pcie_performance_request = false;
+#endif
+
+ si_pi->sram_end = SMC_RAM_END;
+
+ adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
+ adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
+ adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
+ adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
+ adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
+ adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
+ adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
+
+ si_initialize_powertune_defaults(adev);
+
+ /* make sure dc limits are valid */
+ if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+ si_pi->fan_ctrl_is_in_default_mode = true;
+
+ return 0;
+}
+
+static void si_dpm_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (adev->pm.dpm.ps)
+ for (i = 0; i < adev->pm.dpm.num_ps; i++)
+ kfree(adev->pm.dpm.ps[i].ps_priv);
+ kfree(adev->pm.dpm.ps);
+ kfree(adev->pm.dpm.priv);
+ kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+ amdgpu_free_extended_power_table(adev);
+}
+
+static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+ struct seq_file *m)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct amdgpu_ps *rps = &eg_pi->current_rps;
+ struct si_ps *ps = si_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+ CURRENT_STATE_INDEX_SHIFT;
+
+ if (current_index >= ps->performance_level_count) {
+ seq_printf(m, "invalid dpm profile %d\n", current_index);
+ } else {
+ pl = &ps->performance_levels[current_index];
+ seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
+ }
+}
+
+static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cg_thermal_int;
+
+ switch (type) {
+ case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+ cg_thermal_int |= THERM_INT_MASK_HIGH;
+ WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+ cg_thermal_int &= ~THERM_INT_MASK_HIGH;
+ WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+ cg_thermal_int |= THERM_INT_MASK_LOW;
+ WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+ cg_thermal_int &= ~THERM_INT_MASK_LOW;
+ WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int si_dpm_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ bool queue_thermal = false;
+
+ if (entry == NULL)
+ return -EINVAL;
+
+ switch (entry->src_id) {
+ case 230: /* thermal low to high */
+ DRM_DEBUG("IH: thermal low to high\n");
+ adev->pm.dpm.thermal.high_to_low = false;
+ queue_thermal = true;
+ break;
+ case 231: /* thermal high to low */
+ DRM_DEBUG("IH: thermal high to low\n");
+ adev->pm.dpm.thermal.high_to_low = true;
+ queue_thermal = true;
+ break;
+ default:
+ break;
+ }
+
+ if (queue_thermal)
+ schedule_work(&adev->pm.dpm.thermal.work);
+
+ return 0;
+}
+
+static int si_dpm_late_init(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!amdgpu_dpm)
+ return 0;
+
+ /* init the sysfs and debugfs files late */
+ ret = amdgpu_pm_sysfs_init(adev);
+ if (ret)
+ return ret;
+
+ ret = si_set_temperature_range(adev);
+ if (ret)
+ return ret;
+#if 0 /* TODO: enable UVD powergating? */
+ si_dpm_powergate_uvd(adev, true);
+#endif
+ return 0;
+}
+
+/**
+ * si_dpm_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int si_dpm_init_microcode(struct amdgpu_device *adev)
+{
+ const char *chip_name;
+ char fw_name[30];
+ int err;
+
+ DRM_DEBUG("\n");
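+ /* some board revisions and device IDs need the newer "_k" SMC
+ * firmware images; pick the right one by PCI revision/device ID
+ */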
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ chip_name = "tahiti";
+ break;
+ case CHIP_PITCAIRN:
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x6810) ||
+ (adev->pdev->device == 0x6811) ||
+ (adev->pdev->device == 0x6816) ||
+ (adev->pdev->device == 0x6817) ||
+ (adev->pdev->device == 0x6806))
+ chip_name = "pitcairn_k";
+ else
+ chip_name = "pitcairn";
+ break;
+ case CHIP_VERDE:
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87) ||
+ (adev->pdev->device == 0x6820) ||
+ (adev->pdev->device == 0x6821) ||
+ (adev->pdev->device == 0x6822) ||
+ (adev->pdev->device == 0x6823) ||
+ (adev->pdev->device == 0x682A) ||
+ (adev->pdev->device == 0x682B))
+ chip_name = "verde_k";
+ else
+ chip_name = "verde";
+ break;
+ case CHIP_OLAND:
+ if ((adev->pdev->revision == 0xC7) ||
+ (adev->pdev->revision == 0x80) ||
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->device == 0x6604) ||
+ (adev->pdev->device == 0x6605))
+ chip_name = "oland_k";
+ else
+ chip_name = "oland";
+ break;
+ case CHIP_HAINAN:
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0xC3) ||
+ (adev->pdev->device == 0x6664) ||
+ (adev->pdev->device == 0x6665) ||
+ (adev->pdev->device == 0x6667))
+ chip_name = "hainan_k";
+ else
+ chip_name = "hainan";
+ break;
+ default:
+ BUG();
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->pm.fw);
+
+out:
+ if (err) {
+ DRM_ERROR("si_smc: Failed to load firmware \"%s\" (err = %d)\n",
+ fw_name, err);
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+ }
+ return err;
+}
+
+static int si_dpm_sw_init(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
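+ /* src_ids 230/231 are the thermal low-to-high and high-to-low
+ * interrupts handled by si_dpm_process_interrupt()
+ */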
+ ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+ if (ret)
+ return ret;
+
+ /* default to balanced state */
+ adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
+ adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+ adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+ adev->pm.default_sclk = adev->clock.default_sclk;
+ adev->pm.default_mclk = adev->clock.default_mclk;
+ adev->pm.current_sclk = adev->clock.default_sclk;
+ adev->pm.current_mclk = adev->clock.default_mclk;
+ adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
+ if (amdgpu_dpm == 0)
+ return 0;
+
+ ret = si_dpm_init_microcode(adev);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
+ mutex_lock(&adev->pm.mutex);
+ ret = si_dpm_init(adev);
+ if (ret)
+ goto dpm_failed;
+ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ if (amdgpu_dpm == 1)
+ amdgpu_pm_print_power_states(adev);
+ mutex_unlock(&adev->pm.mutex);
+ DRM_INFO("amdgpu: dpm initialized\n");
+
+ return 0;
+
+dpm_failed:
+ si_dpm_fini(adev);
+ mutex_unlock(&adev->pm.mutex);
+ DRM_ERROR("amdgpu: dpm initialization failed\n");
+ return ret;
+}
+
+static int si_dpm_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_pm_sysfs_fini(adev);
+ si_dpm_fini(adev);
+ mutex_unlock(&adev->pm.mutex);
+
+ return 0;
+}
+
+static int si_dpm_hw_init(void *handle)
+{
+ int ret;
+
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!amdgpu_dpm)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ si_dpm_setup_asic(adev);
+ ret = si_dpm_enable(adev);
+ if (ret)
+ adev->pm.dpm_enabled = false;
+ else
+ adev->pm.dpm_enabled = true;
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+static int si_dpm_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ si_dpm_disable(adev);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return 0;
+}
+
+static int si_dpm_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ /* disable dpm */
+ si_dpm_disable(adev);
+ /* reset the power state */
+ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
+ }
+ return 0;
+}
+
+static int si_dpm_resume(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->pm.dpm_enabled) {
+ /* asic init will reset to the boot state */
+ mutex_lock(&adev->pm.mutex);
+ si_dpm_setup_asic(adev);
+ ret = si_dpm_enable(adev);
+ if (ret)
+ adev->pm.dpm_enabled = false;
+ else
+ adev->pm.dpm_enabled = true;
+ mutex_unlock(&adev->pm.mutex);
+ if (adev->pm.dpm_enabled)
+ amdgpu_pm_compute_clocks(adev);
+ }
+ return 0;
+}
+
+static bool si_dpm_is_idle(void *handle)
+{
+ /* XXX */
+ return true;
+}
+
+static int si_dpm_wait_for_idle(void *handle)
+{
+ /* XXX */
+ return 0;
+}
+
+static int si_dpm_soft_reset(void *handle)
+{
+ return 0;
+}
+
+static int si_dpm_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int si_dpm_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+/* get temperature in millidegrees */
+static int si_dpm_get_temp(struct amdgpu_device *adev)
+{
+ u32 temp;
+ int actual_temp = 0;
+
+ temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
+ CTF_TEMP_SHIFT;
+
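+ /* readings with bit 9 set are out of range and clamped to 255 C */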
+ if (temp & 0x200)
+ actual_temp = 255;
+ else
+ actual_temp = temp & 0x1ff;
+
+ actual_temp = (actual_temp * 1000);
+
+ return actual_temp;
+}
+
+static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
+
+ if (low)
+ return requested_state->performance_levels[0].sclk;
+ else
+ return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
+}
+
+static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
+
+ if (low)
+ return requested_state->performance_levels[0].mclk;
+ else
+ return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
+}
+
+static void si_dpm_print_power_state(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ struct si_ps *ps = si_get_ps(rps);
+ struct rv7xx_pl *pl;
+ int i;
+
+ amdgpu_dpm_print_class_info(rps->class, rps->class2);
+ amdgpu_dpm_print_cap_info(rps->caps);
+ DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ for (i = 0; i < ps->performance_level_count; i++) {
+ pl = &ps->performance_levels[i];
+ if (adev->asic_type >= CHIP_TAHITI)
+ DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+ i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
+ else
+ DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
+ i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+ }
+ amdgpu_dpm_print_ps_status(adev, rps);
+}
+
+static int si_dpm_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ si_dpm_set_dpm_funcs(adev);
+ si_dpm_set_irq_funcs(adev);
+ return 0;
+}
+
+const struct amd_ip_funcs si_dpm_ip_funcs = {
+ .name = "si_dpm",
+ .early_init = si_dpm_early_init,
+ .late_init = si_dpm_late_init,
+ .sw_init = si_dpm_sw_init,
+ .sw_fini = si_dpm_sw_fini,
+ .hw_init = si_dpm_hw_init,
+ .hw_fini = si_dpm_hw_fini,
+ .suspend = si_dpm_suspend,
+ .resume = si_dpm_resume,
+ .is_idle = si_dpm_is_idle,
+ .wait_for_idle = si_dpm_wait_for_idle,
+ .soft_reset = si_dpm_soft_reset,
+ .set_clockgating_state = si_dpm_set_clockgating_state,
+ .set_powergating_state = si_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs si_dpm_funcs = {
+ .get_temperature = &si_dpm_get_temp,
+ .pre_set_power_state = &si_dpm_pre_set_power_state,
+ .set_power_state = &si_dpm_set_power_state,
+ .post_set_power_state = &si_dpm_post_set_power_state,
+ .display_configuration_changed = &si_dpm_display_configuration_changed,
+ .get_sclk = &si_dpm_get_sclk,
+ .get_mclk = &si_dpm_get_mclk,
+ .print_power_state = &si_dpm_print_power_state,
+ .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &si_dpm_force_performance_level,
+ .vblank_too_short = &si_dpm_vblank_too_short,
+ .set_fan_control_mode = &si_dpm_set_fan_control_mode,
+ .get_fan_control_mode = &si_dpm_get_fan_control_mode,
+ .set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
+ .get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
+};
+
+static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
+{
+ if (adev->pm.funcs == NULL)
+ adev->pm.funcs = &si_dpm_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
+ .set = si_dpm_set_interrupt_state,
+ .process = si_dpm_process_interrupt,
+};
+
+static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
+ adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
new file mode 100644
index 000000000000..51ce21c5f4fb
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
@@ -0,0 +1,1015 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SI_DPM_H__
+#define __SI_DPM_H__
+
+#include "amdgpu_atombios.h"
+#include "sislands_smc.h"
+
+#define MC_CG_CONFIG 0x96f
+#define MC_ARB_CG 0x9fa
+#define CG_ARB_REQ(x) ((x) << 0)
+#define CG_ARB_REQ_MASK (0xff << 0)
+
+#define MC_ARB_DRAM_TIMING_1 0x9fc
+#define MC_ARB_DRAM_TIMING_2 0x9fd
+#define MC_ARB_DRAM_TIMING_3 0x9fe
+#define MC_ARB_DRAM_TIMING2_1 0x9ff
+#define MC_ARB_DRAM_TIMING2_2 0xa00
+#define MC_ARB_DRAM_TIMING2_3 0xa01
+
+#define MAX_NO_OF_MVDD_VALUES 2
+#define MAX_NO_VREG_STEPS 32
+#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
+#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
+#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
+#define RV770_ASI_DFLT 1000
+#define CYPRESS_HASI_DFLT 400000
+#define PCIE_PERF_REQ_PECI_GEN1 2
+#define PCIE_PERF_REQ_PECI_GEN2 3
+#define PCIE_PERF_REQ_PECI_GEN3 4
+#define RV770_DEFAULT_VCLK_FREQ 53300 /* 10 khz */
+#define RV770_DEFAULT_DCLK_FREQ 40000 /* 10 khz */
+
+#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
+
+#define RV770_SMC_TABLE_ADDRESS 0xB000
+#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3
+
+#define SMC_STROBE_RATIO 0x0F
+#define SMC_STROBE_ENABLE 0x10
+
+#define SMC_MC_EDC_RD_FLAG 0x01
+#define SMC_MC_EDC_WR_FLAG 0x02
+#define SMC_MC_RTT_ENABLE 0x04
+#define SMC_MC_STUTTER_EN 0x08
+
+#define RV770_SMC_VOLTAGEMASK_VDDC 0
+#define RV770_SMC_VOLTAGEMASK_MVDD 1
+#define RV770_SMC_VOLTAGEMASK_VDDCI 2
+#define RV770_SMC_VOLTAGEMASK_MAX 4
+
+#define NISLANDS_SMC_STROBE_RATIO 0x0F
+#define NISLANDS_SMC_STROBE_ENABLE 0x10
+
+#define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01
+#define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02
+#define NISLANDS_SMC_MC_RTT_ENABLE 0x04
+#define NISLANDS_SMC_MC_STUTTER_EN 0x08
+
+#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0
+#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1
+#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define NISLANDS_SMC_VOLTAGEMASK_MAX 4
+
+#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0
+#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1
+#define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2
+#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 3
+
+#define SISLANDS_LEAKAGE_INDEX0 0xff01
+#define SISLANDS_MAX_LEAKAGE_COUNT 4
+
+#define SISLANDS_MAX_HARDWARE_POWERLEVELS 5
+#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
+#define SISLANDS_ACPI_STATE_ARB_INDEX 1
+#define SISLANDS_ULV_STATE_ARB_INDEX 2
+#define SISLANDS_DRIVER_STATE_ARB_INDEX 3
+
+#define SISLANDS_DPM2_MAX_PULSE_SKIP 256
+
+#define SISLANDS_DPM2_NEAR_TDP_DEC 10
+#define SISLANDS_DPM2_ABOVE_SAFE_INC 5
+#define SISLANDS_DPM2_BELOW_SAFE_INC 20
+
+#define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT 80
+
+#define SISLANDS_DPM2_MAXPS_PERCENT_H 99
+#define SISLANDS_DPM2_MAXPS_PERCENT_M 99
+
+#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
+#define SISLANDS_DPM2_SQ_RAMP_MIN_POWER 0x12
+#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
+#define SISLANDS_DPM2_SQ_RAMP_STI_SIZE 0x1E
+#define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO 0xF
+
+#define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN 10
+
+#define SISLANDS_VRC_DFLT 0xC000B3
+#define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT 1687
+#define SISLANDS_CGULVPARAMETER_DFLT 0x00040035
+#define SISLANDS_CGULVCONTROL_DFLT 0x1f007550
+
+#define SI_ASI_DFLT 10000
+#define SI_BSP_DFLT 0x41EB
+#define SI_BSU_DFLT 0x2
+#define SI_AH_DFLT 5
+#define SI_RLP_DFLT 25
+#define SI_RMP_DFLT 65
+#define SI_LHP_DFLT 40
+#define SI_LMP_DFLT 15
+#define SI_TD_DFLT 0
+#define SI_UTC_DFLT_00 0x24
+#define SI_UTC_DFLT_01 0x22
+#define SI_UTC_DFLT_02 0x22
+#define SI_UTC_DFLT_03 0x22
+#define SI_UTC_DFLT_04 0x22
+#define SI_UTC_DFLT_05 0x22
+#define SI_UTC_DFLT_06 0x22
+#define SI_UTC_DFLT_07 0x22
+#define SI_UTC_DFLT_08 0x22
+#define SI_UTC_DFLT_09 0x22
+#define SI_UTC_DFLT_10 0x22
+#define SI_UTC_DFLT_11 0x22
+#define SI_UTC_DFLT_12 0x22
+#define SI_UTC_DFLT_13 0x22
+#define SI_UTC_DFLT_14 0x22
+#define SI_DTC_DFLT_00 0x24
+#define SI_DTC_DFLT_01 0x22
+#define SI_DTC_DFLT_02 0x22
+#define SI_DTC_DFLT_03 0x22
+#define SI_DTC_DFLT_04 0x22
+#define SI_DTC_DFLT_05 0x22
+#define SI_DTC_DFLT_06 0x22
+#define SI_DTC_DFLT_07 0x22
+#define SI_DTC_DFLT_08 0x22
+#define SI_DTC_DFLT_09 0x22
+#define SI_DTC_DFLT_10 0x22
+#define SI_DTC_DFLT_11 0x22
+#define SI_DTC_DFLT_12 0x22
+#define SI_DTC_DFLT_13 0x22
+#define SI_DTC_DFLT_14 0x22
+#define SI_VRC_DFLT 0x0000C003
+#define SI_VOLTAGERESPONSETIME_DFLT 1000
+#define SI_BACKBIASRESPONSETIME_DFLT 1000
+#define SI_VRU_DFLT 0x3
+#define SI_SPLLSTEPTIME_DFLT 0x1000
+#define SI_SPLLSTEPUNIT_DFLT 0x3
+#define SI_TPU_DFLT 0
+#define SI_TPC_DFLT 0x200
+#define SI_SSTU_DFLT 0
+#define SI_SST_DFLT 0x00C8
+#define SI_GICST_DFLT 0x200
+#define SI_FCT_DFLT 0x0400
+#define SI_FCTU_DFLT 0
+#define SI_CTXCGTT3DRPHC_DFLT 0x20
+#define SI_CTXCGTT3DRSDC_DFLT 0x40
+#define SI_VDDC3DOORPHC_DFLT 0x100
+#define SI_VDDC3DOORSDC_DFLT 0x7
+#define SI_VDDC3DOORSU_DFLT 0
+#define SI_MPLLLOCKTIME_DFLT 100
+#define SI_MPLLRESETTIME_DFLT 150
+#define SI_VCOSTEPPCT_DFLT 20
+#define SI_ENDINGVCOSTEPPCT_DFLT 5
+#define SI_REFERENCEDIVIDER_DFLT 4
+
+#define SI_PM_NUMBER_OF_TC 15
+#define SI_PM_NUMBER_OF_SCLKS 20
+#define SI_PM_NUMBER_OF_MCLKS 4
+#define SI_PM_NUMBER_OF_VOLTAGE_LEVELS 4
+#define SI_PM_NUMBER_OF_ACTIVITY_LEVELS 3
+
+/* XXX are these ok? */
+#define SI_TEMP_RANGE_MIN (90 * 1000)
+#define SI_TEMP_RANGE_MAX (120 * 1000)
+
+#define FDO_PWM_MODE_STATIC 1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+enum ni_dc_cac_level
+{
+ NISLANDS_DCCAC_LEVEL_0 = 0,
+ NISLANDS_DCCAC_LEVEL_1,
+ NISLANDS_DCCAC_LEVEL_2,
+ NISLANDS_DCCAC_LEVEL_3,
+ NISLANDS_DCCAC_LEVEL_4,
+ NISLANDS_DCCAC_LEVEL_5,
+ NISLANDS_DCCAC_LEVEL_6,
+ NISLANDS_DCCAC_LEVEL_7,
+ NISLANDS_DCCAC_MAX_LEVELS
+};
+
+enum si_cac_config_reg_type
+{
+ SISLANDS_CACCONFIG_MMR = 0,
+ SISLANDS_CACCONFIG_CGIND,
+ SISLANDS_CACCONFIG_MAX
+};
+
+enum si_power_level {
+ SI_POWER_LEVEL_LOW = 0,
+ SI_POWER_LEVEL_MEDIUM = 1,
+ SI_POWER_LEVEL_HIGH = 2,
+ SI_POWER_LEVEL_CTXSW = 3,
+};
+
+enum si_td {
+ SI_TD_AUTO,
+ SI_TD_UP,
+ SI_TD_DOWN,
+};
+
+enum si_display_watermark {
+ SI_DISPLAY_WATERMARK_LOW = 0,
+ SI_DISPLAY_WATERMARK_HIGH = 1,
+};
+
+enum si_display_gap
+{
+ SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
+ SI_PM_DISPLAY_GAP_VBLANK = 1,
+ SI_PM_DISPLAY_GAP_WATERMARK = 2,
+ SI_PM_DISPLAY_GAP_IGNORE = 3,
+};
+
+extern const struct amd_ip_funcs si_dpm_ip_funcs;
+
+struct ni_leakage_coeffients
+{
+ u32 at;
+ u32 bt;
+ u32 av;
+ u32 bv;
+ s32 t_slope;
+ s32 t_intercept;
+ u32 t_ref;
+};
+
+struct SMC_Evergreen_MCRegisterAddress
+{
+ uint16_t s0;
+ uint16_t s1;
+};
+
+typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
+
+struct evergreen_mc_reg_entry {
+ u32 mclk_max;
+ u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct evergreen_mc_reg_table {
+ u8 last;
+ u8 num_entries;
+ u16 valid_flag;
+ struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct SMC_Evergreen_MCRegisterSet
+{
+ uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
+
+struct SMC_Evergreen_MCRegisters
+{
+ uint8_t last;
+ uint8_t reserved[3];
+ SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+ SMC_Evergreen_MCRegisterSet data[5];
+};
+
+typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
+
+struct SMC_NIslands_MCRegisterSet
+{
+ uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
+
+struct ni_mc_reg_entry {
+ u32 mclk_max;
+ u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct SMC_NIslands_MCRegisterAddress
+{
+ uint16_t s0;
+ uint16_t s1;
+};
+
+typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
+
+struct SMC_NIslands_MCRegisters
+{
+ uint8_t last;
+ uint8_t reserved[3];
+ SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+ SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
+
+struct evergreen_ulv_param {
+ bool supported;
+ struct rv7xx_pl *pl;
+};
+
+struct evergreen_arb_registers {
+ u32 mc_arb_dram_timing;
+ u32 mc_arb_dram_timing2;
+ u32 mc_arb_rfsh_rate;
+ u32 mc_arb_burst_time;
+};
+
+struct at {
+ u32 rlp;
+ u32 rmp;
+ u32 lhp;
+ u32 lmp;
+};
+
+struct ni_clock_registers {
+ u32 cg_spll_func_cntl;
+ u32 cg_spll_func_cntl_2;
+ u32 cg_spll_func_cntl_3;
+ u32 cg_spll_func_cntl_4;
+ u32 cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2;
+ u32 mclk_pwrmgt_cntl;
+ u32 dll_cntl;
+ u32 mpll_ad_func_cntl;
+ u32 mpll_ad_func_cntl_2;
+ u32 mpll_dq_func_cntl;
+ u32 mpll_dq_func_cntl_2;
+ u32 mpll_ss1;
+ u32 mpll_ss2;
+};
+
+struct RV770_SMC_SCLK_VALUE
+{
+ uint32_t vCG_SPLL_FUNC_CNTL;
+ uint32_t vCG_SPLL_FUNC_CNTL_2;
+ uint32_t vCG_SPLL_FUNC_CNTL_3;
+ uint32_t vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t sclk_value;
+};
+
+typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
+
+struct RV770_SMC_MCLK_VALUE
+{
+ uint32_t vMPLL_AD_FUNC_CNTL;
+ uint32_t vMPLL_AD_FUNC_CNTL_2;
+ uint32_t vMPLL_DQ_FUNC_CNTL;
+ uint32_t vMPLL_DQ_FUNC_CNTL_2;
+ uint32_t vMCLK_PWRMGT_CNTL;
+ uint32_t vDLL_CNTL;
+ uint32_t vMPLL_SS;
+ uint32_t vMPLL_SS2;
+ uint32_t mclk_value;
+};
+
+typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
+
+struct RV730_SMC_MCLK_VALUE
+{
+ uint32_t vMCLK_PWRMGT_CNTL;
+ uint32_t vDLL_CNTL;
+ uint32_t vMPLL_FUNC_CNTL;
+ uint32_t vMPLL_FUNC_CNTL2;
+ uint32_t vMPLL_FUNC_CNTL3;
+ uint32_t vMPLL_SS;
+ uint32_t vMPLL_SS2;
+ uint32_t mclk_value;
+};
+
+typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
+
+struct RV770_SMC_VOLTAGE_VALUE
+{
+ uint16_t value;
+ uint8_t index;
+ uint8_t padding;
+};
+
+typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
+
+union RV7XX_SMC_MCLK_VALUE
+{
+ RV770_SMC_MCLK_VALUE mclk770;
+ RV730_SMC_MCLK_VALUE mclk730;
+};
+
+typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
+
+struct RV770_SMC_HW_PERFORMANCE_LEVEL
+{
+ uint8_t arbValue;
+ union{
+ uint8_t seqValue;
+ uint8_t ACIndex;
+ };
+ uint8_t displayWatermark;
+ uint8_t gen2PCIE;
+ uint8_t gen2XSP;
+ uint8_t backbias;
+ uint8_t strobeMode;
+ uint8_t mcFlags;
+ uint32_t aT;
+ uint32_t bSP;
+ RV770_SMC_SCLK_VALUE sclk;
+ RV7XX_SMC_MCLK_VALUE mclk;
+ RV770_SMC_VOLTAGE_VALUE vddc;
+ RV770_SMC_VOLTAGE_VALUE mvdd;
+ RV770_SMC_VOLTAGE_VALUE vddci;
+ uint8_t reserved1;
+ uint8_t reserved2;
+ uint8_t stateFlags;
+ uint8_t padding;
+};
+
+typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
+
+struct RV770_SMC_SWSTATE
+{
+ uint8_t flags;
+ uint8_t padding1;
+ uint8_t padding2;
+ uint8_t padding3;
+ RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+};
+
+typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
+
+struct RV770_SMC_VOLTAGEMASKTABLE
+{
+ uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX];
+ uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
+
+struct RV770_SMC_STATETABLE
+{
+ uint8_t thermalProtectType;
+ uint8_t systemFlags;
+ uint8_t maxVDDCIndexInPPTable;
+ uint8_t extraFlags;
+ uint8_t highSMIO[MAX_NO_VREG_STEPS];
+ uint32_t lowSMIO[MAX_NO_VREG_STEPS];
+ RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+ RV770_SMC_SWSTATE initialState;
+ RV770_SMC_SWSTATE ACPIState;
+ RV770_SMC_SWSTATE driverState;
+ RV770_SMC_SWSTATE ULVState;
+};
+
+typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
+
+struct vddc_table_entry {
+ u16 vddc;
+ u8 vddc_index;
+ u8 high_smio;
+ u32 low_smio;
+};
+
+struct rv770_clock_registers {
+ u32 cg_spll_func_cntl;
+ u32 cg_spll_func_cntl_2;
+ u32 cg_spll_func_cntl_3;
+ u32 cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2;
+ u32 mpll_ad_func_cntl;
+ u32 mpll_ad_func_cntl_2;
+ u32 mpll_dq_func_cntl;
+ u32 mpll_dq_func_cntl_2;
+ u32 mclk_pwrmgt_cntl;
+ u32 dll_cntl;
+ u32 mpll_ss1;
+ u32 mpll_ss2;
+};
+
+struct rv730_clock_registers {
+ u32 cg_spll_func_cntl;
+ u32 cg_spll_func_cntl_2;
+ u32 cg_spll_func_cntl_3;
+ u32 cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2;
+ u32 mclk_pwrmgt_cntl;
+ u32 dll_cntl;
+ u32 mpll_func_cntl;
+ u32 mpll_func_cntl2;
+ u32 mpll_func_cntl3;
+ u32 mpll_ss;
+ u32 mpll_ss2;
+};
+
+union r7xx_clock_registers {
+ struct rv770_clock_registers rv770;
+ struct rv730_clock_registers rv730;
+};
+
+struct rv7xx_power_info {
+ /* flags */
+ bool mem_gddr5;
+ bool pcie_gen2;
+ bool dynamic_pcie_gen2;
+ bool acpi_pcie_gen2;
+ bool boot_in_gen2;
+ bool voltage_control; /* vddc */
+ bool mvdd_control;
+ bool sclk_ss;
+ bool mclk_ss;
+ bool dynamic_ss;
+ bool gfx_clock_gating;
+ bool mg_clock_gating;
+ bool mgcgtssm;
+ bool power_gating;
+ bool thermal_protection;
+ bool display_gap;
+ bool dcodt;
+ bool ulps;
+ /* registers */
+ union r7xx_clock_registers clk_regs;
+ u32 s0_vid_lower_smio_cntl;
+ /* voltage */
+ u32 vddc_mask_low;
+ u32 mvdd_mask_low;
+ u32 mvdd_split_frequency;
+ u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES];
+ u16 max_vddc;
+ u16 max_vddc_in_table;
+ u16 min_vddc_in_table;
+ struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS];
+ u8 valid_vddc_entries;
+ /* dc odt */
+ u32 mclk_odt_threshold;
+ u8 odt_value_0[2];
+ u8 odt_value_1[2];
+ /* stored values */
+ u32 boot_sclk;
+ u16 acpi_vddc;
+ u32 ref_div;
+ u32 active_auto_throttle_sources;
+ u32 mclk_stutter_mode_threshold;
+ u32 mclk_strobe_mode_threshold;
+ u32 mclk_edc_enable_threshold;
+ u32 bsp;
+ u32 bsu;
+ u32 pbsp;
+ u32 pbsu;
+ u32 dsp;
+ u32 psp;
+ u32 asi;
+ u32 pasi;
+ u32 vrc;
+ u32 restricted_levels;
+ u32 rlp;
+ u32 rmp;
+ u32 lhp;
+ u32 lmp;
+ /* smc offsets */
+ u16 state_table_start;
+ u16 soft_regs_start;
+ u16 sram_end;
+ /* scratch structs */
+ RV770_SMC_STATETABLE smc_statetable;
+};
+
+struct rv7xx_pl {
+ u32 sclk;
+ u32 mclk;
+ u16 vddc;
+ u16 vddci; /* eg+ only */
+ u32 flags;
+ enum amdgpu_pcie_gen pcie_gen; /* si+ only */
+};
+
+struct rv7xx_ps {
+ struct rv7xx_pl high;
+ struct rv7xx_pl medium;
+ struct rv7xx_pl low;
+ bool dc_compatible;
+};
+
+struct si_ps {
+ u16 performance_level_count;
+ bool dc_compatible;
+ struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+};
+
+struct ni_mc_reg_table {
+ u8 last;
+ u8 num_entries;
+ u16 valid_flag;
+ struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ni_cac_data
+{
+ struct ni_leakage_coeffients leakage_coefficients;
+ u32 i_leakage;
+ s32 leakage_minimum_temperature;
+ u32 pwr_const;
+ u32 dc_cac_value;
+ u32 bif_cac_value;
+ u32 lkge_pwr;
+ u8 mc_wr_weight;
+ u8 mc_rd_weight;
+ u8 allow_ovrflw;
+ u8 num_win_tdp;
+ u8 l2num_win_tdp;
+ u8 lts_truncate_n;
+};
+
+struct evergreen_power_info {
+ /* must be first! */
+ struct rv7xx_power_info rv7xx;
+ /* flags */
+ bool vddci_control;
+ bool dynamic_ac_timing;
+ bool abm;
+ bool mcls;
+ bool light_sleep;
+ bool memory_transition;
+ bool pcie_performance_request;
+ bool pcie_performance_request_registered;
+ bool sclk_deep_sleep;
+ bool dll_default_on;
+ bool ls_clock_gating;
+ bool smu_uvd_hs;
+ bool uvd_enabled;
+ /* stored values */
+ u16 acpi_vddci;
+ u8 mvdd_high_index;
+ u8 mvdd_low_index;
+ u32 mclk_edc_wr_enable_threshold;
+ struct evergreen_mc_reg_table mc_reg_table;
+ struct atom_voltage_table vddc_voltage_table;
+ struct atom_voltage_table vddci_voltage_table;
+ struct evergreen_arb_registers bootup_arb_registers;
+ struct evergreen_ulv_param ulv;
+ struct at ats[2];
+ /* smc offsets */
+ u16 mc_reg_table_start;
+ struct amdgpu_ps current_rps;
+ struct rv7xx_ps current_ps;
+ struct amdgpu_ps requested_rps;
+ struct rv7xx_ps requested_ps;
+};
+
+struct PP_NIslands_Dpm2PerfLevel
+{
+ uint8_t MaxPS;
+ uint8_t TgtAct;
+ uint8_t MaxPS_StepInc;
+ uint8_t MaxPS_StepDec;
+ uint8_t PSST;
+ uint8_t NearTDPDec;
+ uint8_t AboveSafeInc;
+ uint8_t BelowSafeInc;
+ uint8_t PSDeltaLimit;
+ uint8_t PSDeltaWin;
+ uint8_t Reserved[6];
+};
+
+typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
+
+struct PP_NIslands_DPM2Parameters
+{
+ uint32_t TDPLimit;
+ uint32_t NearTDPLimit;
+ uint32_t SafePowerLimit;
+ uint32_t PowerBoostLimit;
+};
+typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
+
+struct NISLANDS_SMC_SCLK_VALUE
+{
+ uint32_t vCG_SPLL_FUNC_CNTL;
+ uint32_t vCG_SPLL_FUNC_CNTL_2;
+ uint32_t vCG_SPLL_FUNC_CNTL_3;
+ uint32_t vCG_SPLL_FUNC_CNTL_4;
+ uint32_t vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t sclk_value;
+};
+
+typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
+
+struct NISLANDS_SMC_MCLK_VALUE
+{
+ uint32_t vMPLL_FUNC_CNTL;
+ uint32_t vMPLL_FUNC_CNTL_1;
+ uint32_t vMPLL_FUNC_CNTL_2;
+ uint32_t vMPLL_AD_FUNC_CNTL;
+ uint32_t vMPLL_AD_FUNC_CNTL_2;
+ uint32_t vMPLL_DQ_FUNC_CNTL;
+ uint32_t vMPLL_DQ_FUNC_CNTL_2;
+ uint32_t vMCLK_PWRMGT_CNTL;
+ uint32_t vDLL_CNTL;
+ uint32_t vMPLL_SS;
+ uint32_t vMPLL_SS2;
+ uint32_t mclk_value;
+};
+
+typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
+
+struct NISLANDS_SMC_VOLTAGE_VALUE
+{
+ uint16_t value;
+ uint8_t index;
+ uint8_t padding;
+};
+
+typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
+
+struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
+{
+ uint8_t arbValue;
+ uint8_t ACIndex;
+ uint8_t displayWatermark;
+ uint8_t gen2PCIE;
+ uint8_t reserved1;
+ uint8_t reserved2;
+ uint8_t strobeMode;
+ uint8_t mcFlags;
+ uint32_t aT;
+ uint32_t bSP;
+ NISLANDS_SMC_SCLK_VALUE sclk;
+ NISLANDS_SMC_MCLK_VALUE mclk;
+ NISLANDS_SMC_VOLTAGE_VALUE vddc;
+ NISLANDS_SMC_VOLTAGE_VALUE mvdd;
+ NISLANDS_SMC_VOLTAGE_VALUE vddci;
+ NISLANDS_SMC_VOLTAGE_VALUE std_vddc;
+ uint32_t powergate_en;
+ uint8_t hUp;
+ uint8_t hDown;
+ uint8_t stateFlags;
+ uint8_t arbRefreshState;
+ uint32_t SQPowerThrottle;
+ uint32_t SQPowerThrottle_2;
+ uint32_t reserved[2];
+ PP_NIslands_Dpm2PerfLevel dpm2;
+};
+
+typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
+
+struct NISLANDS_SMC_SWSTATE
+{
+ uint8_t flags;
+ uint8_t levelCount;
+ uint8_t padding2;
+ uint8_t padding3;
+ NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1];
+};
+
+typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
+
+struct NISLANDS_SMC_VOLTAGEMASKTABLE
+{
+ uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
+ uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
+
+#define NISLANDS_MAX_NO_VREG_STEPS 32
+
+struct NISLANDS_SMC_STATETABLE
+{
+ uint8_t thermalProtectType;
+ uint8_t systemFlags;
+ uint8_t maxVDDCIndexInPPTable;
+ uint8_t extraFlags;
+ uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+ uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+ NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+ PP_NIslands_DPM2Parameters dpm2Params;
+ NISLANDS_SMC_SWSTATE initialState;
+ NISLANDS_SMC_SWSTATE ACPIState;
+ NISLANDS_SMC_SWSTATE ULVState;
+ NISLANDS_SMC_SWSTATE driverState;
+ NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+};
+
+typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
+
+struct ni_power_info {
+ /* must be first! */
+ struct evergreen_power_info eg;
+ struct ni_clock_registers clock_registers;
+ struct ni_mc_reg_table mc_reg_table;
+ u32 mclk_rtt_mode_threshold;
+ /* flags */
+ bool use_power_boost_limit;
+ bool support_cac_long_term_average;
+ bool cac_enabled;
+ bool cac_configuration_required;
+ bool driver_calculate_cac_leakage;
+ bool pc_enabled;
+ bool enable_power_containment;
+ bool enable_cac;
+ bool enable_sq_ramping;
+ /* smc offsets */
+ u16 arb_table_start;
+ u16 fan_table_start;
+ u16 cac_table_start;
+ u16 spll_table_start;
+ /* CAC stuff */
+ struct ni_cac_data cac_data;
+ u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS];
+ const struct ni_cac_weights *cac_weights;
+ u8 lta_window_size;
+ u8 lts_truncate;
+ struct si_ps current_ps;
+ struct si_ps requested_ps;
+ /* scratch structs */
+ SMC_NIslands_MCRegisters smc_mc_reg_table;
+ NISLANDS_SMC_STATETABLE smc_statetable;
+};
+
+struct si_cac_config_reg
+{
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 value;
+ enum si_cac_config_reg_type type;
+};
+
+struct si_powertune_data
+{
+ u32 cac_window;
+ u32 l2_lta_window_size_default;
+ u8 lts_truncate_default;
+ u8 shift_n_default;
+ u8 operating_temp;
+ struct ni_leakage_coeffients leakage_coefficients;
+ u32 fixed_kt;
+ u32 lkge_lut_v0_percent;
+ u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS];
+ bool enable_powertune_by_default;
+};
+
+struct si_dyn_powertune_data
+{
+ u32 cac_leakage;
+ s32 leakage_minimum_temperature;
+ u32 wintime;
+ u32 l2_lta_window_size;
+ u8 lts_truncate;
+ u8 shift_n;
+ u8 dc_pwr_value;
+ bool disable_uvd_powertune;
+};
+
+struct si_dte_data
+{
+ u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+ u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+ u32 k;
+ u32 t0;
+ u32 max_t;
+ u8 window_size;
+ u8 temp_select;
+ u8 dte_mode;
+ u8 tdep_count;
+ u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+ u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+ u32 tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+ u32 t_threshold;
+ bool enable_dte_by_default;
+};
+
+struct si_clock_registers {
+ u32 cg_spll_func_cntl;
+ u32 cg_spll_func_cntl_2;
+ u32 cg_spll_func_cntl_3;
+ u32 cg_spll_func_cntl_4;
+ u32 cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2;
+ u32 dll_cntl;
+ u32 mclk_pwrmgt_cntl;
+ u32 mpll_ad_func_cntl;
+ u32 mpll_dq_func_cntl;
+ u32 mpll_func_cntl;
+ u32 mpll_func_cntl_1;
+ u32 mpll_func_cntl_2;
+ u32 mpll_ss1;
+ u32 mpll_ss2;
+};
+
+struct si_mc_reg_entry {
+ u32 mclk_max;
+ u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct si_mc_reg_table {
+ u8 last;
+ u8 num_entries;
+ u16 valid_flag;
+ struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct si_leakage_voltage_entry
+{
+ u16 voltage;
+ u16 leakage_index;
+};
+
+struct si_leakage_voltage
+{
+ u16 count;
+ struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
+};
+
+struct si_ulv_param {
+ bool supported;
+ u32 cg_ulv_control;
+ u32 cg_ulv_parameter;
+ u32 volt_change_delay;
+ struct rv7xx_pl pl;
+ bool one_pcie_lane_in_ulv;
+};
+
+struct si_power_info {
+ /* must be first! */
+ struct ni_power_info ni;
+ struct si_clock_registers clock_registers;
+ struct si_mc_reg_table mc_reg_table;
+ struct atom_voltage_table mvdd_voltage_table;
+ struct atom_voltage_table vddc_phase_shed_table;
+ struct si_leakage_voltage leakage_voltage;
+ u16 mvdd_bootup_value;
+ struct si_ulv_param ulv;
+ u32 max_cu;
+ /* pcie gen */
+ enum amdgpu_pcie_gen force_pcie_gen;
+ enum amdgpu_pcie_gen boot_pcie_gen;
+ enum amdgpu_pcie_gen acpi_pcie_gen;
+ u32 sys_pcie_mask;
+ /* flags */
+ bool enable_dte;
+ bool enable_ppm;
+ bool vddc_phase_shed_control;
+ bool pspp_notify_required;
+ bool sclk_deep_sleep_above_low;
+ bool voltage_control_svi2;
+ bool vddci_control_svi2;
+ /* smc offsets */
+ u32 sram_end;
+ u32 state_table_start;
+ u32 soft_regs_start;
+ u32 mc_reg_table_start;
+ u32 arb_table_start;
+ u32 cac_table_start;
+ u32 dte_table_start;
+ u32 spll_table_start;
+ u32 papm_cfg_table_start;
+ u32 fan_table_start;
+ /* CAC stuff */
+ const struct si_cac_config_reg *cac_weights;
+ const struct si_cac_config_reg *lcac_config;
+ const struct si_cac_config_reg *cac_override;
+ const struct si_powertune_data *powertune_data;
+ struct si_dyn_powertune_data dyn_powertune_data;
+ /* DTE stuff */
+ struct si_dte_data dte_data;
+ /* scratch structs */
+ SMC_SIslands_MCRegisters smc_mc_reg_table;
+ SISLANDS_SMC_STATETABLE smc_statetable;
+ PP_SIslands_PAPMParameters papm_parm;
+ /* SVI2 */
+ u8 svd_gpio_id;
+ u8 svc_gpio_id;
+ /* fan control */
+ bool fan_ctrl_is_in_default_mode;
+ u32 t_min;
+ u32 fan_ctrl_default_mode;
+ bool fan_is_controlled_by_smc;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
new file mode 100644
index 000000000000..8fae3d4a2360
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "si/sid.h"
+#include "si_ih.h"
+
+static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+static void si_ih_enable_interrupts(struct amdgpu_device *adev)
+{
+ u32 ih_cntl = RREG32(IH_CNTL);
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+ ih_cntl |= ENABLE_INTR;
+ ih_rb_cntl |= IH_RB_ENABLE;
+ WREG32(IH_CNTL, ih_cntl);
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ adev->irq.ih.enabled = true;
+}
+
+static void si_ih_disable_interrupts(struct amdgpu_device *adev)
+{
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+ u32 ih_cntl = RREG32(IH_CNTL);
+
+ ih_rb_cntl &= ~IH_RB_ENABLE;
+ ih_cntl &= ~ENABLE_INTR;
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ WREG32(IH_CNTL, ih_cntl);
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+ adev->irq.ih.enabled = false;
+ adev->irq.ih.rptr = 0;
+}
+
+static int si_ih_irq_init(struct amdgpu_device *adev)
+{
+ int rb_bufsz;
+ u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+ u64 wptr_off;
+
+ si_ih_disable_interrupts(adev);
+ WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
+ interrupt_cntl = RREG32(INTERRUPT_CNTL);
+ interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+ interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+ WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+ WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
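+ /* the ring size field takes log2 of the ring size in dwords */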
+ rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
+
+ ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
+ IH_WPTR_OVERFLOW_CLEAR |
+ (rb_bufsz << 1) |
+ IH_WPTR_WRITEBACK_ENABLE;
+
+ wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
+ WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
+ WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+
+ ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
+ if (adev->irq.msi_enabled)
+ ih_cntl |= RPTR_REARM;
+ WREG32(IH_CNTL, ih_cntl);
+
+ pci_set_master(adev->pdev);
+ si_ih_enable_interrupts(adev);
+
+ return 0;
+}
+
+static void si_ih_irq_disable(struct amdgpu_device *adev)
+{
+ si_ih_disable_interrupts(adev);
+ mdelay(1);
+}
+
+static u32 si_ih_get_wptr(struct amdgpu_device *adev)
+{
+ u32 wptr, tmp;
+
+ wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+
+ if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
+ wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
+ adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+ WREG32(IH_RB_CNTL, tmp);
+ }
+ return (wptr & adev->irq.ih.ptr_mask);
+}
+
+static void si_ih_decode_iv(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ uint32_t dw[4];
+
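+ /* each SI IV ring entry is four dwords (16 bytes) */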
+ dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+ dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
+ dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+ entry->src_id = dw[0] & 0xff;
+ entry->src_data = dw[1] & 0xfffffff;
+ entry->ring_id = dw[2] & 0xff;
+ entry->vm_id = (dw[2] >> 8) & 0xff;
+
+ adev->irq.ih.rptr += 16;
+}
+
+static void si_ih_set_rptr(struct amdgpu_device *adev)
+{
+ WREG32(IH_RB_RPTR, adev->irq.ih.rptr);
+}
+
+static int si_ih_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ si_ih_set_interrupt_funcs(adev);
+
+ return 0;
+}
+
+static int si_ih_sw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+ if (r)
+ return r;
+
+ return amdgpu_irq_init(adev);
+}
+
+static int si_ih_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev);
+
+ return 0;
+}
+
+static int si_ih_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_ih_irq_init(adev);
+}
+
+static int si_ih_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ si_ih_irq_disable(adev);
+
+ return 0;
+}
+
+static int si_ih_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_ih_hw_fini(adev);
+}
+
+static int si_ih_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_ih_hw_init(adev);
+}
+
+static bool si_ih_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 tmp = RREG32(SRBM_STATUS);
+
+ if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+ return false;
+
+ return true;
+}
+
+static int si_ih_wait_for_idle(void *handle)
+{
+ unsigned i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (si_ih_is_idle(handle))
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int si_ih_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ u32 srbm_soft_reset = 0;
+ u32 tmp = RREG32(SRBM_STATUS);
+
+ if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
+
+ if (srbm_soft_reset) {
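+ /* assert the reset, wait, then release it; the read-backs post the writes */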
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+ }
+
+ return 0;
+}
+
+static int si_ih_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int si_ih_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs si_ih_ip_funcs = {
+ .name = "si_ih",
+ .early_init = si_ih_early_init,
+ .late_init = NULL,
+ .sw_init = si_ih_sw_init,
+ .sw_fini = si_ih_sw_fini,
+ .hw_init = si_ih_hw_init,
+ .hw_fini = si_ih_hw_fini,
+ .suspend = si_ih_suspend,
+ .resume = si_ih_resume,
+ .is_idle = si_ih_is_idle,
+ .wait_for_idle = si_ih_wait_for_idle,
+ .soft_reset = si_ih_soft_reset,
+ .set_clockgating_state = si_ih_set_clockgating_state,
+ .set_powergating_state = si_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs si_ih_funcs = {
+ .get_wptr = si_ih_get_wptr,
+ .decode_iv = si_ih_decode_iv,
+ .set_rptr = si_ih_set_rptr
+};
+
+static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+ if (adev->irq.ih_funcs == NULL)
+ adev->irq.ih_funcs = &si_ih_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h
new file mode 100644
index 000000000000..f3e3a954369c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SI_IH_H__
+#define __SI_IH_H__
+
+extern const struct amd_ip_funcs si_ih_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/amdgpu/si_smc.c
new file mode 100644
index 000000000000..668ba99d6c05
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_smc.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "si/sid.h"
+#include "ppsmc.h"
+#include "amdgpu_ucode.h"
+#include "sislands_smc.h"
+
+static int si_set_smc_sram_address(struct amdgpu_device *adev,
+ u32 smc_address, u32 limit)
+{
+ if (smc_address & 3)
+ return -EINVAL;
+ if ((smc_address + 3) > limit)
+ return -EINVAL;
+
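+ /* program the indirect index and clear auto-increment for a single access */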
+ WREG32(SMC_IND_INDEX_0, smc_address);
+ WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+ return 0;
+}
+
+int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit)
+{
+ unsigned long flags;
+ int ret = 0;
+ u32 data, original_data, addr, extra_shift;
+
+ if (smc_start_address & 3)
+ return -EINVAL;
+ if ((smc_start_address + byte_count) > limit)
+ return -EINVAL;
+
+ addr = smc_start_address;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ while (byte_count >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+ ret = si_set_smc_sram_address(adev, addr, limit);
+ if (ret)
+ goto done;
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ /* RMW for the final bytes */
+ if (byte_count > 0) {
+ data = 0;
+
+ ret = si_set_smc_sram_address(adev, addr, limit);
+ if (ret)
+ goto done;
+
+ original_data = RREG32(SMC_IND_DATA_0);
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+ /* SMC address space is BE */
+ data = (data << 8) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ ret = si_set_smc_sram_address(adev, addr, limit);
+ if (ret)
+ goto done;
+
+ WREG32(SMC_IND_DATA_0, data);
+ }
+
+done:
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+ return ret;
+}
+
+void amdgpu_si_start_smc(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+
+ tmp &= ~RST_REG;
+
+ WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+void amdgpu_si_reset_smc(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
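+ /* dummy read-backs to make sure outstanding register writes have
+ * landed before the SMC reset is asserted
+ */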
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+
+ tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
+ RST_REG;
+ WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev)
+{
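+ /* write a jump at SMC address 0 so the controller branches into the
+ * freshly loaded firmware when it is released from reset
+ */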
+ static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
+
+ return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
+}
+
+void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+ if (enable)
+ tmp &= ~CK_DISABLE;
+ else
+ tmp |= CK_DISABLE;
+
+ WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+bool amdgpu_si_is_smc_running(struct amdgpu_device *adev)
+{
+ u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+ u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+ if (!(rst & RST_REG) && !(clk & CK_DISABLE))
+ return true;
+
+ return false;
+}
+
+PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
+ PPSMC_Msg msg)
+{
+ u32 tmp;
+ int i;
+
+ if (!amdgpu_si_is_smc_running(adev))
+ return PPSMC_Result_Failed;
+
+ WREG32(SMC_MESSAGE_0, msg);
+
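+ /* poll until the SMC latches a non-zero response */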
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(SMC_RESP_0);
+ if (tmp != 0)
+ break;
+ udelay(1);
+ }
+
+ return (PPSMC_Result)RREG32(SMC_RESP_0);
+}
+
+PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ int i;
+
+ if (!amdgpu_si_is_smc_running(adev))
+ return PPSMC_Result_OK;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+ if ((tmp & CKEN) == 0)
+ break;
+ udelay(1);
+ }
+
+ return PPSMC_Result_OK;
+}
+
+int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
+{
+ const struct smc_firmware_header_v1_0 *hdr;
+ unsigned long flags;
+ u32 ucode_start_address;
+ u32 ucode_size;
+ const u8 *src;
+ u32 data;
+
+ if (!adev->pm.fw)
+ return -EINVAL;
+
+ hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+
+ amdgpu_ucode_print_smc_hdr(&hdr->header);
+
+ adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+ ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+ src = (const u8 *)
+ (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ if (ucode_size & 3)
+ return -EINVAL;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(SMC_IND_INDEX_0, ucode_start_address);
+ WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+ while (ucode_size >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ src += 4;
+ ucode_size -= 4;
+ }
+ WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+ return 0;
+}
+
+int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+ u32 *value, u32 limit)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ ret = si_set_smc_sram_address(adev, smc_address, limit);
+ if (ret == 0)
+ *value = RREG32(SMC_IND_DATA_0);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+ return ret;
+}
+
+int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+ u32 value, u32 limit)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ ret = si_set_smc_sram_address(adev, smc_address, limit);
+ if (ret == 0)
+ WREG32(SMC_IND_DATA_0, value);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+ return ret;
+}
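
[Editor's note] Each of the two dword helpers above takes smc_idx_lock only for its own access, so a read-modify-write built on top of them is not atomic as a whole; a concurrent writer could slip in between the read and the write. A sketch of such a composite (hypothetical helper, with that caveat):

/* Hypothetical sketch: set bits in one SMC SRAM dword using the two
 * helpers above. Each call locks smc_idx_lock on its own, so another
 * writer could interleave between the read and the write. */
static int si_set_smc_sram_bits(struct amdgpu_device *adev, u32 addr,
				u32 bits, u32 limit)
{
	u32 val;
	int ret;

	ret = amdgpu_si_read_smc_sram_dword(adev, addr, &val, limit);
	if (ret)
		return ret;

	return amdgpu_si_write_smc_sram_dword(adev, addr, val | bits, limit);
}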
diff --git a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
new file mode 100644
index 000000000000..d2930eceaf3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
@@ -0,0 +1,423 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef PP_SISLANDS_SMC_H
+#define PP_SISLANDS_SMC_H
+
+#include "ppsmc.h"
+
+#pragma pack(push, 1)
+
+#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
+
+struct PP_SIslands_Dpm2PerfLevel
+{
+ uint8_t MaxPS;
+ uint8_t TgtAct;
+ uint8_t MaxPS_StepInc;
+ uint8_t MaxPS_StepDec;
+ uint8_t PSSamplingTime;
+ uint8_t NearTDPDec;
+ uint8_t AboveSafeInc;
+ uint8_t BelowSafeInc;
+ uint8_t PSDeltaLimit;
+ uint8_t PSDeltaWin;
+ uint16_t PwrEfficiencyRatio;
+ uint8_t Reserved[4];
+};
+
+typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel;
+
+struct PP_SIslands_DPM2Status
+{
+ uint32_t dpm2Flags;
+ uint8_t CurrPSkip;
+ uint8_t CurrPSkipPowerShift;
+ uint8_t CurrPSkipTDP;
+ uint8_t CurrPSkipOCP;
+ uint8_t MaxSPLLIndex;
+ uint8_t MinSPLLIndex;
+ uint8_t CurrSPLLIndex;
+ uint8_t InfSweepMode;
+ uint8_t InfSweepDir;
+ uint8_t TDPexceeded;
+ uint8_t reserved;
+ uint8_t SwitchDownThreshold;
+ uint32_t SwitchDownCounter;
+ uint32_t SysScalingFactor;
+};
+
+typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status;
+
+struct PP_SIslands_DPM2Parameters
+{
+ uint32_t TDPLimit;
+ uint32_t NearTDPLimit;
+ uint32_t SafePowerLimit;
+ uint32_t PowerBoostLimit;
+ uint32_t MinLimitDelta;
+};
+typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters;
+
+struct PP_SIslands_PAPMStatus
+{
+ uint32_t EstimatedDGPU_T;
+ uint32_t EstimatedDGPU_P;
+ uint32_t EstimatedAPU_T;
+ uint32_t EstimatedAPU_P;
+ uint8_t dGPU_T_Limit_Exceeded;
+ uint8_t reserved[3];
+};
+typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus;
+
+struct PP_SIslands_PAPMParameters
+{
+ uint32_t NearTDPLimitTherm;
+ uint32_t NearTDPLimitPAPM;
+ uint32_t PlatformPowerLimit;
+ uint32_t dGPU_T_Limit;
+ uint32_t dGPU_T_Warning;
+ uint32_t dGPU_T_Hysteresis;
+};
+typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters;
+
+struct SISLANDS_SMC_SCLK_VALUE
+{
+ uint32_t vCG_SPLL_FUNC_CNTL;
+ uint32_t vCG_SPLL_FUNC_CNTL_2;
+ uint32_t vCG_SPLL_FUNC_CNTL_3;
+ uint32_t vCG_SPLL_FUNC_CNTL_4;
+ uint32_t vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t sclk_value;
+};
+
+typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE;
+
+struct SISLANDS_SMC_MCLK_VALUE
+{
+ uint32_t vMPLL_FUNC_CNTL;
+ uint32_t vMPLL_FUNC_CNTL_1;
+ uint32_t vMPLL_FUNC_CNTL_2;
+ uint32_t vMPLL_AD_FUNC_CNTL;
+ uint32_t vMPLL_DQ_FUNC_CNTL;
+ uint32_t vMCLK_PWRMGT_CNTL;
+ uint32_t vDLL_CNTL;
+ uint32_t vMPLL_SS;
+ uint32_t vMPLL_SS2;
+ uint32_t mclk_value;
+};
+
+typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE;
+
+struct SISLANDS_SMC_VOLTAGE_VALUE
+{
+ uint16_t value;
+ uint8_t index;
+ uint8_t phase_settings;
+};
+
+typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE;
+
+struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
+{
+ uint8_t ACIndex;
+ uint8_t displayWatermark;
+ uint8_t gen2PCIE;
+ uint8_t UVDWatermark;
+ uint8_t VCEWatermark;
+ uint8_t strobeMode;
+ uint8_t mcFlags;
+ uint8_t padding;
+ uint32_t aT;
+ uint32_t bSP;
+ SISLANDS_SMC_SCLK_VALUE sclk;
+ SISLANDS_SMC_MCLK_VALUE mclk;
+ SISLANDS_SMC_VOLTAGE_VALUE vddc;
+ SISLANDS_SMC_VOLTAGE_VALUE mvdd;
+ SISLANDS_SMC_VOLTAGE_VALUE vddci;
+ SISLANDS_SMC_VOLTAGE_VALUE std_vddc;
+ uint8_t hysteresisUp;
+ uint8_t hysteresisDown;
+ uint8_t stateFlags;
+ uint8_t arbRefreshState;
+ uint32_t SQPowerThrottle;
+ uint32_t SQPowerThrottle_2;
+ uint32_t MaxPoweredUpCU;
+ SISLANDS_SMC_VOLTAGE_VALUE high_temp_vddc;
+ SISLANDS_SMC_VOLTAGE_VALUE low_temp_vddc;
+ uint32_t reserved[2];
+ PP_SIslands_Dpm2PerfLevel dpm2;
+};
+
+#define SISLANDS_SMC_STROBE_RATIO 0x0F
+#define SISLANDS_SMC_STROBE_ENABLE 0x10
+
+#define SISLANDS_SMC_MC_EDC_RD_FLAG 0x01
+#define SISLANDS_SMC_MC_EDC_WR_FLAG 0x02
+#define SISLANDS_SMC_MC_RTT_ENABLE 0x04
+#define SISLANDS_SMC_MC_STUTTER_EN 0x08
+#define SISLANDS_SMC_MC_PG_EN 0x10
+
+typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL;
+
+struct SISLANDS_SMC_SWSTATE
+{
+ uint8_t flags;
+ uint8_t levelCount;
+ uint8_t padding2;
+ uint8_t padding3;
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1];
+};
+
+typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
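
[Editor's note] SISLANDS_SMC_SWSTATE carries a trailing levels[1] array, the pre-C99 flexible-array idiom, so one performance level is already counted in sizeof(). A state with n levels would plausibly be sized like this (hypothetical macro, shown for illustration):

/* One level is included in sizeof(SISLANDS_SMC_SWSTATE), hence n - 1. */
#define SISLANDS_SMC_SWSTATE_SIZE(n) \
	(sizeof(SISLANDS_SMC_SWSTATE) + \
	 ((n) - 1) * sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL))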
+
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0
+#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1
+#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
+#define SISLANDS_SMC_VOLTAGEMASK_MAX 4
+
+struct SISLANDS_SMC_VOLTAGEMASKTABLE
+{
+ uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
+
+#define SISLANDS_MAX_NO_VREG_STEPS 32
+
+struct SISLANDS_SMC_STATETABLE
+{
+ uint8_t thermalProtectType;
+ uint8_t systemFlags;
+ uint8_t maxVDDCIndexInPPTable;
+ uint8_t extraFlags;
+ uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
+ SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+ SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable;
+ PP_SIslands_DPM2Parameters dpm2Params;
+ SISLANDS_SMC_SWSTATE initialState;
+ SISLANDS_SMC_SWSTATE ACPIState;
+ SISLANDS_SMC_SWSTATE ULVState;
+ SISLANDS_SMC_SWSTATE driverState;
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+};
+
+typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
+
+#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout 0x0
+#define SI_SMC_SOFT_REGISTER_delay_vreg 0xC
+#define SI_SMC_SOFT_REGISTER_delay_acpi 0x28
+#define SI_SMC_SOFT_REGISTER_seq_index 0x5C
+#define SI_SMC_SOFT_REGISTER_mvdd_chg_time 0x60
+#define SI_SMC_SOFT_REGISTER_mclk_switch_lim 0x70
+#define SI_SMC_SOFT_REGISTER_watermark_threshold 0x78
+#define SI_SMC_SOFT_REGISTER_phase_shedding_delay 0x88
+#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay 0x8C
+#define SI_SMC_SOFT_REGISTER_mc_block_delay 0x98
+#define SI_SMC_SOFT_REGISTER_ticks_per_us 0xA8
+#define SI_SMC_SOFT_REGISTER_crtc_index 0xC4
+#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8
+#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC
+#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4
+#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC
+#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100
+#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type 0x118
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120
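
[Editor's note] These offsets are relative to the SMC soft-register area, whose base the driver is expected to discover at runtime by reading SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + SISLANDS_SMC_FIRMWARE_HEADER_softRegisters (both defined further down). A hedged sketch of a soft-register write under that assumption:

/* Hypothetical helper: soft_regs_start is assumed to have been read
 * earlier from the firmware header's softRegisters slot. */
static int si_write_smc_soft_register(struct amdgpu_device *adev,
				      u32 soft_regs_start, u16 reg_offset,
				      u32 value, u32 limit)
{
	return amdgpu_si_write_smc_sram_dword(adev,
					      soft_regs_start + reg_offset,
					      value, limit);
}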
+
+struct PP_SIslands_FanTable
+{
+ uint8_t fdo_mode;
+ uint8_t padding;
+ int16_t temp_min;
+ int16_t temp_med;
+ int16_t temp_max;
+ int16_t slope1;
+ int16_t slope2;
+ int16_t fdo_min;
+ int16_t hys_up;
+ int16_t hys_down;
+ int16_t hys_slope;
+ int16_t temp_resp_lim;
+ int16_t temp_curr;
+ int16_t slope_curr;
+ int16_t pwm_curr;
+ uint32_t refresh_period;
+ int16_t fdo_max;
+ uint8_t temp_src;
+ int8_t padding2;
+};
+
+typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
+
+#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
+
+#define SMC_SISLANDS_SCALE_I 7
+#define SMC_SISLANDS_SCALE_R 12
+
+struct PP_SIslands_CacConfig
+{
+ uint16_t cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
+ uint32_t lkge_lut_V0;
+ uint32_t lkge_lut_Vstep;
+ uint32_t WinTime;
+ uint32_t R_LL;
+ uint32_t calculation_repeats;
+ uint32_t l2numWin_TDP;
+ uint32_t dc_cac;
+ uint8_t lts_truncate_n;
+ uint8_t SHIFT_N;
+ uint8_t log2_PG_LKG_SCALE;
+ uint8_t cac_temp;
+ uint32_t lkge_lut_T0;
+ uint32_t lkge_lut_Tstep;
+};
+
+typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig;
+
+#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16
+#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
+
+struct SMC_SIslands_MCRegisterAddress
+{
+ uint16_t s0;
+ uint16_t s1;
+};
+
+typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress;
+
+struct SMC_SIslands_MCRegisterSet
+{
+ uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet;
+
+struct SMC_SIslands_MCRegisters
+{
+ uint8_t last;
+ uint8_t reserved[3];
+ SMC_SIslands_MCRegisterAddress address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+ SMC_SIslands_MCRegisterSet data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters;
+
+struct SMC_SIslands_MCArbDramTimingRegisterSet
+{
+ uint32_t mc_arb_dram_timing;
+ uint32_t mc_arb_dram_timing2;
+ uint8_t mc_arb_rfsh_rate;
+ uint8_t mc_arb_burst_time;
+ uint8_t padding[2];
+};
+
+typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet;
+
+struct SMC_SIslands_MCArbDramTimingRegisters
+{
+ uint8_t arb_current;
+ uint8_t reserved[3];
+ SMC_SIslands_MCArbDramTimingRegisterSet data[16];
+};
+
+typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters;
+
+struct SMC_SISLANDS_SPLL_DIV_TABLE
+{
+ uint32_t freq[256];
+ uint32_t ss[256];
+};
+
+#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK 0x01ffffff
+#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
+#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK 0xfe000000
+#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT 25
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK 0x000fffff
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT 0
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK 0xfff00000
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT 20
+
+typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE;
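
[Editor's note] Each freq[] entry packs the SPLL feedback and post dividers, and each ss[] entry packs the spread-spectrum CLKV/CLKS pair; the masks and shifts above recover the fields. A standalone demo of the unpacking (demo code, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK  0x01ffffff
#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK   0xfe000000
#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT  25

int main(void)
{
	/* one freq[] entry: pdiv = 3, fbdiv = 0x1234 */
	uint32_t entry = (3u << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) | 0x1234u;

	printf("fbdiv=%u pdiv=%u\n",
	       (entry & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) >>
	       SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT,
	       (entry & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK) >>
	       SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT);
	return 0;
}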
+
+#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5
+
+#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16
+
+struct Smc_SIslands_DTE_Configuration
+{
+ uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+ uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+ uint32_t K;
+ uint32_t T0;
+ uint32_t MaxT;
+ uint8_t WindowSize;
+ uint8_t Tdep_count;
+ uint8_t temp_select;
+ uint8_t DTE_mode;
+ uint8_t T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+ uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+ uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+ uint32_t Tthreshold;
+};
+
+typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration;
+
+#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1
+
+#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000
+
+#define SISLANDS_SMC_FIRMWARE_HEADER_version 0x0
+#define SISLANDS_SMC_FIRMWARE_HEADER_flags 0x4
+#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters 0xC
+#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable 0x10
+#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable 0x14
+#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable 0x18
+#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable 0x24
+#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30
+#define SISLANDS_SMC_FIRMWARE_HEADER_spllTable 0x38
+#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration 0x40
+#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters 0x48
+
+#pragma pack(pop)
+
+int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit);
+void amdgpu_si_start_smc(struct amdgpu_device *adev);
+void amdgpu_si_reset_smc(struct amdgpu_device *adev);
+int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev);
+void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable);
+bool amdgpu_si_is_smc_running(struct amdgpu_device *adev);
+PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg);
+PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev);
+int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit);
+int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+ u32 *value, u32 limit);
+int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+ u32 value, u32 limit);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
deleted file mode 100644
index f06f6f4dc3a8..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "tonga_smum.h"
-
-MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
-
-static void tonga_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int tonga_dpm_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- tonga_dpm_set_funcs(adev);
-
- return 0;
-}
-
-static int tonga_dpm_init_microcode(struct amdgpu_device *adev)
-{
- char fw_name[30] = "amdgpu/tonga_smc.bin";
- int err;
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
- if (err) {
- DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
- return err;
-}
-
-static int tonga_dpm_sw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ret = tonga_dpm_init_microcode(adev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int tonga_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
-
- return 0;
-}
-
-static int tonga_dpm_hw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- /* smu init only needs to be called at startup, not resume.
- * It should be in sw_init, but requires the fw info gathered
- * in sw_init from other IP modules.
- */
- ret = tonga_smu_init(adev);
- if (ret) {
- DRM_ERROR("SMU initialization failed\n");
- goto fail;
- }
-
- ret = tonga_smu_start(adev);
- if (ret) {
- DRM_ERROR("SMU start failed\n");
- goto fail;
- }
-
- mutex_unlock(&adev->pm.mutex);
- return 0;
-
-fail:
- adev->firmware.smu_load = false;
- mutex_unlock(&adev->pm.mutex);
- return -EINVAL;
-}
-
-static int tonga_dpm_hw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
- /* smu fini only needs to be called at teardown, not suspend.
- * It should be in sw_fini, but we put it here for symmetry
- * with smu init.
- */
- tonga_smu_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- return 0;
-}
-
-static int tonga_dpm_suspend(void *handle)
-{
- return tonga_dpm_hw_fini(handle);
-}
-
-static int tonga_dpm_resume(void *handle)
-{
- return tonga_dpm_hw_init(handle);
-}
-
-static int tonga_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int tonga_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-const struct amd_ip_funcs tonga_dpm_ip_funcs = {
- .name = "tonga_dpm",
- .early_init = tonga_dpm_early_init,
- .late_init = NULL,
- .sw_init = tonga_dpm_sw_init,
- .sw_fini = tonga_dpm_sw_fini,
- .hw_init = tonga_dpm_hw_init,
- .hw_fini = tonga_dpm_hw_fini,
- .suspend = tonga_dpm_suspend,
- .resume = tonga_dpm_resume,
- .is_idle = NULL,
- .wait_for_idle = NULL,
- .soft_reset = NULL,
- .set_clockgating_state = tonga_dpm_set_clockgating_state,
- .set_powergating_state = tonga_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs tonga_dpm_funcs = {
- .get_temperature = NULL,
- .pre_set_power_state = NULL,
- .set_power_state = NULL,
- .post_set_power_state = NULL,
- .display_configuration_changed = NULL,
- .get_sclk = NULL,
- .get_mclk = NULL,
- .print_power_state = NULL,
- .debugfs_print_current_performance_level = NULL,
- .force_performance_level = NULL,
- .vblank_too_short = NULL,
- .powergate_uvd = NULL,
-};
-
-static void tonga_dpm_set_funcs(struct amdgpu_device *adev)
-{
- if (NULL == adev->pm.funcs)
- adev->pm.funcs = &tonga_dpm_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index c92055805a45..d127d59f953a 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -373,10 +373,10 @@ static int tonga_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int tonga_ih_soft_reset(void *handle)
+static int tonga_ih_check_soft_reset(void *handle)
{
- u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
@@ -384,6 +384,48 @@ static int tonga_ih_soft_reset(void *handle)
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true;
+ adev->irq.srbm_soft_reset = srbm_soft_reset;
+ } else {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false;
+ adev->irq.srbm_soft_reset = 0;
+ }
+
+ return 0;
+}
+
+static int tonga_ih_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ return 0;
+
+ return tonga_ih_hw_fini(adev);
+}
+
+static int tonga_ih_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ return 0;
+
+ return tonga_ih_hw_init(adev);
+}
+
+static int tonga_ih_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ return 0;
+ srbm_soft_reset = adev->irq.srbm_soft_reset;
+
+ if (srbm_soft_reset) {
+ u32 tmp;
+
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -427,7 +469,10 @@ const struct amd_ip_funcs tonga_ih_ip_funcs = {
.resume = tonga_ih_resume,
.is_idle = tonga_ih_is_idle,
.wait_for_idle = tonga_ih_wait_for_idle,
+ .check_soft_reset = tonga_ih_check_soft_reset,
+ .pre_soft_reset = tonga_ih_pre_soft_reset,
.soft_reset = tonga_ih_soft_reset,
+ .post_soft_reset = tonga_ih_post_soft_reset,
.set_clockgating_state = tonga_ih_set_clockgating_state,
.set_powergating_state = tonga_ih_set_powergating_state,
};
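
[Editor's note] The tonga_ih hunks above split the old monolithic soft_reset into a staged flow: check_soft_reset latches the hang flag and the SRBM reset bits, pre_soft_reset quiesces the block (hw_fini when hung), soft_reset pulses SRBM_SOFT_RESET, and post_soft_reset brings the block back up (hw_init). A hedged sketch of the ordering the core presumably drives; the real sequencing lives in amdgpu_device.c and spans all IP blocks:

/* Hypothetical sketch of the ordering implied by the new hooks. */
static int reset_one_ip_block(void *handle, const struct amd_ip_funcs *f)
{
	int r;

	r = f->check_soft_reset(handle);   /* latch hang + reset bits */
	if (r)
		return r;

	r = f->pre_soft_reset(handle);     /* quiesce: hw_fini if hung */
	if (r)
		return r;

	r = f->soft_reset(handle);         /* pulse SRBM_SOFT_RESET */
	if (r)
		return r;

	return f->post_soft_reset(handle); /* re-init: hw_init if hung */
}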
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
deleted file mode 100644
index 940de1836f8f..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ /dev/null
@@ -1,862 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "tonga_ppsmc.h"
-#include "tonga_smum.h"
-#include "smu_ucode_xfer_vi.h"
-#include "amdgpu_ucode.h"
-
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-#define TONGA_SMC_SIZE 0x20000
-
-static int tonga_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
-{
- uint32_t val;
-
- if (smc_address & 3)
- return -EINVAL;
-
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(mmSMC_IND_INDEX_0, smc_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- return 0;
-}
-
-static int tonga_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
-{
- uint32_t addr;
- uint32_t data, orig_data;
- int result = 0;
- uint32_t extra_shift;
- unsigned long flags;
-
- if (smc_start_address & 3)
- return -EINVAL;
-
- if ((smc_start_address + byte_count) > limit)
- return -EINVAL;
-
- addr = smc_start_address;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- while (byte_count >= 4) {
-		/* Bytes are written into the SMC address space with the MSB first */
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
- result = tonga_set_smc_sram_address(adev, addr, limit);
-
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- if (0 != byte_count) {
-		/* Now write the remaining odd bytes using a read-modify-write cycle */
- data = 0;
-
- result = tonga_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- orig_data = RREG32(mmSMC_IND_DATA_0);
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- data = (data << 8) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
- data |= (orig_data & ~((~0UL) << extra_shift));
-
- result = tonga_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
- }
-
-out:
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int tonga_program_jump_on_start(struct amdgpu_device *adev)
-{
- static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
- tonga_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-
- return 0;
-}
-
-static bool tonga_is_smc_ram_running(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
-
- return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
-}
-
-static int wait_smu_response(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32(mmSMC_RESP_0);
- if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-
-static int tonga_send_msg_to_smc_offset(struct amdgpu_device *adev)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, 0x20000);
- WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send message\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
- if (!tonga_is_smc_ram_running(adev))
- {
- return -EINVAL;
- }
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send message\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tonga_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
- PPSMC_Msg msg)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
-static int tonga_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg,
- uint32_t parameter)
-{
- if (!tonga_is_smc_ram_running(adev))
- return -EINVAL;
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return tonga_send_msg_to_smc(adev, msg);
-}
-
-static int tonga_send_msg_to_smc_with_parameter_without_waiting(
- struct amdgpu_device *adev,
- PPSMC_Msg msg, uint32_t parameter)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return tonga_send_msg_to_smc_without_waiting(adev, msg);
-}
-
-#if 0 /* not used yet */
-static int tonga_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- if (!tonga_is_smc_ram_running(adev))
- return -EINVAL;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-#endif
-
-static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev)
-{
- const struct smc_firmware_header_v1_0 *hdr;
- uint32_t ucode_size;
- uint32_t ucode_start_address;
- const uint8_t *src;
- uint32_t val;
- uint32_t byte_count;
- uint32_t *data;
- unsigned long flags;
-
- if (!adev->pm.fw)
- return -EINVAL;
-
- /* Skip SMC ucode loading on SR-IOV capable boards.
- * vbios does this for us in asic_init in that case.
- */
- if (adev->virtualization.supports_sr_iov)
- return 0;
-
- hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
- amdgpu_ucode_print_smc_hdr(&hdr->header);
-
- adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
- ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
- src = (const uint8_t *)
- (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
- if (ucode_size & 3) {
- DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
- return -EINVAL;
- }
-
- if (ucode_size > TONGA_SMC_SIZE) {
- DRM_ERROR("SMC address is beyond the SMC RAM area\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- byte_count = ucode_size;
- data = (uint32_t *)src;
- for (; byte_count >= 4; data++, byte_count -= 4)
- WREG32(mmSMC_IND_DATA_0, data[0]);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return 0;
-}
-
-#if 0 /* not used yet */
-static int tonga_read_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t *value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = tonga_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- *value = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int tonga_write_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = tonga_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- WREG32(mmSMC_IND_DATA_0, value);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int tonga_smu_stop_smc(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- return 0;
-}
-#endif
-
-static enum AMDGPU_UCODE_ID tonga_convert_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- return AMDGPU_UCODE_ID_SDMA0;
- case UCODE_ID_SDMA1:
- return AMDGPU_UCODE_ID_SDMA1;
- case UCODE_ID_CP_CE:
- return AMDGPU_UCODE_ID_CP_CE;
- case UCODE_ID_CP_PFP:
- return AMDGPU_UCODE_ID_CP_PFP;
- case UCODE_ID_CP_ME:
- return AMDGPU_UCODE_ID_CP_ME;
- case UCODE_ID_CP_MEC:
- case UCODE_ID_CP_MEC_JT1:
- return AMDGPU_UCODE_ID_CP_MEC1;
- case UCODE_ID_CP_MEC_JT2:
- return AMDGPU_UCODE_ID_CP_MEC2;
- case UCODE_ID_RLC_G:
- return AMDGPU_UCODE_ID_RLC_G;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return AMDGPU_UCODE_ID_MAXIMUM;
- }
-}
-
-static int tonga_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
- uint32_t fw_type,
- struct SMU_Entry *entry)
-{
- enum AMDGPU_UCODE_ID id = tonga_convert_fw_type(fw_type);
- struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
- const struct gfx_firmware_header_v1_0 *header = NULL;
- uint64_t gpu_addr;
- uint32_t data_size;
-
- if (ucode->fw == NULL)
- return -EINVAL;
-
- gpu_addr = ucode->mc_addr;
- header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
- data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
- if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
- (fw_type == UCODE_ID_CP_MEC_JT2)) {
- gpu_addr += le32_to_cpu(header->jt_offset) << 2;
- data_size = le32_to_cpu(header->jt_size) << 2;
- }
-
- entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
- entry->id = (uint16_t)fw_type;
- entry->image_addr_high = upper_32_bits(gpu_addr);
- entry->image_addr_low = lower_32_bits(gpu_addr);
- entry->meta_data_addr_high = 0;
- entry->meta_data_addr_low = 0;
- entry->data_size_byte = data_size;
- entry->num_register_entries = 0;
-
- if (fw_type == UCODE_ID_RLC_G)
- entry->flags = 1;
- else
- entry->flags = 0;
-
- return 0;
-}
-
-static int tonga_smu_request_load_fw(struct amdgpu_device *adev)
-{
- struct tonga_smu_private_data *private = (struct tonga_smu_private_data *)adev->smu.priv;
- struct SMU_DRAMData_TOC *toc;
- uint32_t fw_to_load;
-
- WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
-
- tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
- tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
-
- toc = (struct SMU_DRAMData_TOC *)private->header;
- toc->num_entries = 0;
- toc->structure_version = 1;
-
- if (!adev->firmware.smu_load)
- return 0;
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for RLC\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for CE\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for PFP\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for ME\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA0\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA1\n");
- return -EINVAL;
- }
-
- tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
- tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
-
- fw_to_load = UCODE_ID_RLC_G_MASK |
- UCODE_ID_SDMA0_MASK |
- UCODE_ID_SDMA1_MASK |
- UCODE_ID_CP_CE_MASK |
- UCODE_ID_CP_ME_MASK |
- UCODE_ID_CP_PFP_MASK |
- UCODE_ID_CP_MEC_MASK;
-
- if (tonga_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
-		DRM_ERROR("Failed to request SMU load ucode\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static uint32_t tonga_smu_get_mask_for_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case AMDGPU_UCODE_ID_SDMA0:
- return UCODE_ID_SDMA0_MASK;
- case AMDGPU_UCODE_ID_SDMA1:
- return UCODE_ID_SDMA1_MASK;
- case AMDGPU_UCODE_ID_CP_CE:
- return UCODE_ID_CP_CE_MASK;
- case AMDGPU_UCODE_ID_CP_PFP:
- return UCODE_ID_CP_PFP_MASK;
- case AMDGPU_UCODE_ID_CP_ME:
- return UCODE_ID_CP_ME_MASK;
- case AMDGPU_UCODE_ID_CP_MEC1:
- return UCODE_ID_CP_MEC_MASK;
- case AMDGPU_UCODE_ID_CP_MEC2:
- return UCODE_ID_CP_MEC_MASK;
- case AMDGPU_UCODE_ID_RLC_G:
- return UCODE_ID_RLC_G_MASK;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return 0;
- }
-}
-
-static int tonga_smu_check_fw_load_finish(struct amdgpu_device *adev,
- uint32_t fw_type)
-{
- uint32_t fw_mask = tonga_smu_get_mask_for_fw_type(fw_type);
- int i;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("check firmware loading failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tonga_smu_start_in_protection_mode(struct amdgpu_device *adev)
-{
- int result;
- uint32_t val;
- int i;
-
- /* Assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- result = tonga_smu_upload_firmware_image(adev);
- if (result)
- return result;
-
- /* Clear status */
- WREG32_SMC(ixSMU_STATUS, 0);
-
- /* Enable clock */
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- /* De-assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- /* Set SMU Auto Start */
- val = RREG32_SMC(ixSMU_INPUT_DATA);
- val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
- WREG32_SMC(ixSMU_INPUT_DATA, val);
-
- /* Clear firmware interrupt enable flag */
- WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixRCU_UC_EVENTS);
- if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Interrupt is not enabled by firmware\n");
- return -EINVAL;
- }
-
- /* Call Test SMU message with 0x20000 offset
- * to trigger SMU start
- */
- tonga_send_msg_to_smc_offset(adev);
-
- /* Wait for done bit to be set */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixSMU_STATUS);
- if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Timeout for SMU start\n");
- return -EINVAL;
- }
-
- /* Check pass/failed indicator */
- val = RREG32_SMC(ixSMU_STATUS);
- if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
- DRM_ERROR("SMU Firmware start failed\n");
- return -EINVAL;
- }
-
- /* Wait for firmware to initialize */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixFIRMWARE_FLAGS);
- if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("SMU firmware initialization failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tonga_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
-{
- int i, result;
- uint32_t val;
-
- /* wait for smc boot up */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixRCU_UC_EVENTS);
- val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
- if (val)
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("SMC boot sequence is not completed\n");
- return -EINVAL;
- }
-
- /* Clear firmware interrupt enable flag */
- WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
- /* Assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- result = tonga_smu_upload_firmware_image(adev);
- if (result)
- return result;
-
- /* Set smc instruct start point at 0x0 */
- tonga_program_jump_on_start(adev);
-
- /* Enable clock */
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- /* De-assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- /* Wait for firmware to initialize */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixFIRMWARE_FLAGS);
- if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Timeout for SMC firmware initialization\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int tonga_smu_start(struct amdgpu_device *adev)
-{
- int result;
- uint32_t val;
-
- if (!tonga_is_smc_ram_running(adev)) {
- val = RREG32_SMC(ixSMU_FIRMWARE);
- if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
- result = tonga_smu_start_in_non_protection_mode(adev);
- if (result)
- return result;
- } else {
- result = tonga_smu_start_in_protection_mode(adev);
- if (result)
- return result;
- }
- }
-
- return tonga_smu_request_load_fw(adev);
-}
-
-static const struct amdgpu_smumgr_funcs tonga_smumgr_funcs = {
- .check_fw_load_finish = tonga_smu_check_fw_load_finish,
- .request_smu_load_fw = NULL,
- .request_smu_specific_fw = NULL,
-};
-
-int tonga_smu_init(struct amdgpu_device *adev)
-{
- struct tonga_smu_private_data *private;
- uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- uint32_t smu_internal_buffer_size = 200*4096;
- struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
- struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
- uint64_t mc_addr;
- void *toc_buf_ptr;
- void *smu_buf_ptr;
- int ret;
-
- private = kzalloc(sizeof(struct tonga_smu_private_data), GFP_KERNEL);
- if (NULL == private)
- return -ENOMEM;
-
- /* allocate firmware buffers */
- if (adev->firmware.smu_load)
- amdgpu_ucode_init_bo(adev);
-
- adev->smu.priv = private;
- adev->smu.fw_flags = 0;
-
- /* Allocate FW image data structure and header buffer */
- ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, toc_buf);
- if (ret) {
- DRM_ERROR("Failed to allocate memory for TOC buffer\n");
- return -ENOMEM;
- }
-
- /* Allocate buffer for SMU internal buffer */
- ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, smu_buf);
- if (ret) {
- DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
- return -ENOMEM;
- }
-
- /* Retrieve GPU address for header buffer and internal buffer */
- ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to reserve the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to pin the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to map the TOC buffer\n");
- return -EINVAL;
- }
-
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- private->header_addr_low = lower_32_bits(mc_addr);
- private->header_addr_high = upper_32_bits(mc_addr);
- private->header = toc_buf_ptr;
-
- ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to reserve the SMU internal buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to pin the SMU internal buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to map the SMU internal buffer\n");
- return -EINVAL;
- }
-
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- private->smu_buffer_addr_low = lower_32_bits(mc_addr);
- private->smu_buffer_addr_high = upper_32_bits(mc_addr);
-
- adev->smu.smumgr_funcs = &tonga_smumgr_funcs;
-
- return 0;
-}
-
-int tonga_smu_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_unref(&adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- kfree(adev->smu.priv);
- adev->smu.priv = NULL;
- if (adev->firmware.fw_buf)
- amdgpu_ucode_fini_bo(adev);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 132e613ed674..f6c941550b8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -116,7 +116,7 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
return r;
@@ -526,6 +526,20 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
+static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 4; /* uvd_v4_2_ring_emit_ib */
+}
+
+static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 2 + /* uvd_v4_2_ring_emit_hdp_flush */
+ 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
+ 14; /* uvd_v4_2_ring_emit_fence x1 no user fence */
+}
+
/**
* uvd_v4_2_mc_resume - memory controller programming
*
@@ -756,6 +770,8 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
+ .get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
+ .get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
};
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 101de136ba63..400c16fe579e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -112,7 +112,7 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
return r;
@@ -577,6 +577,20 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
+static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 6; /* uvd_v5_0_ring_emit_ib */
+}
+
+static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 2 + /* uvd_v5_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
+ 14; /* uvd_v5_0_ring_emit_fence x1 no user fence */
+}
+
static bool uvd_v5_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -807,6 +821,8 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
+ .get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
};
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 7f21102bfb99..e0fd9f21ed95 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -116,7 +116,7 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
return r;
@@ -396,21 +396,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
uvd_v6_0_mc_resume(adev);
- /* Set dynamic clock gating in S/W control mode */
- if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
- uvd_v6_0_set_sw_clock_gating(adev);
- } else {
- /* disable clock gating */
- uint32_t data = RREG32(mmUVD_CGC_CTRL);
- data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
- WREG32(mmUVD_CGC_CTRL, data);
- }
+ /* disable clock gating */
+ WREG32_FIELD(UVD_CGC_CTRL, DYN_CLOCK_MODE, 0);
 	/* disable interrupt */
- WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);
/* stall UMC and register bus before resetting VCPU */
- WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+ WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
mdelay(1);
/* put LMI, VCPU, RBC etc... into reset */
@@ -426,7 +419,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
mdelay(5);
/* take UVD block out of reset */
- WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+ WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
mdelay(5);
/* initialize UVD memory controller */
@@ -461,7 +454,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
/* enable UMC */
- WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+ WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);
/* boot up the VCPU */
WREG32(mmUVD_SOFT_RESET, 0);
@@ -481,11 +474,9 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
break;
DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
- WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
- ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+ WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
mdelay(10);
- WREG32_P(mmUVD_SOFT_RESET, 0,
- ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+ WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
mdelay(10);
r = -1;
}
@@ -502,15 +493,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
/* clear the bit 4 of UVD_STATUS */
WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
- tmp = 0;
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
- /* force RBC into idle state */
WREG32(mmUVD_RBC_RB_CNTL, tmp);
/* set the write pointer delay */
@@ -531,7 +521,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
- WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
+ WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
return 0;
}
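
[Editor's note] The conversions in this hunk swap open-coded WREG32_P(reg, val << shift, ~mask) sequences for WREG32_FIELD(REG, FIELD, v). Roughly, as assumed from the amdgpu.h of this era, the macro expands to the same masked read-modify-write, just derived from the generated mask/shift names:

/* Assumed shape of the helper; shown for illustration only. */
#define WREG32_FIELD(reg, field, val)					\
	WREG32(mm##reg,							\
	       (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) |	\
	       (val) << REG_FIELD_SHIFT(reg, field))

So, for example, WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1) performs the same update as the WREG32_P form it replaces, leaving every other bit of mmUVD_LMI_CTRL2 untouched.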
@@ -735,6 +725,31 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0xE);
}
+static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 8; /* uvd_v6_0_ring_emit_ib */
+}
+
+static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 2 + /* uvd_v6_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+ 14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
+}
+
+static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
+{
+ return
+ 2 + /* uvd_v6_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+ 20 + /* uvd_v6_0_ring_emit_vm_flush */
+ 14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
+}
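
[Editor's note] The two new callbacks report worst-case dword counts: get_emit_ib_size per IB, get_dma_frame_size for everything else a submission emits (HDP flush/invalidate, pipeline sync, VM flush, fences). A plausible consumer, sketched under the assumption that the core uses them to size ring allocations:

/* Hypothetical sketch: reserve worst-case ring space for one job
 * before any packets are emitted. */
static int reserve_frame(struct amdgpu_ring *ring, unsigned int num_ibs)
{
	unsigned int ndw = ring->funcs->get_dma_frame_size(ring) +
			   num_ibs * ring->funcs->get_emit_ib_size(ring);

	return amdgpu_ring_alloc(ring, ndw);
}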
+
static bool uvd_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -748,20 +763,82 @@ static int uvd_v6_0_wait_for_idle(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
+ if (uvd_v6_0_is_idle(handle))
return 0;
}
return -ETIMEDOUT;
}
-static int uvd_v6_0_soft_reset(void *handle)
+#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
+static int uvd_v6_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+ u32 tmp = RREG32(mmSRBM_STATUS);
+
+ if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
+ REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
+ (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
+
+ if (srbm_soft_reset) {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true;
+ adev->uvd.srbm_soft_reset = srbm_soft_reset;
+ } else {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false;
+ adev->uvd.srbm_soft_reset = 0;
+ }
+ return 0;
+}
+static int uvd_v6_0_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ return 0;
uvd_v6_0_stop(adev);
+ return 0;
+}
+
+static int uvd_v6_0_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ return 0;
+ srbm_soft_reset = adev->uvd.srbm_soft_reset;
+
+ if (srbm_soft_reset) {
+ u32 tmp;
+
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+ }
+
+ return 0;
+}
+
+static int uvd_v6_0_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ return 0;
- WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
- ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
mdelay(5);
return uvd_v6_0_start(adev);
@@ -902,21 +979,15 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
- static int curstate = -1;
if (adev->asic_type == CHIP_FIJI ||
- adev->asic_type == CHIP_POLARIS10)
- uvd_v6_set_bypass_mode(adev, enable);
+ adev->asic_type == CHIP_POLARIS10)
+ uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false);
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
- if (curstate == state)
- return 0;
-
- curstate = state;
- if (enable) {
+ if (state == AMD_CG_STATE_GATE) {
/* disable HW gating and enable Sw gating */
uvd_v6_0_set_sw_clock_gating(adev);
} else {
@@ -946,6 +1017,8 @@ static int uvd_v6_0_set_powergating_state(void *handle,
if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
return 0;
+ WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
+
if (state == AMD_PG_STATE_GATE) {
uvd_v6_0_stop(adev);
return 0;
@@ -966,7 +1039,10 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.resume = uvd_v6_0_resume,
.is_idle = uvd_v6_0_is_idle,
.wait_for_idle = uvd_v6_0_wait_for_idle,
+ .check_soft_reset = uvd_v6_0_check_soft_reset,
+ .pre_soft_reset = uvd_v6_0_pre_soft_reset,
.soft_reset = uvd_v6_0_soft_reset,
+ .post_soft_reset = uvd_v6_0_post_soft_reset,
.set_clockgating_state = uvd_v6_0_set_clockgating_state,
.set_powergating_state = uvd_v6_0_set_powergating_state,
};
@@ -986,6 +1062,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
+ .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
@@ -1005,6 +1083,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
+ .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
};
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 80a37a602181..76e64ad04a53 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -30,16 +30,17 @@
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"
-
#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"
-
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#define VCE_V2_0_FW_SIZE (256 * 1024)
#define VCE_V2_0_STACK_SIZE (64 * 1024)
#define VCE_V2_0_DATA_SIZE (23552 * AMDGPU_MAX_VCE_HANDLES)
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -96,6 +97,49 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
WREG32(mmVCE_RB_WPTR2, ring->wptr);
}
+static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
+{
+ int i, j;
+
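+ /* Poll VCE_LMI_STATUS for up to 10 seconds; any bit in the 0x337f
+  * mask being set is treated as the LMI reporting a clean state. */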
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ uint32_t status = RREG32(mmVCE_LMI_STATUS);
+
+ if (status & 0x337f)
+ return 0;
+ mdelay(10);
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
+{
+ int i, j;
+
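+ /* Poll VCE_STATUS for the firmware-loaded flag; after each second
+  * without a response, soft-reset the ECPU and retry (10 attempts). */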
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ uint32_t status = RREG32(mmVCE_STATUS);
+
+ if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+ return 0;
+ mdelay(10);
+ }
+
+ DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ WREG32_P(mmVCE_SOFT_RESET, 0,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
/**
* vce_v2_0_start - start VCE block
*
@@ -106,7 +150,7 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
static int vce_v2_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- int i, j, r;
+ int r;
vce_v2_0_mc_resume(adev);
@@ -127,36 +171,12 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
- WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-
- WREG32_P(mmVCE_SOFT_RESET,
- VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
+ WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
mdelay(100);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
- WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
- for (i = 0; i < 10; ++i) {
- uint32_t status;
- for (j = 0; j < 100; ++j) {
- status = RREG32(mmVCE_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- }
- r = 0;
- if (status & 2)
- break;
-
- DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
- WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(10);
- WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(10);
- r = -1;
- }
+ r = vce_v2_0_firmware_loaded(adev);
/* clear BUSY flag */
WREG32_P(mmVCE_STATUS, 0, ~1);
@@ -173,6 +193,8 @@ static int vce_v2_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->vce.num_rings = 2;
+
vce_v2_0_set_ring_funcs(adev);
vce_v2_0_set_irq_funcs(adev);
@@ -182,7 +204,7 @@ static int vce_v2_0_early_init(void *handle)
static int vce_v2_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
- int r;
+ int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* VCE */
@@ -199,19 +221,14 @@ static int vce_v2_0_sw_init(void *handle)
if (r)
return r;
- ring = &adev->vce.ring[0];
- sprintf(ring->name, "vce0");
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
- if (r)
- return r;
-
- ring = &adev->vce.ring[1];
- sprintf(ring->name, "vce1");
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
- if (r)
- return r;
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ ring = &adev->vce.ring[i];
+ sprintf(ring->name, "vce%d", i);
+ r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
+ &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ if (r)
+ return r;
+ }
return r;
}
@@ -234,29 +251,23 @@ static int vce_v2_0_sw_fini(void *handle)
static int vce_v2_0_hw_init(void *handle)
{
- struct amdgpu_ring *ring;
- int r;
+ int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = vce_v2_0_start(adev);
+ /* This error means the VCPU is not in a running state; just skip the ring test, don't stop driver initialization */
if (r)
-/* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */
return 0;
- ring = &adev->vce.ring[0];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
- return r;
- }
+ for (i = 0; i < adev->vce.num_rings; i++)
+ adev->vce.ring[i].ready = false;
- ring = &adev->vce.ring[1];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
- return r;
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+ if (r)
+ return r;
+ else
+ adev->vce.ring[i].ready = true;
}
DRM_INFO("VCE initialized successfully.\n");
@@ -338,47 +349,50 @@ static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
- u32 orig, tmp;
+ if (vce_v2_0_wait_for_idle(adev)) {
+ DRM_INFO("VCE is busy, Can't set clock gateing");
+ return;
+ }
- if (gated) {
- if (vce_v2_0_wait_for_idle(adev)) {
- DRM_INFO("VCE is busy, Can't set clock gateing");
- return;
- }
- WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
- WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(100);
- WREG32(mmVCE_STATUS, 0);
- } else {
- WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
- WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(100);
+ WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100);
+
+ if (vce_v2_0_lmi_clean(adev)) {
+ DRM_INFO("LMI is busy, Can't set clock gateing");
+ return;
}
- tmp = RREG32(mmVCE_CLOCK_GATING_B);
- tmp &= ~0x00060006;
+ WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ WREG32(mmVCE_STATUS, 0);
+
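+ /* When gating, clear the CGTT clock override before reprogramming
+  * the gating registers below. */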
+ if (gated)
+ WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
+ /* LMI_MC/LMI_UMC always set to dynamic: {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */
if (gated) {
- tmp |= 0xe10000;
+ /* Force CLOCK OFF, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */
+ WREG32(mmVCE_CLOCK_GATING_B, 0xe90010);
} else {
- tmp |= 0xe1;
- tmp &= ~0xe10000;
+ /* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */
+ WREG32(mmVCE_CLOCK_GATING_B, 0x800f1);
}
- WREG32(mmVCE_CLOCK_GATING_B, tmp);
- orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
- tmp &= ~0x1fe000;
- tmp &= ~0xff000000;
- if (tmp != orig)
- WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+ /* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0} */
+ WREG32(mmVCE_UENC_CLOCK_GATING, 0x40);
- orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
- tmp &= ~0x3fc;
- if (tmp != orig)
- WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+ /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
- if (gated)
- WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
- WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100);
+ if (!gated) {
+ WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+ mdelay(100);
+ WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+ vce_v2_0_firmware_loaded(adev);
+ WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+ }
}
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
@@ -458,9 +472,7 @@ static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-
- WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
- ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
vce_v2_0_init_cg(adev);
}
@@ -474,11 +486,11 @@ static bool vce_v2_0_is_idle(void *handle)
static int vce_v2_0_wait_for_idle(void *handle)
{
- unsigned i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ unsigned i;
for (i = 0; i < adev->usec_timeout; i++) {
- if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
+ if (vce_v2_0_is_idle(handle))
return 0;
}
return -ETIMEDOUT;
@@ -488,8 +500,7 @@ static int vce_v2_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
- ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
+ WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
mdelay(5);
return vce_v2_0_start(adev);
@@ -516,10 +527,8 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
DRM_DEBUG("IH: VCE\n");
switch (entry->src_data) {
case 0:
- amdgpu_fence_process(&adev->vce.ring[0]);
- break;
case 1:
- amdgpu_fence_process(&adev->vce.ring[1]);
+ amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -530,11 +539,28 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
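+ /* Toggle the ECLK DFS bypass bit in the SMC's GCK_DFS_BYPASS_CNTL
+  * register. */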
+ u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+ if (enable)
+ tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+ else
+ tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+
+ WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
static int vce_v2_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
bool gate = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_CG_STATE_GATE);
+
+ vce_v2_0_set_bypass_mode(adev, enable);
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -596,12 +622,16 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
+ .get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
+ .get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
};
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
- adev->vce.ring[0].funcs = &vce_v2_0_ring_funcs;
- adev->vce.ring[1].funcs = &vce_v2_0_ring_funcs;
+ int i;
+
+ for (i = 0; i < adev->vce.num_rings; i++)
+ adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
}
static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index c271abffd8dd..3f6db4ec0102 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -37,6 +37,9 @@
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
@@ -67,8 +70,10 @@ static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
if (ring == &adev->vce.ring[0])
return RREG32(mmVCE_RB_RPTR);
- else
+ else if (ring == &adev->vce.ring[1])
return RREG32(mmVCE_RB_RPTR2);
+ else
+ return RREG32(mmVCE_RB_RPTR3);
}
/**
@@ -84,8 +89,10 @@ static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
if (ring == &adev->vce.ring[0])
return RREG32(mmVCE_RB_WPTR);
- else
+ else if (ring == &adev->vce.ring[1])
return RREG32(mmVCE_RB_WPTR2);
+ else
+ return RREG32(mmVCE_RB_WPTR3);
}
/**
@@ -101,108 +108,80 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
if (ring == &adev->vce.ring[0])
WREG32(mmVCE_RB_WPTR, ring->wptr);
- else
+ else if (ring == &adev->vce.ring[1])
WREG32(mmVCE_RB_WPTR2, ring->wptr);
+ else
+ WREG32(mmVCE_RB_WPTR3, ring->wptr);
}
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
- u32 tmp, data;
-
- tmp = data = RREG32(mmVCE_RB_ARB_CTRL);
- if (override)
- data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
- else
- data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
-
- if (tmp != data)
- WREG32(mmVCE_RB_ARB_CTRL, data);
+ WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
}
static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
bool gated)
{
- u32 tmp, data;
+ u32 data;
+
/* Set Override to disable Clock Gating */
vce_v3_0_override_vce_clock_gating(adev, true);
- if (!gated) {
- /* Force CLOCK ON for VCE_CLOCK_GATING_B,
- * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
- * VREG can be FORCE ON or set to Dynamic, but can't be OFF
- */
- tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
+ /* This function enables MGCG, which is controlled by firmware.
+  * With the clocks in the gated state the core is still
+  * accessible, but the firmware will throttle the clocks on the
+  * fly as necessary.
+  */
+ if (gated) {
+ data = RREG32(mmVCE_CLOCK_GATING_B);
data |= 0x1ff;
data &= ~0xef0000;
- if (tmp != data)
- WREG32(mmVCE_CLOCK_GATING_B, data);
+ WREG32(mmVCE_CLOCK_GATING_B, data);
- /* Force CLOCK ON for VCE_UENC_CLOCK_GATING,
- * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
- */
- tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
+ data = RREG32(mmVCE_UENC_CLOCK_GATING);
data |= 0x3ff000;
data &= ~0xffc00000;
- if (tmp != data)
- WREG32(mmVCE_UENC_CLOCK_GATING, data);
+ WREG32(mmVCE_UENC_CLOCK_GATING, data);
- /* set VCE_UENC_CLOCK_GATING_2 */
- tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
+ data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
data |= 0x2;
- data &= ~0x2;
- if (tmp != data)
- WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+ data &= ~0x00010000;
+ WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
- /* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */
- tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
data |= 0x37f;
- if (tmp != data)
- WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
- /* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */
- tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+ data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
- 0x8;
- if (tmp != data)
- WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
+ VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
+ VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
+ 0x8;
+ WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
} else {
- /* Force CLOCK OFF for VCE_CLOCK_GATING_B,
- * {*, *_FORCE_OFF} = {*, 1}
- * set VREG to Dynamic, as it can't be OFF
- */
- tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
+ data = RREG32(mmVCE_CLOCK_GATING_B);
data &= ~0x80010;
data |= 0xe70008;
- if (tmp != data)
- WREG32(mmVCE_CLOCK_GATING_B, data);
- /* Force CLOCK OFF for VCE_UENC_CLOCK_GATING,
- * Force ClOCK OFF takes precedent over Force CLOCK ON setting.
- * {*_FORCE_ON, *_FORCE_OFF} = {*, 1}
- */
- tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
+ WREG32(mmVCE_CLOCK_GATING_B, data);
+
+ data = RREG32(mmVCE_UENC_CLOCK_GATING);
data |= 0xffc00000;
- if (tmp != data)
- WREG32(mmVCE_UENC_CLOCK_GATING, data);
- /* Set VCE_UENC_CLOCK_GATING_2 */
- tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
+ WREG32(mmVCE_UENC_CLOCK_GATING, data);
+
+ data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
data |= 0x10000;
- if (tmp != data)
- WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
- /* Set VCE_UENC_REG_CLOCK_GATING to dynamic */
- tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+
+ data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
data &= ~0xffc00000;
- if (tmp != data)
- WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
- /* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */
- tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+
+ data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
- 0x8);
- if (tmp != data)
- WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
+ VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
+ VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
+ 0x8);
+ WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
}
vce_v3_0_override_vce_clock_gating(adev, false);
}
@@ -221,12 +200,9 @@ static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
}
DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
- WREG32_P(mmVCE_SOFT_RESET,
- VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
mdelay(10);
- WREG32_P(mmVCE_SOFT_RESET, 0,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
mdelay(10);
}
@@ -259,43 +235,34 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+ ring = &adev->vce.ring[2];
+ WREG32(mmVCE_RB_RPTR3, ring->wptr);
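+ /* Translate the requested state into PP_STATE flags, then send one
+  * clock-gating message per system block (MC, SDMA, HDP, BIF, DRM,
+  * ROM) to the SMU through powerplay. */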
+ WREG32(mmVCE_RB_WPTR3, ring->wptr);
+ WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
+ WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+
mutex_lock(&adev->grbm_idx_mutex);
for (idx = 0; idx < 2; ++idx) {
if (adev->vce.harvest_config & (1 << idx))
continue;
- if (idx == 0)
- WREG32_P(mmGRBM_GFX_INDEX, 0,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
- else
- WREG32_P(mmGRBM_GFX_INDEX,
- GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
vce_v3_0_mc_resume(adev, idx);
-
- WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK,
- ~VCE_STATUS__JOB_BUSY_MASK);
+ WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
else
- WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
- ~VCE_VCPU_CNTL__CLK_EN_MASK);
-
- WREG32_P(mmVCE_SOFT_RESET, 0,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
mdelay(100);
r = vce_v3_0_firmware_loaded(adev);
/* clear BUSY flag */
- WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
-
- /* Set Clock-Gating off */
- if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
- vce_v3_0_set_vce_sw_clock_gating(adev, false);
+ WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
if (r) {
DRM_ERROR("VCE not responding, giving up!!!\n");
@@ -304,7 +271,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
}
}
- WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -319,33 +286,25 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
if (adev->vce.harvest_config & (1 << idx))
continue;
- if (idx == 0)
- WREG32_P(mmGRBM_GFX_INDEX, 0,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
- else
- WREG32_P(mmGRBM_GFX_INDEX,
- GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
else
- WREG32_P(mmVCE_VCPU_CNTL, 0,
- ~VCE_VCPU_CNTL__CLK_EN_MASK);
+ WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);
+
/* hold on ECPU */
- WREG32_P(mmVCE_SOFT_RESET,
- VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
/* clear BUSY flag */
- WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+ WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
/* Set Clock-Gating off */
if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
vce_v3_0_set_vce_sw_clock_gating(adev, false);
}
- WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -399,6 +358,8 @@ static int vce_v3_0_early_init(void *handle)
(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
return -ENOENT;
+ adev->vce.num_rings = 3;
+
vce_v3_0_set_ring_funcs(adev);
vce_v3_0_set_irq_funcs(adev);
@@ -409,7 +370,7 @@ static int vce_v3_0_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
- int r;
+ int r, i;
/* VCE */
r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
@@ -425,19 +386,14 @@ static int vce_v3_0_sw_init(void *handle)
if (r)
return r;
- ring = &adev->vce.ring[0];
- sprintf(ring->name, "vce0");
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
- if (r)
- return r;
-
- ring = &adev->vce.ring[1];
- sprintf(ring->name, "vce1");
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
- if (r)
- return r;
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ ring = &adev->vce.ring[i];
+ sprintf(ring->name, "vce%d", i);
+ r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
+ &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ if (r)
+ return r;
+ }
return r;
}
@@ -467,10 +423,10 @@ static int vce_v3_0_hw_init(void *handle)
if (r)
return r;
- adev->vce.ring[0].ready = false;
- adev->vce.ring[1].ready = false;
+ for (i = 0; i < adev->vce.num_rings; i++)
+ adev->vce.ring[i].ready = false;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < adev->vce.num_rings; i++) {
r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
if (r)
return r;
@@ -534,7 +490,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
- WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
+ WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);
WREG32(mmVCE_LMI_CTRL, 0x00398000);
WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
@@ -573,9 +529,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
}
WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-
- WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
- ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
static bool vce_v3_0_is_idle(void *handle)
@@ -601,20 +555,108 @@ static int vce_v3_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
+#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */
+#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */
+#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */
+#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
+ VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
+
+static int vce_v3_0_check_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+
+ /* According to the VCE team, we should use VCE_STATUS instead of
+  * the SRBM_STATUS.VCE_BUSY bit for busy status checking.
+  * GRBM_GFX_INDEX.INSTANCE_INDEX specifies which VCE instance's
+  * registers are accessed (0 for the 1st instance, 0x10 for the 2nd).
+  *
+  * VCE_STATUS
+  * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
+  * |----+----+-----------+----+----+----+----------+---------+----|
+  * |bit8|bit7|   bit6    |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
+  *
+  * The VCE team suggests using bits 3-6 for the busy status check.
+  */
+ mutex_lock(&adev->grbm_idx_mutex);
+ WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
+ }
+ WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+ if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
+ }
+ WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+
+ if (srbm_soft_reset) {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
+ adev->vce.srbm_soft_reset = srbm_soft_reset;
+ } else {
+ adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false;
+ adev->vce.srbm_soft_reset = 0;
+ }
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return 0;
+}
+
static int vce_v3_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 mask = 0;
+ u32 srbm_soft_reset;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ return 0;
+ srbm_soft_reset = adev->vce.srbm_soft_reset;
+
+ if (srbm_soft_reset) {
+ u32 tmp;
- mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
- mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+ }
+
+ return 0;
+}
+
+static int vce_v3_0_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ return 0;
- WREG32_P(mmSRBM_SOFT_RESET, mask,
- ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
- SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
mdelay(5);
- return vce_v3_0_start(adev);
+ return vce_v3_0_suspend(adev);
+}
+
+static int vce_v3_0_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ return 0;
+
+ mdelay(5);
+
+ return vce_v3_0_resume(adev);
}
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -637,13 +679,12 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
{
DRM_DEBUG("IH: VCE\n");
- WREG32_P(mmVCE_SYS_INT_STATUS,
- VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
- ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
+ WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);
switch (entry->src_data) {
case 0:
case 1:
+ case 2:
amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
break;
default:
@@ -655,7 +696,7 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+static void vce_v3_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
@@ -674,8 +715,10 @@ static int vce_v3_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
- if (adev->asic_type == CHIP_POLARIS10)
- vce_v3_set_bypass_mode(adev, enable);
+ if ((adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_TONGA) ||
+ (adev->asic_type == CHIP_FIJI))
+ vce_v3_0_set_bypass_mode(adev, enable);
if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
return 0;
@@ -686,13 +729,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
if (adev->vce.harvest_config & (1 << i))
continue;
- if (i == 0)
- WREG32_P(mmGRBM_GFX_INDEX, 0,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
- else
- WREG32_P(mmGRBM_GFX_INDEX,
- GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
if (enable) {
/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -711,7 +748,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
vce_v3_0_set_vce_sw_clock_gating(adev, enable);
}
- WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -739,6 +776,60 @@ static int vce_v3_0_set_powergating_state(void *handle,
return vce_v3_0_start(adev);
}
+static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+{
+ amdgpu_ring_write(ring, VCE_CMD_IB_VM);
+ amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+}
+
+static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vm_id, uint64_t pd_addr)
+{
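+ /* Point the VM's page table base at pd_addr, then flush that VM's
+  * TLB; the request stream is terminated with VCE_CMD_END. */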
+ amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
+ amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
+ amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, VCE_CMD_END);
+}
+
+static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
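+ /* Stall the ring until the fence value at addr is greater than or
+  * equal to (WAIT_GE) the last emitted sequence number. */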
+ amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, seq);
+}
+
+static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 5; /* vce_v3_0_ring_emit_ib */
+}
+
+static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
+}
+
+static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
+{
+ return
+ 6 + /* vce_v3_0_emit_vm_flush */
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
+}
+
const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.name = "vce_v3_0",
.early_init = vce_v3_0_early_init,
@@ -751,12 +842,15 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.resume = vce_v3_0_resume,
.is_idle = vce_v3_0_is_idle,
.wait_for_idle = vce_v3_0_wait_for_idle,
+ .check_soft_reset = vce_v3_0_check_soft_reset,
+ .pre_soft_reset = vce_v3_0_pre_soft_reset,
.soft_reset = vce_v3_0_soft_reset,
+ .post_soft_reset = vce_v3_0_post_soft_reset,
.set_clockgating_state = vce_v3_0_set_clockgating_state,
.set_powergating_state = vce_v3_0_set_powergating_state,
};
-static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
+static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
@@ -769,12 +863,42 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
+ .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
+};
+
+static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
+ .get_rptr = vce_v3_0_ring_get_rptr,
+ .get_wptr = vce_v3_0_ring_get_wptr,
+ .set_wptr = vce_v3_0_ring_set_wptr,
+ .parse_cs = NULL,
+ .emit_ib = vce_v3_0_ring_emit_ib,
+ .emit_vm_flush = vce_v3_0_emit_vm_flush,
+ .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
+ .emit_fence = amdgpu_vce_ring_emit_fence,
+ .test_ring = amdgpu_vce_ring_test_ring,
+ .test_ib = amdgpu_vce_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vce_ring_begin_use,
+ .end_use = amdgpu_vce_ring_end_use,
+ .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
};
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
- adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
- adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
+ int i;
+
+ if (adev->asic_type >= CHIP_STONEY) {
+ for (i = 0; i < adev->vce.num_rings; i++)
+ adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
+ DRM_INFO("VCE enabled in VM mode\n");
+ } else {
+ for (i = 0; i < adev->vce.num_rings; i++)
+ adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
+ DRM_INFO("VCE enabled in physical mode\n");
+ }
}
static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 03a31c53aec3..c0d9aad7126f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -77,7 +77,11 @@
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
+#include "dce_virtual.h"
+MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
+MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
+MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
@@ -444,18 +448,21 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
+static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
- u32 caps = 0;
- u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
-
- if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
- caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
-
- if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
- caps |= AMDGPU_VIRT_CAPS_IS_VF;
-
- return caps;
+ uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+
+ /* bit0: 0 means PF, 1 means VF */
+ /* bit31: 0 means IOV is disabled, 1 means it is enabled */
+ if (reg & 1)
+ adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+
+ if (reg & 0x80000000)
+ adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+
+ if (reg == 0) {
+ if (is_virtual_machine()) /* passthrough mode excludes SR-IOV */
+ adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+ }
}
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
@@ -822,6 +829,60 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &iceland_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &sdma_v2_4_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -890,6 +951,74 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 5,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v5_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -958,6 +1087,74 @@ static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1026,6 +1223,74 @@ static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1103,34 +1368,142 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] =
#endif
};
+static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+#if defined(CONFIG_DRM_AMD_ACP)
+ {
+ .type = AMD_IP_BLOCK_TYPE_ACP,
+ .major = 2,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &acp_ip_funcs,
+ },
+#endif
+};
+
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_TOPAZ:
- adev->ip_blocks = topaz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
- break;
- case CHIP_FIJI:
- adev->ip_blocks = fiji_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
- break;
- case CHIP_TONGA:
- adev->ip_blocks = tonga_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- adev->ip_blocks = polaris11_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
- break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->ip_blocks = cz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
+ if (adev->enable_virtual_display) {
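+ /* The *_vd variants of the IP block tables use dce_virtual for
+  * the display block instead of the hardware DCE path. */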
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ adev->ip_blocks = topaz_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd);
+ break;
+ case CHIP_FIJI:
+ adev->ip_blocks = fiji_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd);
+ break;
+ case CHIP_TONGA:
+ adev->ip_blocks = tonga_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd);
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ adev->ip_blocks = polaris11_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd);
+ break;
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ adev->ip_blocks = cz_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+ } else {
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ adev->ip_blocks = topaz_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
+ break;
+ case CHIP_FIJI:
+ adev->ip_blocks = fiji_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
+ break;
+ case CHIP_TONGA:
+ adev->ip_blocks = tonga_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ adev->ip_blocks = polaris11_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
+ break;
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ adev->ip_blocks = cz_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
}
return 0;
@@ -1154,13 +1527,13 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
{
.read_disabled_bios = &vi_read_disabled_bios,
.read_bios_from_rom = &vi_read_bios_from_rom,
+ .detect_hw_virtualization = vi_detect_hw_virtualization,
.read_register = &vi_read_register,
.reset = &vi_asic_reset,
.set_vga_state = &vi_vga_set_state,
.get_xclk = &vi_get_xclk,
.set_uvd_clocks = &vi_set_uvd_clocks,
.set_vce_clocks = &vi_set_vce_clocks,
- .get_virtual_caps = &vi_get_virtual_caps,
};
static int vi_common_early_init(void *handle)
@@ -1248,8 +1621,17 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
- AMD_CG_SUPPORT_SDMA_LS;
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_VCE_MGCG;
+ /* rev0 hardware requires workarounds to support PG */
adev->pg_flags = 0;
+ if (adev->rev_id != 0x00) {
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_PIPELINE |
+ AMD_PG_SUPPORT_UVD |
+ AMD_PG_SUPPORT_VCE;
+ }
adev->external_rev_id = adev->rev_id + 0x1;
break;
case CHIP_STONEY:
@@ -1267,14 +1649,24 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
- AMD_CG_SUPPORT_SDMA_LS;
- adev->external_rev_id = adev->rev_id + 0x1;
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_VCE_MGCG;
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_PIPELINE |
+ AMD_PG_SUPPORT_UVD |
+ AMD_PG_SUPPORT_VCE;
+ adev->external_rev_id = adev->rev_id + 0x61;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
}
+ /* in the early init stage, the vbios code won't work */
+ if (adev->asic_funcs->detect_hw_virtualization)
+ amdgpu_asic_detect_hw_virtualization(adev);
+
if (amdgpu_smc_load_fw && smc_enabled)
adev->firmware.smu_load = true;
@@ -1418,6 +1810,63 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
+static int vi_common_set_clockgating_state_by_smu(void *handle,
+ enum amd_clockgating_state state)
+{
+ uint32_t msg_id, pp_state;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ void *pp_handle = adev->powerplay.pp_handle;
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG | PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_MC,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_SDMA,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_HDP,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_BIF,
+ PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_BIF,
+ PP_STATE_SUPPORT_CG,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_DRM,
+ PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_ROM,
+ PP_STATE_SUPPORT_CG,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ return 0;
+}
+
static int vi_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -1443,6 +1892,10 @@ static int vi_common_set_clockgating_state(void *handle,
vi_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
+ case CHIP_TONGA:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ vi_common_set_clockgating_state_by_smu(adev, state);
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 062ee1676480..11746f22d0c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -369,4 +369,45 @@
#define VCE_CMD_IB_AUTO 0x00000005
#define VCE_CMD_SEMAPHORE 0x00000006
+#define VCE_CMD_IB_VM 0x00000102
+#define VCE_CMD_WAIT_GE 0x00000106
+#define VCE_CMD_UPDATE_PTB 0x00000107
+#define VCE_CMD_FLUSH_TLB 0x00000108
+
+/* mmPA_SC_RASTER_CONFIG mask */
+#define RB_MAP_PKR0(x) ((x) << 0)
+#define RB_MAP_PKR0_MASK (0x3 << 0)
+#define RB_MAP_PKR1(x) ((x) << 2)
+#define RB_MAP_PKR1_MASK (0x3 << 2)
+#define RB_XSEL2(x) ((x) << 4)
+#define RB_XSEL2_MASK (0x3 << 4)
+#define RB_XSEL (1 << 6)
+#define RB_YSEL (1 << 7)
+#define PKR_MAP(x) ((x) << 8)
+#define PKR_MAP_MASK (0x3 << 8)
+#define PKR_XSEL(x) ((x) << 10)
+#define PKR_XSEL_MASK (0x3 << 10)
+#define PKR_YSEL(x) ((x) << 12)
+#define PKR_YSEL_MASK (0x3 << 12)
+#define SC_MAP(x) ((x) << 16)
+#define SC_MAP_MASK (0x3 << 16)
+#define SC_XSEL(x) ((x) << 18)
+#define SC_XSEL_MASK (0x3 << 18)
+#define SC_YSEL(x) ((x) << 20)
+#define SC_YSEL_MASK (0x3 << 20)
+#define SE_MAP(x) ((x) << 24)
+#define SE_MAP_MASK (0x3 << 24)
+#define SE_XSEL(x) ((x) << 26)
+#define SE_XSEL_MASK (0x3 << 26)
+#define SE_YSEL(x) ((x) << 28)
+#define SE_YSEL_MASK (0x3 << 28)
+
+/* mmPA_SC_RASTER_CONFIG_1 mask */
+#define SE_PAIR_MAP(x) ((x) << 0)
+#define SE_PAIR_MAP_MASK (0x3 << 0)
+#define SE_PAIR_XSEL(x) ((x) << 2)
+#define SE_PAIR_XSEL_MASK (0x3 << 2)
+#define SE_PAIR_YSEL(x) ((x) << 4)
+#define SE_PAIR_YSEL_MASK (0x3 << 4)
+
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index e621eba63126..453c5d66e5c3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -142,13 +142,15 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pr_debug("mapping doorbell page:\n");
- pr_debug(" target user address == 0x%08llX\n",
- (unsigned long long) vma->vm_start);
- pr_debug(" physical address == 0x%08llX\n", address);
- pr_debug(" vm_flags == 0x%04lX\n", vma->vm_flags);
- pr_debug(" size == 0x%04lX\n",
- doorbell_process_allocation());
+ pr_debug("kfd: mapping doorbell page in %s\n"
+ " target user address == 0x%08llX\n"
+ " physical address == 0x%08llX\n"
+ " vm_flags == 0x%04lX\n"
+ " size == 0x%04lX\n",
+ __func__,
+ (unsigned long long) vma->vm_start, address, vma->vm_flags,
+ doorbell_process_allocation());
+
return io_remap_pfn_range(vma,
vma->vm_start,
@@ -184,7 +186,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
sizeof(u32)) + inx;
pr_debug("kfd: get kernel queue doorbell\n"
- " doorbell offset == 0x%08d\n"
+ " doorbell offset == 0x%08X\n"
" kernel address == 0x%08lX\n",
*doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 9beae87aadd5..d135cd002a95 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -47,6 +47,9 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
__func__, KFD_QUEUE_TYPE_HIQ, queue_size);
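+ /* Zero the stack-allocated queue properties and NOP packet so any
+  * field not explicitly set below starts out as zero. */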
+ memset(&prop, 0, sizeof(prop));
+ memset(&nop, 0, sizeof(nop));
+
nop.opcode = IT_NOP;
nop.type = PM4_TYPE_3;
nop.u32all |= PM4_COUNT_ZERO;
@@ -121,7 +124,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.eop_ring_buffer_address = kq->eop_gpu_addr;
prop.eop_ring_buffer_size = PAGE_SIZE;
- if (init_queue(&kq->queue, prop) != 0)
+ if (init_queue(&kq->queue, &prop) != 0)
goto err_init_queue;
kq->queue->device = dev;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 80113c335966..4750cabe4252 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -619,7 +619,7 @@ int kfd_init_apertures(struct kfd_process *process);
/* Queue Context Management */
struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
-int init_queue(struct queue **q, struct queue_properties properties);
+int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 4f3849ac8c07..ef7c8de7060e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -404,58 +404,47 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
{
struct kfd_process *p;
struct kfd_process_device *pdd;
- int idx, i;
BUG_ON(dev == NULL);
- idx = srcu_read_lock(&kfd_processes_srcu);
-
/*
* Look for the process that matches the pasid. If there is no such
* process, we either released it in amdkfd's own notifier, or there
* is a bug. Unfortunately, there is no way to tell...
*/
- hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
- if (p->pasid == pasid) {
-
- srcu_read_unlock(&kfd_processes_srcu, idx);
-
- pr_debug("Unbinding process %d from IOMMU\n", pasid);
+ p = kfd_lookup_process_by_pasid(pasid);
+ if (!p)
+ return;
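+
+ /* kfd_lookup_process_by_pasid() is expected to return with
+  * p->mutex held, which is why the exit paths below unlock it. */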
- mutex_lock(&p->mutex);
-
- if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
- kfd_dbgmgr_destroy(dev->dbgmgr);
-
- pqm_uninit(&p->pqm);
+ pr_debug("Unbinding process %d from IOMMU\n", pasid);
- pdd = kfd_get_process_device_data(dev, p);
+ if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
+ kfd_dbgmgr_destroy(dev->dbgmgr);
- if (!pdd) {
- mutex_unlock(&p->mutex);
- return;
- }
+ pqm_uninit(&p->pqm);
- if (pdd->reset_wavefronts) {
- dbgdev_wave_reset_wavefronts(pdd->dev, p);
- pdd->reset_wavefronts = false;
- }
+ pdd = kfd_get_process_device_data(dev, p);
- /*
- * Just mark pdd as unbound, because we still need it
- * to call amd_iommu_unbind_pasid() in when the
- * process exits.
- * We don't call amd_iommu_unbind_pasid() here
- * because the IOMMU called us.
- */
- pdd->bound = false;
+ if (!pdd) {
+ mutex_unlock(&p->mutex);
+ return;
+ }
- mutex_unlock(&p->mutex);
+ if (pdd->reset_wavefronts) {
+ dbgdev_wave_reset_wavefronts(pdd->dev, p);
+ pdd->reset_wavefronts = false;
+ }
- return;
- }
+ /*
+ * Just mark pdd as unbound, because we still need it
+ * to call amd_iommu_unbind_pasid() when the
+ * process exits.
+ * We don't call amd_iommu_unbind_pasid() here
+ * because the IOMMU called us.
+ */
+ pdd->bound = false;
- srcu_read_unlock(&kfd_processes_srcu, idx);
+ mutex_unlock(&p->mutex);
}
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 7b69070f7ecc..e1fb40b84c72 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -129,7 +129,7 @@ static int create_cp_queue(struct process_queue_manager *pqm,
q_properties->vmid = 0;
q_properties->queue_id = qid;
- retval = init_queue(q, *q_properties);
+ retval = init_queue(q, q_properties);
if (retval != 0)
goto err_init_queue;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 9a0c90b0702e..0ab197077f2d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -63,7 +63,7 @@ void print_queue(struct queue *q)
pr_debug("Queue Device Address: 0x%p\n", q->device);
}
-int init_queue(struct queue **q, struct queue_properties properties)
+int init_queue(struct queue **q, const struct queue_properties *properties)
{
struct queue *tmp;
@@ -73,7 +73,7 @@ int init_queue(struct queue **q, struct queue_properties properties)
if (!tmp)
return -ENOMEM;
- memcpy(&tmp->properties, &properties, sizeof(struct queue_properties));
+ memcpy(&tmp->properties, properties, sizeof(struct queue_properties));
*q = tmp;
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 884c96f50c3d..1e5064749959 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1090,19 +1090,21 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
{
uint32_t hashout;
uint32_t buf[7];
+ uint64_t local_mem_size;
int i;
if (!gpu)
return 0;
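+
+ /* Read the local memory size once so the two 32-bit halves hashed
+  * below come from the same value. */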
+ local_mem_size = gpu->kfd2kgd->get_vmem_size(gpu->kgd);
+
buf[0] = gpu->pdev->devfn;
buf[1] = gpu->pdev->subsystem_vendor;
buf[2] = gpu->pdev->subsystem_device;
buf[3] = gpu->pdev->device;
buf[4] = gpu->pdev->bus->number;
- buf[5] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd)
- & 0xffffffff);
- buf[6] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
+ buf[5] = lower_32_bits(local_mem_size);
+ buf[6] = upper_32_bits(local_mem_size);
for (i = 0, hashout = 0; i < 7; i++)
hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index a74a0d2ff1ca..c934b78c9e2f 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -29,7 +29,12 @@
* Supported ASIC types
*/
enum amd_asic_type {
- CHIP_BONAIRE = 0,
+ CHIP_TAHITI = 0,
+ CHIP_PITCAIRN,
+ CHIP_VERDE,
+ CHIP_OLAND,
+ CHIP_HAINAN,
+ CHIP_BONAIRE,
CHIP_KAVERI,
CHIP_KABINI,
CHIP_HAWAII,
@@ -159,8 +164,14 @@ struct amd_ip_funcs {
bool (*is_idle)(void *handle);
/* poll for idle */
int (*wait_for_idle)(void *handle);
+ /* check soft reset the IP block */
+ int (*check_soft_reset)(void *handle);
+ /* pre soft reset the IP block */
+ int (*pre_soft_reset)(void *handle);
/* soft reset the IP block */
int (*soft_reset)(void *handle);
+ /* post soft reset the IP block */
+ int (*post_soft_reset)(void *handle);
/* enable/disable cg for the IP block */
int (*set_clockgating_state)(void *handle,
enum amd_clockgating_state state);
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h b/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h
new file mode 100644
index 000000000000..66e39cdb5cb0
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h
@@ -0,0 +1,941 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const u32 si_SECT_CONTEXT_def_1[] =
+{
+ 0x00000000, // DB_RENDER_CONTROL
+ 0x00000000, // DB_COUNT_CONTROL
+ 0x00000000, // DB_DEPTH_VIEW
+ 0x00000000, // DB_RENDER_OVERRIDE
+ 0x00000000, // DB_RENDER_OVERRIDE2
+ 0x00000000, // DB_HTILE_DATA_BASE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // DB_DEPTH_BOUNDS_MIN
+ 0x00000000, // DB_DEPTH_BOUNDS_MAX
+ 0x00000000, // DB_STENCIL_CLEAR
+ 0x00000000, // DB_DEPTH_CLEAR
+ 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+ 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+ 0, // HOLE
+ 0x00000000, // DB_DEPTH_INFO
+ 0x00000000, // DB_Z_INFO
+ 0x00000000, // DB_STENCIL_INFO
+ 0x00000000, // DB_Z_READ_BASE
+ 0x00000000, // DB_STENCIL_READ_BASE
+ 0x00000000, // DB_Z_WRITE_BASE
+ 0x00000000, // DB_STENCIL_WRITE_BASE
+ 0x00000000, // DB_DEPTH_SIZE
+ 0x00000000, // DB_DEPTH_SLICE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // TA_BC_BASE_ADDR
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // COHER_DEST_BASE_2
+ 0x00000000, // COHER_DEST_BASE_3
+ 0x00000000, // PA_SC_WINDOW_OFFSET
+ 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+ 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+ 0x0000ffff, // PA_SC_CLIPRECT_RULE
+ 0x00000000, // PA_SC_CLIPRECT_0_TL
+ 0x40004000, // PA_SC_CLIPRECT_0_BR
+ 0x00000000, // PA_SC_CLIPRECT_1_TL
+ 0x40004000, // PA_SC_CLIPRECT_1_BR
+ 0x00000000, // PA_SC_CLIPRECT_2_TL
+ 0x40004000, // PA_SC_CLIPRECT_2_BR
+ 0x00000000, // PA_SC_CLIPRECT_3_TL
+ 0x40004000, // PA_SC_CLIPRECT_3_BR
+ 0xaa99aaaa, // PA_SC_EDGERULE
+ 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+ 0xffffffff, // CB_TARGET_MASK
+ 0xffffffff, // CB_SHADER_MASK
+ 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+ 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+ 0x00000000, // COHER_DEST_BASE_0
+ 0x00000000, // COHER_DEST_BASE_1
+ 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+ 0x00000000, // PA_SC_VPORT_ZMIN_0
+ 0x3f800000, // PA_SC_VPORT_ZMAX_0
+ 0x00000000, // PA_SC_VPORT_ZMIN_1
+ 0x3f800000, // PA_SC_VPORT_ZMAX_1
+ 0x00000000, // PA_SC_VPORT_ZMIN_2
+ 0x3f800000, // PA_SC_VPORT_ZMAX_2
+ 0x00000000, // PA_SC_VPORT_ZMIN_3
+ 0x3f800000, // PA_SC_VPORT_ZMAX_3
+ 0x00000000, // PA_SC_VPORT_ZMIN_4
+ 0x3f800000, // PA_SC_VPORT_ZMAX_4
+ 0x00000000, // PA_SC_VPORT_ZMIN_5
+ 0x3f800000, // PA_SC_VPORT_ZMAX_5
+ 0x00000000, // PA_SC_VPORT_ZMIN_6
+ 0x3f800000, // PA_SC_VPORT_ZMAX_6
+ 0x00000000, // PA_SC_VPORT_ZMIN_7
+ 0x3f800000, // PA_SC_VPORT_ZMAX_7
+ 0x00000000, // PA_SC_VPORT_ZMIN_8
+ 0x3f800000, // PA_SC_VPORT_ZMAX_8
+ 0x00000000, // PA_SC_VPORT_ZMIN_9
+ 0x3f800000, // PA_SC_VPORT_ZMAX_9
+ 0x00000000, // PA_SC_VPORT_ZMIN_10
+ 0x3f800000, // PA_SC_VPORT_ZMAX_10
+ 0x00000000, // PA_SC_VPORT_ZMIN_11
+ 0x3f800000, // PA_SC_VPORT_ZMAX_11
+ 0x00000000, // PA_SC_VPORT_ZMIN_12
+ 0x3f800000, // PA_SC_VPORT_ZMAX_12
+ 0x00000000, // PA_SC_VPORT_ZMIN_13
+ 0x3f800000, // PA_SC_VPORT_ZMAX_13
+ 0x00000000, // PA_SC_VPORT_ZMIN_14
+ 0x3f800000, // PA_SC_VPORT_ZMAX_14
+ 0x00000000, // PA_SC_VPORT_ZMIN_15
+ 0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const u32 si_SECT_CONTEXT_def_2[] =
+{
+ 0x00000000, // CP_PERFMON_CNTX_CNTL
+ 0x00000000, // CP_RINGID
+ 0x00000000, // CP_VMID
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0xffffffff, // VGT_MAX_VTX_INDX
+ 0x00000000, // VGT_MIN_VTX_INDX
+ 0x00000000, // VGT_INDX_OFFSET
+ 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+ 0, // HOLE
+ 0x00000000, // CB_BLEND_RED
+ 0x00000000, // CB_BLEND_GREEN
+ 0x00000000, // CB_BLEND_BLUE
+ 0x00000000, // CB_BLEND_ALPHA
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // DB_STENCIL_CONTROL
+ 0x00000000, // DB_STENCILREFMASK
+ 0x00000000, // DB_STENCILREFMASK_BF
+ 0, // HOLE
+ 0x00000000, // PA_CL_VPORT_XSCALE
+ 0x00000000, // PA_CL_VPORT_XOFFSET
+ 0x00000000, // PA_CL_VPORT_YSCALE
+ 0x00000000, // PA_CL_VPORT_YOFFSET
+ 0x00000000, // PA_CL_VPORT_ZSCALE
+ 0x00000000, // PA_CL_VPORT_ZOFFSET
+ 0x00000000, // PA_CL_VPORT_XSCALE_1
+ 0x00000000, // PA_CL_VPORT_XOFFSET_1
+ 0x00000000, // PA_CL_VPORT_YSCALE_1
+ 0x00000000, // PA_CL_VPORT_YOFFSET_1
+ 0x00000000, // PA_CL_VPORT_ZSCALE_1
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_1
+ 0x00000000, // PA_CL_VPORT_XSCALE_2
+ 0x00000000, // PA_CL_VPORT_XOFFSET_2
+ 0x00000000, // PA_CL_VPORT_YSCALE_2
+ 0x00000000, // PA_CL_VPORT_YOFFSET_2
+ 0x00000000, // PA_CL_VPORT_ZSCALE_2
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_2
+ 0x00000000, // PA_CL_VPORT_XSCALE_3
+ 0x00000000, // PA_CL_VPORT_XOFFSET_3
+ 0x00000000, // PA_CL_VPORT_YSCALE_3
+ 0x00000000, // PA_CL_VPORT_YOFFSET_3
+ 0x00000000, // PA_CL_VPORT_ZSCALE_3
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_3
+ 0x00000000, // PA_CL_VPORT_XSCALE_4
+ 0x00000000, // PA_CL_VPORT_XOFFSET_4
+ 0x00000000, // PA_CL_VPORT_YSCALE_4
+ 0x00000000, // PA_CL_VPORT_YOFFSET_4
+ 0x00000000, // PA_CL_VPORT_ZSCALE_4
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_4
+ 0x00000000, // PA_CL_VPORT_XSCALE_5
+ 0x00000000, // PA_CL_VPORT_XOFFSET_5
+ 0x00000000, // PA_CL_VPORT_YSCALE_5
+ 0x00000000, // PA_CL_VPORT_YOFFSET_5
+ 0x00000000, // PA_CL_VPORT_ZSCALE_5
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_5
+ 0x00000000, // PA_CL_VPORT_XSCALE_6
+ 0x00000000, // PA_CL_VPORT_XOFFSET_6
+ 0x00000000, // PA_CL_VPORT_YSCALE_6
+ 0x00000000, // PA_CL_VPORT_YOFFSET_6
+ 0x00000000, // PA_CL_VPORT_ZSCALE_6
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_6
+ 0x00000000, // PA_CL_VPORT_XSCALE_7
+ 0x00000000, // PA_CL_VPORT_XOFFSET_7
+ 0x00000000, // PA_CL_VPORT_YSCALE_7
+ 0x00000000, // PA_CL_VPORT_YOFFSET_7
+ 0x00000000, // PA_CL_VPORT_ZSCALE_7
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_7
+ 0x00000000, // PA_CL_VPORT_XSCALE_8
+ 0x00000000, // PA_CL_VPORT_XOFFSET_8
+ 0x00000000, // PA_CL_VPORT_YSCALE_8
+ 0x00000000, // PA_CL_VPORT_YOFFSET_8
+ 0x00000000, // PA_CL_VPORT_ZSCALE_8
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_8
+ 0x00000000, // PA_CL_VPORT_XSCALE_9
+ 0x00000000, // PA_CL_VPORT_XOFFSET_9
+ 0x00000000, // PA_CL_VPORT_YSCALE_9
+ 0x00000000, // PA_CL_VPORT_YOFFSET_9
+ 0x00000000, // PA_CL_VPORT_ZSCALE_9
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_9
+ 0x00000000, // PA_CL_VPORT_XSCALE_10
+ 0x00000000, // PA_CL_VPORT_XOFFSET_10
+ 0x00000000, // PA_CL_VPORT_YSCALE_10
+ 0x00000000, // PA_CL_VPORT_YOFFSET_10
+ 0x00000000, // PA_CL_VPORT_ZSCALE_10
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_10
+ 0x00000000, // PA_CL_VPORT_XSCALE_11
+ 0x00000000, // PA_CL_VPORT_XOFFSET_11
+ 0x00000000, // PA_CL_VPORT_YSCALE_11
+ 0x00000000, // PA_CL_VPORT_YOFFSET_11
+ 0x00000000, // PA_CL_VPORT_ZSCALE_11
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_11
+ 0x00000000, // PA_CL_VPORT_XSCALE_12
+ 0x00000000, // PA_CL_VPORT_XOFFSET_12
+ 0x00000000, // PA_CL_VPORT_YSCALE_12
+ 0x00000000, // PA_CL_VPORT_YOFFSET_12
+ 0x00000000, // PA_CL_VPORT_ZSCALE_12
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_12
+ 0x00000000, // PA_CL_VPORT_XSCALE_13
+ 0x00000000, // PA_CL_VPORT_XOFFSET_13
+ 0x00000000, // PA_CL_VPORT_YSCALE_13
+ 0x00000000, // PA_CL_VPORT_YOFFSET_13
+ 0x00000000, // PA_CL_VPORT_ZSCALE_13
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_13
+ 0x00000000, // PA_CL_VPORT_XSCALE_14
+ 0x00000000, // PA_CL_VPORT_XOFFSET_14
+ 0x00000000, // PA_CL_VPORT_YSCALE_14
+ 0x00000000, // PA_CL_VPORT_YOFFSET_14
+ 0x00000000, // PA_CL_VPORT_ZSCALE_14
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_14
+ 0x00000000, // PA_CL_VPORT_XSCALE_15
+ 0x00000000, // PA_CL_VPORT_XOFFSET_15
+ 0x00000000, // PA_CL_VPORT_YSCALE_15
+ 0x00000000, // PA_CL_VPORT_YOFFSET_15
+ 0x00000000, // PA_CL_VPORT_ZSCALE_15
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_15
+ 0x00000000, // PA_CL_UCP_0_X
+ 0x00000000, // PA_CL_UCP_0_Y
+ 0x00000000, // PA_CL_UCP_0_Z
+ 0x00000000, // PA_CL_UCP_0_W
+ 0x00000000, // PA_CL_UCP_1_X
+ 0x00000000, // PA_CL_UCP_1_Y
+ 0x00000000, // PA_CL_UCP_1_Z
+ 0x00000000, // PA_CL_UCP_1_W
+ 0x00000000, // PA_CL_UCP_2_X
+ 0x00000000, // PA_CL_UCP_2_Y
+ 0x00000000, // PA_CL_UCP_2_Z
+ 0x00000000, // PA_CL_UCP_2_W
+ 0x00000000, // PA_CL_UCP_3_X
+ 0x00000000, // PA_CL_UCP_3_Y
+ 0x00000000, // PA_CL_UCP_3_Z
+ 0x00000000, // PA_CL_UCP_3_W
+ 0x00000000, // PA_CL_UCP_4_X
+ 0x00000000, // PA_CL_UCP_4_Y
+ 0x00000000, // PA_CL_UCP_4_Z
+ 0x00000000, // PA_CL_UCP_4_W
+ 0x00000000, // PA_CL_UCP_5_X
+ 0x00000000, // PA_CL_UCP_5_Y
+ 0x00000000, // PA_CL_UCP_5_Z
+ 0x00000000, // PA_CL_UCP_5_W
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_CNTL_0
+ 0x00000000, // SPI_PS_INPUT_CNTL_1
+ 0x00000000, // SPI_PS_INPUT_CNTL_2
+ 0x00000000, // SPI_PS_INPUT_CNTL_3
+ 0x00000000, // SPI_PS_INPUT_CNTL_4
+ 0x00000000, // SPI_PS_INPUT_CNTL_5
+ 0x00000000, // SPI_PS_INPUT_CNTL_6
+ 0x00000000, // SPI_PS_INPUT_CNTL_7
+ 0x00000000, // SPI_PS_INPUT_CNTL_8
+ 0x00000000, // SPI_PS_INPUT_CNTL_9
+ 0x00000000, // SPI_PS_INPUT_CNTL_10
+ 0x00000000, // SPI_PS_INPUT_CNTL_11
+ 0x00000000, // SPI_PS_INPUT_CNTL_12
+ 0x00000000, // SPI_PS_INPUT_CNTL_13
+ 0x00000000, // SPI_PS_INPUT_CNTL_14
+ 0x00000000, // SPI_PS_INPUT_CNTL_15
+ 0x00000000, // SPI_PS_INPUT_CNTL_16
+ 0x00000000, // SPI_PS_INPUT_CNTL_17
+ 0x00000000, // SPI_PS_INPUT_CNTL_18
+ 0x00000000, // SPI_PS_INPUT_CNTL_19
+ 0x00000000, // SPI_PS_INPUT_CNTL_20
+ 0x00000000, // SPI_PS_INPUT_CNTL_21
+ 0x00000000, // SPI_PS_INPUT_CNTL_22
+ 0x00000000, // SPI_PS_INPUT_CNTL_23
+ 0x00000000, // SPI_PS_INPUT_CNTL_24
+ 0x00000000, // SPI_PS_INPUT_CNTL_25
+ 0x00000000, // SPI_PS_INPUT_CNTL_26
+ 0x00000000, // SPI_PS_INPUT_CNTL_27
+ 0x00000000, // SPI_PS_INPUT_CNTL_28
+ 0x00000000, // SPI_PS_INPUT_CNTL_29
+ 0x00000000, // SPI_PS_INPUT_CNTL_30
+ 0x00000000, // SPI_PS_INPUT_CNTL_31
+ 0x00000000, // SPI_VS_OUT_CONFIG
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_ENA
+ 0x00000000, // SPI_PS_INPUT_ADDR
+ 0x00000000, // SPI_INTERP_CONTROL_0
+ 0x00000002, // SPI_PS_IN_CONTROL
+ 0, // HOLE
+ 0x00000000, // SPI_BARYC_CNTL
+ 0, // HOLE
+ 0x00000000, // SPI_TMPRING_SIZE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_WAVE_MGMT_1
+ 0x00000000, // SPI_WAVE_MGMT_2
+ 0x00000000, // SPI_SHADER_POS_FORMAT
+ 0x00000000, // SPI_SHADER_Z_FORMAT
+ 0x00000000, // SPI_SHADER_COL_FORMAT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_BLEND0_CONTROL
+ 0x00000000, // CB_BLEND1_CONTROL
+ 0x00000000, // CB_BLEND2_CONTROL
+ 0x00000000, // CB_BLEND3_CONTROL
+ 0x00000000, // CB_BLEND4_CONTROL
+ 0x00000000, // CB_BLEND5_CONTROL
+ 0x00000000, // CB_BLEND6_CONTROL
+ 0x00000000, // CB_BLEND7_CONTROL
+};
+static const u32 si_SECT_CONTEXT_def_3[] =
+{
+ 0x00000000, // PA_CL_POINT_X_RAD
+ 0x00000000, // PA_CL_POINT_Y_RAD
+ 0x00000000, // PA_CL_POINT_SIZE
+ 0x00000000, // PA_CL_POINT_CULL_RAD
+ 0x00000000, // VGT_DMA_BASE_HI
+ 0x00000000, // VGT_DMA_BASE
+};
+static const u32 si_SECT_CONTEXT_def_4[] =
+{
+ 0x00000000, // DB_DEPTH_CONTROL
+ 0x00000000, // DB_EQAA
+ 0x00000000, // CB_COLOR_CONTROL
+ 0x00000000, // DB_SHADER_CONTROL
+ 0x00090000, // PA_CL_CLIP_CNTL
+ 0x00000004, // PA_SU_SC_MODE_CNTL
+ 0x00000000, // PA_CL_VTE_CNTL
+ 0x00000000, // PA_CL_VS_OUT_CNTL
+ 0x00000000, // PA_CL_NANINF_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+ 0x00000000, // PA_SU_PRIM_FILTER_CNTL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SU_POINT_SIZE
+ 0x00000000, // PA_SU_POINT_MINMAX
+ 0x00000000, // PA_SU_LINE_CNTL
+ 0x00000000, // PA_SC_LINE_STIPPLE
+ 0x00000000, // VGT_OUTPUT_PATH_CNTL
+ 0x00000000, // VGT_HOS_CNTL
+ 0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+ 0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+ 0x00000000, // VGT_HOS_REUSE_DEPTH
+ 0x00000000, // VGT_GROUP_PRIM_TYPE
+ 0x00000000, // VGT_GROUP_FIRST_DECR
+ 0x00000000, // VGT_GROUP_DECR
+ 0x00000000, // VGT_GROUP_VECT_0_CNTL
+ 0x00000000, // VGT_GROUP_VECT_1_CNTL
+ 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+ 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+ 0x00000000, // VGT_GS_MODE
+ 0, // HOLE
+ 0x00000000, // PA_SC_MODE_CNTL_0
+ 0x00000000, // PA_SC_MODE_CNTL_1
+ 0x00000000, // VGT_ENHANCE
+ 0x00000100, // VGT_GS_PER_ES
+ 0x00000080, // VGT_ES_PER_GS
+ 0x00000002, // VGT_GS_PER_VS
+ 0x00000000, // VGT_GSVS_RING_OFFSET_1
+ 0x00000000, // VGT_GSVS_RING_OFFSET_2
+ 0x00000000, // VGT_GSVS_RING_OFFSET_3
+ 0x00000000, // VGT_GS_OUT_PRIM_TYPE
+ 0x00000000, // IA_ENHANCE
+};
+static const u32 si_SECT_CONTEXT_def_5[] =
+{
+ 0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const u32 si_SECT_CONTEXT_def_6[] =
+{
+ 0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const u32 si_SECT_CONTEXT_def_7[] =
+{
+ 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_INSTANCE_STEP_RATE_0
+ 0x00000000, // VGT_INSTANCE_STEP_RATE_1
+ 0x000000ff, // IA_MULTI_VGT_PARAM
+ 0x00000000, // VGT_ESGS_RING_ITEMSIZE
+ 0x00000000, // VGT_GSVS_RING_ITEMSIZE
+ 0x00000000, // VGT_REUSE_OFF
+ 0x00000000, // VGT_VTX_CNT_EN
+ 0x00000000, // DB_HTILE_SURFACE
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE0
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE1
+ 0x00000000, // DB_PRELOAD_CONTROL
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+ 0, // HOLE
+ 0x00000000, // VGT_GS_MAX_VERT_OUT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_SHADER_STAGES_EN
+ 0x00000000, // VGT_LS_HS_CONFIG
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+ 0x00000000, // VGT_TF_PARAM
+ 0x00000000, // DB_ALPHA_TO_MASK
+ 0, // HOLE
+ 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+ 0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+ 0x00000000, // VGT_GS_INSTANCE_CNT
+ 0x00000000, // VGT_STRMOUT_CONFIG
+ 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_0
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_1
+ 0x00001000, // PA_SC_LINE_CNTL
+ 0x00000000, // PA_SC_AA_CONFIG
+ 0x00000005, // PA_SU_VTX_CNTL
+ 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+ 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+ 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+ 0x00000010, // VGT_OUT_DEALLOC_CNTL
+ 0x00000000, // CB_COLOR0_BASE
+ 0x00000000, // CB_COLOR0_PITCH
+ 0x00000000, // CB_COLOR0_SLICE
+ 0x00000000, // CB_COLOR0_VIEW
+ 0x00000000, // CB_COLOR0_INFO
+ 0x00000000, // CB_COLOR0_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR0_CMASK
+ 0x00000000, // CB_COLOR0_CMASK_SLICE
+ 0x00000000, // CB_COLOR0_FMASK
+ 0x00000000, // CB_COLOR0_FMASK_SLICE
+ 0x00000000, // CB_COLOR0_CLEAR_WORD0
+ 0x00000000, // CB_COLOR0_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_BASE
+ 0x00000000, // CB_COLOR1_PITCH
+ 0x00000000, // CB_COLOR1_SLICE
+ 0x00000000, // CB_COLOR1_VIEW
+ 0x00000000, // CB_COLOR1_INFO
+ 0x00000000, // CB_COLOR1_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_CMASK
+ 0x00000000, // CB_COLOR1_CMASK_SLICE
+ 0x00000000, // CB_COLOR1_FMASK
+ 0x00000000, // CB_COLOR1_FMASK_SLICE
+ 0x00000000, // CB_COLOR1_CLEAR_WORD0
+ 0x00000000, // CB_COLOR1_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_BASE
+ 0x00000000, // CB_COLOR2_PITCH
+ 0x00000000, // CB_COLOR2_SLICE
+ 0x00000000, // CB_COLOR2_VIEW
+ 0x00000000, // CB_COLOR2_INFO
+ 0x00000000, // CB_COLOR2_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_CMASK
+ 0x00000000, // CB_COLOR2_CMASK_SLICE
+ 0x00000000, // CB_COLOR2_FMASK
+ 0x00000000, // CB_COLOR2_FMASK_SLICE
+ 0x00000000, // CB_COLOR2_CLEAR_WORD0
+ 0x00000000, // CB_COLOR2_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_BASE
+ 0x00000000, // CB_COLOR3_PITCH
+ 0x00000000, // CB_COLOR3_SLICE
+ 0x00000000, // CB_COLOR3_VIEW
+ 0x00000000, // CB_COLOR3_INFO
+ 0x00000000, // CB_COLOR3_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_CMASK
+ 0x00000000, // CB_COLOR3_CMASK_SLICE
+ 0x00000000, // CB_COLOR3_FMASK
+ 0x00000000, // CB_COLOR3_FMASK_SLICE
+ 0x00000000, // CB_COLOR3_CLEAR_WORD0
+ 0x00000000, // CB_COLOR3_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_BASE
+ 0x00000000, // CB_COLOR4_PITCH
+ 0x00000000, // CB_COLOR4_SLICE
+ 0x00000000, // CB_COLOR4_VIEW
+ 0x00000000, // CB_COLOR4_INFO
+ 0x00000000, // CB_COLOR4_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_CMASK
+ 0x00000000, // CB_COLOR4_CMASK_SLICE
+ 0x00000000, // CB_COLOR4_FMASK
+ 0x00000000, // CB_COLOR4_FMASK_SLICE
+ 0x00000000, // CB_COLOR4_CLEAR_WORD0
+ 0x00000000, // CB_COLOR4_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_BASE
+ 0x00000000, // CB_COLOR5_PITCH
+ 0x00000000, // CB_COLOR5_SLICE
+ 0x00000000, // CB_COLOR5_VIEW
+ 0x00000000, // CB_COLOR5_INFO
+ 0x00000000, // CB_COLOR5_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_CMASK
+ 0x00000000, // CB_COLOR5_CMASK_SLICE
+ 0x00000000, // CB_COLOR5_FMASK
+ 0x00000000, // CB_COLOR5_FMASK_SLICE
+ 0x00000000, // CB_COLOR5_CLEAR_WORD0
+ 0x00000000, // CB_COLOR5_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_BASE
+ 0x00000000, // CB_COLOR6_PITCH
+ 0x00000000, // CB_COLOR6_SLICE
+ 0x00000000, // CB_COLOR6_VIEW
+ 0x00000000, // CB_COLOR6_INFO
+ 0x00000000, // CB_COLOR6_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_CMASK
+ 0x00000000, // CB_COLOR6_CMASK_SLICE
+ 0x00000000, // CB_COLOR6_FMASK
+ 0x00000000, // CB_COLOR6_FMASK_SLICE
+ 0x00000000, // CB_COLOR6_CLEAR_WORD0
+ 0x00000000, // CB_COLOR6_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_BASE
+ 0x00000000, // CB_COLOR7_PITCH
+ 0x00000000, // CB_COLOR7_SLICE
+ 0x00000000, // CB_COLOR7_VIEW
+ 0x00000000, // CB_COLOR7_INFO
+ 0x00000000, // CB_COLOR7_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_CMASK
+ 0x00000000, // CB_COLOR7_CMASK_SLICE
+ 0x00000000, // CB_COLOR7_FMASK
+ 0x00000000, // CB_COLOR7_FMASK_SLICE
+ 0x00000000, // CB_COLOR7_CLEAR_WORD0
+ 0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
+static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
+{
+ {si_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+ {si_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
+ {si_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+ {si_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+ {si_SECT_CONTEXT_def_5, 0x0000a2a1, 1 },
+ {si_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+ {si_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+ { NULL, 0, 0 }
+};
+static const struct cs_section_def si_cs_data[] = {
+ { si_SECT_CONTEXT_defs, SECT_CONTEXT },
+ { NULL, SECT_NONE }
+};
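
The register tables above are consumed through the sentinel-terminated arrays at the end of the file: each cs_extent_def names one value table, its starting register offset, and a dword count, and si_cs_data ends with the { NULL, SECT_NONE } entry. A sketch of the usual walk, e.g. to size a clear-state buffer; the struct layouts are assumed to match the radeon/amdgpu clearstate definitions, and PACKET3 header overhead is not counted:

	enum cs_section_id { SECT_NONE, SECT_CONTEXT };	/* minimal stand-in */

	struct cs_extent_def {
		const unsigned int *extent;	/* one value table from above */
		const unsigned int reg_index;	/* first register offset */
		const unsigned int reg_count;	/* number of dwords */
	};

	struct cs_section_def {
		const struct cs_extent_def *section;
		const enum cs_section_id id;
	};

	/* payload dwords only, walking both sentinel-terminated levels */
	static unsigned int si_clearstate_dwords(const struct cs_section_def *cs)
	{
		unsigned int count = 0;
		const struct cs_section_def *s;
		const struct cs_extent_def *e;

		for (s = cs; s->section; s++)
			for (e = s->section; e->extent; e++)
				count += e->reg_count;
		return count;
	}
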
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h b/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h
new file mode 100644
index 000000000000..895c8e2353e3
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __SI_REG_H__
+#define __SI_REG_H__
+
+/* SI */
+#define SI_DC_GPIO_HPD_MASK 0x196c
+#define SI_DC_GPIO_HPD_A 0x196d
+#define SI_DC_GPIO_HPD_EN 0x196e
+#define SI_DC_GPIO_HPD_Y 0x196f
+
+#define SI_GRPH_CONTROL 0x1a01
+# define SI_GRPH_DEPTH(x) (((x) & 0x3) << 0)
+# define SI_GRPH_DEPTH_8BPP 0
+# define SI_GRPH_DEPTH_16BPP 1
+# define SI_GRPH_DEPTH_32BPP 2
+# define SI_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+# define SI_ADDR_SURF_2_BANK 0
+# define SI_ADDR_SURF_4_BANK 1
+# define SI_ADDR_SURF_8_BANK 2
+# define SI_ADDR_SURF_16_BANK 3
+# define SI_GRPH_Z(x) (((x) & 0x3) << 4)
+# define SI_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+# define SI_ADDR_SURF_BANK_WIDTH_1 0
+# define SI_ADDR_SURF_BANK_WIDTH_2 1
+# define SI_ADDR_SURF_BANK_WIDTH_4 2
+# define SI_ADDR_SURF_BANK_WIDTH_8 3
+# define SI_GRPH_FORMAT(x) (((x) & 0x7) << 8)
+/* 8 BPP */
+# define SI_GRPH_FORMAT_INDEXED 0
+/* 16 BPP */
+# define SI_GRPH_FORMAT_ARGB1555 0
+# define SI_GRPH_FORMAT_ARGB565 1
+# define SI_GRPH_FORMAT_ARGB4444 2
+# define SI_GRPH_FORMAT_AI88 3
+# define SI_GRPH_FORMAT_MONO16 4
+# define SI_GRPH_FORMAT_BGRA5551 5
+/* 32 BPP */
+# define SI_GRPH_FORMAT_ARGB8888 0
+# define SI_GRPH_FORMAT_ARGB2101010 1
+# define SI_GRPH_FORMAT_32BPP_DIG 2
+# define SI_GRPH_FORMAT_8B_ARGB2101010 3
+# define SI_GRPH_FORMAT_BGRA1010102 4
+# define SI_GRPH_FORMAT_8B_BGRA1010102 5
+# define SI_GRPH_FORMAT_RGB111110 6
+# define SI_GRPH_FORMAT_BGR101111 7
+# define SI_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+# define SI_ADDR_SURF_BANK_HEIGHT_1 0
+# define SI_ADDR_SURF_BANK_HEIGHT_2 1
+# define SI_ADDR_SURF_BANK_HEIGHT_4 2
+# define SI_ADDR_SURF_BANK_HEIGHT_8 3
+# define SI_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+# define SI_ADDR_SURF_TILE_SPLIT_64B 0
+# define SI_ADDR_SURF_TILE_SPLIT_128B 1
+# define SI_ADDR_SURF_TILE_SPLIT_256B 2
+# define SI_ADDR_SURF_TILE_SPLIT_512B 3
+# define SI_ADDR_SURF_TILE_SPLIT_1KB 4
+# define SI_ADDR_SURF_TILE_SPLIT_2KB 5
+# define SI_ADDR_SURF_TILE_SPLIT_4KB 6
+# define SI_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_8 3
+# define SI_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
+# define SI_GRPH_ARRAY_LINEAR_GENERAL 0
+# define SI_GRPH_ARRAY_LINEAR_ALIGNED 1
+# define SI_GRPH_ARRAY_1D_TILED_THIN1 2
+# define SI_GRPH_ARRAY_2D_TILED_THIN1 4
+# define SI_GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
+# define SI_ADDR_SURF_P2 0
+# define SI_ADDR_SURF_P4_8x16 4
+# define SI_ADDR_SURF_P4_16x16 5
+# define SI_ADDR_SURF_P4_16x32 6
+# define SI_ADDR_SURF_P4_32x32 7
+# define SI_ADDR_SURF_P8_16x16_8x16 8
+# define SI_ADDR_SURF_P8_16x32_8x16 9
+# define SI_ADDR_SURF_P8_32x32_8x16 10
+# define SI_ADDR_SURF_P8_16x32_16x16 11
+# define SI_ADDR_SURF_P8_32x32_16x16 12
+# define SI_ADDR_SURF_P8_32x32_16x32 13
+# define SI_ADDR_SURF_P8_32x64_32x32 14
+
+#endif
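
These are plain shift-and-mask field builders, so a complete SI_GRPH_CONTROL value is just the OR of the individual fields. An illustrative composition for a 32bpp ARGB8888 scanout surface (the values are hard-coded here only for the example; the real display code derives every field from the buffer's tiling parameters):

	u32 grph = SI_GRPH_DEPTH(SI_GRPH_DEPTH_32BPP) |
		   SI_GRPH_FORMAT(SI_GRPH_FORMAT_ARGB8888) |
		   SI_GRPH_NUM_BANKS(SI_ADDR_SURF_16_BANK) |
		   SI_GRPH_ARRAY_MODE(SI_GRPH_ARRAY_2D_TILED_THIN1) |
		   SI_GRPH_TILE_SPLIT(SI_ADDR_SURF_TILE_SPLIT_1KB) |
		   SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
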
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
new file mode 100644
index 000000000000..c57eff159374
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
@@ -0,0 +1,2461 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef SI_H
+#define SI_H
+
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
+
+#define SI_MAX_SH_GPRS 256
+#define SI_MAX_TEMP_GPRS 16
+#define SI_MAX_SH_THREADS 256
+#define SI_MAX_SH_STACK_ENTRIES 4096
+#define SI_MAX_FRC_EOV_CNT 16384
+#define SI_MAX_BACKENDS 8
+#define SI_MAX_BACKENDS_MASK 0xFF
+#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F
+#define SI_MAX_SIMDS 12
+#define SI_MAX_SIMDS_MASK 0x0FFF
+#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF
+#define SI_MAX_PIPES 8
+#define SI_MAX_PIPES_MASK 0xFF
+#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F
+#define SI_MAX_LDS_NUM 0xFFFF
+#define SI_MAX_TCC 16
+#define SI_MAX_TCC_MASK 0xFFFF
+
+#define AMDGPU_NUM_OF_VMIDS 8
+
+/* SMC IND accessor regs */
+#define SMC_IND_INDEX_0 0x80
+#define SMC_IND_DATA_0 0x81
+
+#define SMC_IND_ACCESS_CNTL 0x8A
+# define AUTO_INCREMENT_IND_0 (1 << 0)
+#define SMC_MESSAGE_0 0x8B
+#define SMC_RESP_0 0x8C
+
+/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
+#define SMC_CG_IND_START 0xc0030000
+#define SMC_CG_IND_END 0xc0040000
+
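
SMC registers sit behind an index/data window: software writes the SMC address to SMC_IND_INDEX_0 and then accesses SMC_IND_DATA_0; CG registers are reached the same way with SMC_CG_IND_START added. A sketch of the accessor pattern, with RREG32/WREG32 standing in for the driver's MMIO helpers (the real code also holds a spinlock across the index/data pair so concurrent accessors cannot race):

	extern u32 RREG32(u32 reg);		/* stand-in MMIO read */
	extern void WREG32(u32 reg, u32 v);	/* stand-in MMIO write */

	static u32 si_smc_rreg(u32 reg)
	{
		WREG32(SMC_IND_INDEX_0, reg);	/* select the SMC address */
		return RREG32(SMC_IND_DATA_0);	/* read through the window */
	}

	static u32 si_cg_rreg(u32 reg)
	{
		return si_smc_rreg(SMC_CG_IND_START + reg);
	}
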
+#define CG_CGTT_LOCAL_0 0x400
+#define CG_CGTT_LOCAL_1 0x401
+
+/* SMC IND registers */
+#define SMC_SYSCON_RESET_CNTL 0x80000000
+# define RST_REG (1 << 0)
+#define SMC_SYSCON_CLOCK_CNTL_0 0x80000004
+# define CK_DISABLE (1 << 0)
+# define CKEN (1 << 24)
+
+#define VGA_HDP_CONTROL 0xCA
+#define VGA_MEMORY_DISABLE (1 << 4)
+
+#define DCCG_DISP_SLOW_SELECT_REG 0x13F
+#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
+#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
+#define DCCG_DISP1_SLOW_SELECT_SHIFT 0
+#define DCCG_DISP2_SLOW_SELECT(x) ((x) << 4)
+#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
+#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
+
+#define CG_SPLL_FUNC_CNTL 0x180
+#define SPLL_RESET (1 << 0)
+#define SPLL_SLEEP (1 << 1)
+#define SPLL_BYPASS_EN (1 << 3)
+#define SPLL_REF_DIV(x) ((x) << 4)
+#define SPLL_REF_DIV_MASK (0x3f << 4)
+#define SPLL_PDIV_A(x) ((x) << 20)
+#define SPLL_PDIV_A_MASK (0x7f << 20)
+#define SPLL_PDIV_A_SHIFT 20
+#define CG_SPLL_FUNC_CNTL_2 0x181
+#define SCLK_MUX_SEL(x) ((x) << 0)
+#define SCLK_MUX_SEL_MASK (0x1ff << 0)
+#define SPLL_CTLREQ_CHG (1 << 23)
+#define SCLK_MUX_UPDATE (1 << 26)
+#define CG_SPLL_FUNC_CNTL_3 0x182
+#define SPLL_FB_DIV(x) ((x) << 0)
+#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
+#define SPLL_FB_DIV_SHIFT 0
+#define SPLL_DITHEN (1 << 28)
+#define CG_SPLL_FUNC_CNTL_4 0x183
+
+#define SPLL_STATUS 0x185
+#define SPLL_CHG_STATUS (1 << 1)
+#define SPLL_CNTL_MODE 0x186
+#define SPLL_SW_DIR_CONTROL (1 << 0)
+# define SPLL_REFCLK_SEL(x) ((x) << 26)
+# define SPLL_REFCLK_SEL_MASK (3 << 26)
+
+#define CG_SPLL_SPREAD_SPECTRUM 0x188
+#define SSEN (1 << 0)
+#define CLK_S(x) ((x) << 4)
+#define CLK_S_MASK (0xfff << 4)
+#define CLK_S_SHIFT 4
+#define CG_SPLL_SPREAD_SPECTRUM_2 0x189
+#define CLK_V(x) ((x) << 0)
+#define CLK_V_MASK (0x3ffffff << 0)
+#define CLK_V_SHIFT 0
+
+#define CG_SPLL_AUTOSCALE_CNTL 0x18b
+# define AUTOSCALE_ON_SS_CLEAR (1 << 9)
+
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL 0x18d
+# define UPLL_RESET_MASK 0x00000001
+# define UPLL_SLEEP_MASK 0x00000002
+# define UPLL_BYPASS_EN_MASK 0x00000004
+# define UPLL_CTLREQ_MASK 0x00000008
+# define UPLL_VCO_MODE_MASK 0x00000600
+# define UPLL_REF_DIV_MASK 0x003F0000
+# define UPLL_CTLACK_MASK 0x40000000
+# define UPLL_CTLACK2_MASK 0x80000000
+#define CG_UPLL_FUNC_CNTL_2 0x18e
+# define UPLL_PDIV_A(x) ((x) << 0)
+# define UPLL_PDIV_A_MASK 0x0000007F
+# define UPLL_PDIV_B(x) ((x) << 8)
+# define UPLL_PDIV_B_MASK 0x00007F00
+# define VCLK_SRC_SEL(x) ((x) << 20)
+# define VCLK_SRC_SEL_MASK 0x01F00000
+# define DCLK_SRC_SEL(x) ((x) << 25)
+# define DCLK_SRC_SEL_MASK 0x3E000000
+#define CG_UPLL_FUNC_CNTL_3 0x18f
+# define UPLL_FB_DIV(x) ((x) << 0)
+# define UPLL_FB_DIV_MASK 0x01FFFFFF
+#define CG_UPLL_FUNC_CNTL_4 0x191
+# define UPLL_SPARE_ISPARE9 0x00020000
+#define CG_UPLL_FUNC_CNTL_5 0x192
+# define RESET_ANTI_MUX_MASK 0x00000200
+#define CG_UPLL_SPREAD_SPECTRUM 0x194
+# define SSEN_MASK 0x00000001
+
+#define MPLL_BYPASSCLK_SEL 0x197
+# define MPLL_CLKOUT_SEL(x) ((x) << 8)
+# define MPLL_CLKOUT_SEL_MASK 0xFF00
+
+#define CG_CLKPIN_CNTL 0x198
+# define XTALIN_DIVIDE (1 << 1)
+# define BCLK_AS_XCLK (1 << 2)
+#define CG_CLKPIN_CNTL_2 0x199
+# define FORCE_BIF_REFCLK_EN (1 << 3)
+# define MUX_TCLK_TO_XCLK (1 << 8)
+
+#define THM_CLK_CNTL 0x19b
+# define CMON_CLK_SEL(x) ((x) << 0)
+# define CMON_CLK_SEL_MASK 0xFF
+# define TMON_CLK_SEL(x) ((x) << 8)
+# define TMON_CLK_SEL_MASK 0xFF00
+#define MISC_CLK_CNTL 0x19c
+# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
+# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
+# define ZCLK_SEL(x) ((x) << 8)
+# define ZCLK_SEL_MASK 0xFF00
+
+#define CG_THERMAL_CTRL 0x1c0
+#define DPM_EVENT_SRC(x) ((x) << 0)
+#define DPM_EVENT_SRC_MASK (7 << 0)
+#define DIG_THERM_DPM(x) ((x) << 14)
+#define DIG_THERM_DPM_MASK 0x003FC000
+#define DIG_THERM_DPM_SHIFT 14
+#define CG_THERMAL_STATUS 0x1c1
+#define FDO_PWM_DUTY(x) ((x) << 9)
+#define FDO_PWM_DUTY_MASK (0xff << 9)
+#define FDO_PWM_DUTY_SHIFT 9
+#define CG_THERMAL_INT 0x1c2
+#define DIG_THERM_INTH(x) ((x) << 8)
+#define DIG_THERM_INTH_MASK 0x0000FF00
+#define DIG_THERM_INTH_SHIFT 8
+#define DIG_THERM_INTL(x) ((x) << 16)
+#define DIG_THERM_INTL_MASK 0x00FF0000
+#define DIG_THERM_INTL_SHIFT 16
+#define THERM_INT_MASK_HIGH (1 << 24)
+#define THERM_INT_MASK_LOW (1 << 25)
+
+#define CG_MULT_THERMAL_CTRL 0x1c4
+#define TEMP_SEL(x) ((x) << 20)
+#define TEMP_SEL_MASK (0xff << 20)
+#define TEMP_SEL_SHIFT 20
+#define CG_MULT_THERMAL_STATUS 0x1c5
+#define ASIC_MAX_TEMP(x) ((x) << 0)
+#define ASIC_MAX_TEMP_MASK 0x000001ff
+#define ASIC_MAX_TEMP_SHIFT 0
+#define CTF_TEMP(x) ((x) << 9)
+#define CTF_TEMP_MASK 0x0003fe00
+#define CTF_TEMP_SHIFT 9
+
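
CTF_TEMP is a 9-bit field at bit 9, so reading the current temperature is a mask and shift; on SI the raw value is in degrees Celsius (the DPM code layers extra range handling on top). A kernel-style sketch, with RREG32 again standing in for the MMIO read helper:

	extern u32 RREG32(u32 reg);	/* stand-in MMIO read */

	static u32 si_current_temp(void)
	{
		u32 status = RREG32(CG_MULT_THERMAL_STATUS);

		return (status & CTF_TEMP_MASK) >> CTF_TEMP_SHIFT; /* deg C */
	}
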
+#define CG_FDO_CTRL0 0x1d5
+#define FDO_STATIC_DUTY(x) ((x) << 0)
+#define FDO_STATIC_DUTY_MASK 0x000000FF
+#define FDO_STATIC_DUTY_SHIFT 0
+#define CG_FDO_CTRL1 0x1d6
+#define FMAX_DUTY100(x) ((x) << 0)
+#define FMAX_DUTY100_MASK 0x000000FF
+#define FMAX_DUTY100_SHIFT 0
+#define CG_FDO_CTRL2 0x1d7
+#define TMIN(x) ((x) << 0)
+#define TMIN_MASK 0x000000FF
+#define TMIN_SHIFT 0
+#define FDO_PWM_MODE(x) ((x) << 11)
+#define FDO_PWM_MODE_MASK (7 << 11)
+#define FDO_PWM_MODE_SHIFT 11
+#define TACH_PWM_RESP_RATE(x) ((x) << 25)
+#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
+#define TACH_PWM_RESP_RATE_SHIFT 25
+
+#define CG_TACH_CTRL 0x1dc
+# define EDGE_PER_REV(x) ((x) << 0)
+# define EDGE_PER_REV_MASK (0x7 << 0)
+# define EDGE_PER_REV_SHIFT 0
+# define TARGET_PERIOD(x) ((x) << 3)
+# define TARGET_PERIOD_MASK 0xfffffff8
+# define TARGET_PERIOD_SHIFT 3
+#define CG_TACH_STATUS 0x1dd
+# define TACH_PERIOD(x) ((x) << 0)
+# define TACH_PERIOD_MASK 0xffffffff
+# define TACH_PERIOD_SHIFT 0
+
+#define GENERAL_PWRMGT 0x1e0
+# define GLOBAL_PWRMGT_EN (1 << 0)
+# define STATIC_PM_EN (1 << 1)
+# define THERMAL_PROTECTION_DIS (1 << 2)
+# define THERMAL_PROTECTION_TYPE (1 << 3)
+# define SW_SMIO_INDEX(x) ((x) << 6)
+# define SW_SMIO_INDEX_MASK (1 << 6)
+# define SW_SMIO_INDEX_SHIFT 6
+# define VOLT_PWRMGT_EN (1 << 10)
+# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
+#define CG_TPC 0x1e1
+#define SCLK_PWRMGT_CNTL 0x1e2
+# define SCLK_PWRMGT_OFF (1 << 0)
+# define SCLK_LOW_D1 (1 << 1)
+# define FIR_RESET (1 << 4)
+# define FIR_FORCE_TREND_SEL (1 << 5)
+# define FIR_TREND_MODE (1 << 6)
+# define DYN_GFX_CLK_OFF_EN (1 << 7)
+# define GFX_CLK_FORCE_ON (1 << 8)
+# define GFX_CLK_REQUEST_OFF (1 << 9)
+# define GFX_CLK_FORCE_OFF (1 << 10)
+# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
+# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
+# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
+# define DYN_LIGHT_SLEEP_EN (1 << 14)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX 0x1e6
+# define CURRENT_STATE_INDEX_MASK (0xf << 4)
+# define CURRENT_STATE_INDEX_SHIFT 4
+
+#define CG_FTV 0x1ef
+
+#define CG_FFCT_0 0x1f0
+# define UTC_0(x) ((x) << 0)
+# define UTC_0_MASK (0x3ff << 0)
+# define DTC_0(x) ((x) << 10)
+# define DTC_0_MASK (0x3ff << 10)
+
+#define CG_BSP 0x1ff
+# define BSP(x) ((x) << 0)
+# define BSP_MASK (0xffff << 0)
+# define BSU(x) ((x) << 16)
+# define BSU_MASK (0xf << 16)
+#define CG_AT 0x200
+# define CG_R(x) ((x) << 0)
+# define CG_R_MASK (0xffff << 0)
+# define CG_L(x) ((x) << 16)
+# define CG_L_MASK (0xffff << 16)
+
+#define CG_GIT 0x201
+# define CG_GICST(x) ((x) << 0)
+# define CG_GICST_MASK (0xffff << 0)
+# define CG_GIPOT(x) ((x) << 16)
+# define CG_GIPOT_MASK (0xffff << 16)
+
+#define CG_SSP 0x203
+# define SST(x) ((x) << 0)
+# define SST_MASK (0xffff << 0)
+# define SSTU(x) ((x) << 16)
+# define SSTU_MASK (0xf << 16)
+
+#define CG_DISPLAY_GAP_CNTL 0x20a
+# define DISP1_GAP(x) ((x) << 0)
+# define DISP1_GAP_MASK (3 << 0)
+# define DISP2_GAP(x) ((x) << 2)
+# define DISP2_GAP_MASK (3 << 2)
+# define VBI_TIMER_COUNT(x) ((x) << 4)
+# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
+# define VBI_TIMER_UNIT(x) ((x) << 20)
+# define VBI_TIMER_UNIT_MASK (7 << 20)
+# define DISP1_GAP_MCHG(x) ((x) << 24)
+# define DISP1_GAP_MCHG_MASK (3 << 24)
+# define DISP2_GAP_MCHG(x) ((x) << 26)
+# define DISP2_GAP_MCHG_MASK (3 << 26)
+
+#define CG_ULV_CONTROL 0x21e
+#define CG_ULV_PARAMETER 0x21f
+
+#define SMC_SCRATCH0 0x221
+
+#define CG_CAC_CTRL 0x22e
+# define CAC_WINDOW(x) ((x) << 0)
+# define CAC_WINDOW_MASK 0x00ffffff
+
+#define DMIF_ADDR_CONFIG 0x2F5
+
+#define DMIF_ADDR_CALC 0x300
+
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0328
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
+#define SRBM_STATUS 0x394
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define IH_BUSY (1 << 17)
+
+#define SRBM_SOFT_RESET 0x398
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_DMA1 (1 << 6)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
+#define CC_SYS_RB_BACKEND_DISABLE 0x3A0
+#define GC_USER_SYS_RB_BACKEND_DISABLE 0x3A1
+
+#define SRBM_READ_ERROR 0x3A6
+#define SRBM_INT_CNTL 0x3A8
+#define SRBM_INT_ACK 0x3AA
+
+#define SRBM_STATUS2 0x3B1
+#define DMA_BUSY (1 << 5)
+#define DMA1_BUSY (1 << 6)
+
+#define VM_L2_CNTL 0x500
+#define ENABLE_L2_CACHE (1 << 0)
+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
+#define L2_CACHE_PTE_ENDIAN_SWAP_MODE(x) ((x) << 2)
+#define L2_CACHE_PDE_ENDIAN_SWAP_MODE(x) ((x) << 4)
+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
+#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 15)
+#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 19)
+#define VM_L2_CNTL2 0x501
+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
+#define INVALIDATE_L2_CACHE (1 << 1)
+#define INVALIDATE_CACHE_MODE(x) ((x) << 26)
+#define INVALIDATE_PTE_AND_PDE_CACHES 0
+#define INVALIDATE_ONLY_PTE_CACHES 1
+#define INVALIDATE_ONLY_PDE_CACHES 2
+#define VM_L2_CNTL3 0x502
+#define BANK_SELECT(x) ((x) << 0)
+#define L2_CACHE_UPDATE_MODE(x) ((x) << 6)
+#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
+#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
+#define VM_L2_STATUS 0x503
+#define L2_BUSY (1 << 0)
+#define VM_CONTEXT0_CNTL 0x504
+#define ENABLE_CONTEXT (1 << 0)
+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
+#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
+#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
+#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
+#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
+#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
+#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
+#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
+#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
+#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
+#define VM_CONTEXT1_CNTL 0x505
+#define VM_CONTEXT0_CNTL2 0x50C
+#define VM_CONTEXT1_CNTL2 0x50D
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x50E
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR 0x50F
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR 0x510
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR 0x511
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR 0x512
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR 0x513
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x514
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x515
+
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x53f
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x537
+#define PROTECTIONS_MASK (0xf << 0)
+#define PROTECTIONS_SHIFT 0
+ /* bit 0: range
+ * bit 1: pde0
+ * bit 2: valid
+ * bit 3: read
+ * bit 4: write
+ */
+#define MEMORY_CLIENT_ID_MASK (0xff << 12)
+#define MEMORY_CLIENT_ID_SHIFT 12
+#define MEMORY_CLIENT_RW_MASK (1 << 24)
+#define MEMORY_CLIENT_RW_SHIFT 24
+#define FAULT_VMID_MASK (0xf << 25)
+#define FAULT_VMID_SHIFT 25
+
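
On a VM protection fault, the status register packs the protection bits, the failing memory client, the access direction, and the VMID into one dword, decoded with the masks and shifts above. A kernel-style sketch in the spirit of the GMC fault handlers (the message format is illustrative):

	extern u32 RREG32(u32 reg);	/* stand-in MMIO read */

	static void si_dump_vm_fault(void)
	{
		u32 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
		u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
		u32 client = (status & MEMORY_CLIENT_ID_MASK) >>
			     MEMORY_CLIENT_ID_SHIFT;
		u32 rw = (status & MEMORY_CLIENT_RW_MASK) >>
			 MEMORY_CLIENT_RW_SHIFT;
		u32 prot = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;

		pr_err("VM fault: vmid %u, client 0x%02x, %s, protections 0x%x\n",
		       vmid, client, rw ? "write" : "read", prot);
	}
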
+#define VM_INVALIDATE_REQUEST 0x51E
+#define VM_INVALIDATE_RESPONSE 0x51F
+
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x546
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x547
+
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x54F
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR 0x550
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR 0x551
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR 0x552
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR 0x553
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR 0x554
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR 0x555
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR 0x556
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x557
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR 0x558
+
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x55F
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x560
+
+#define VM_L2_CG 0x570
+#define MC_CG_ENABLE (1 << 18)
+#define MC_LS_ENABLE (1 << 19)
+
+#define MC_SHARED_CHMAP 0x801
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x0000f000
+#define MC_SHARED_CHREMAP 0x802
+
+#define MC_VM_FB_LOCATION 0x809
+#define MC_VM_AGP_TOP 0x80A
+#define MC_VM_AGP_BOT 0x80B
+#define MC_VM_AGP_BASE 0x80C
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x80D
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x80E
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x80F
+
+#define MC_VM_MX_L1_TLB_CNTL 0x819
+#define ENABLE_L1_TLB (1 << 0)
+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
+#define ENABLE_ADVANCED_DRIVER_MODEL (1 << 6)
+
+#define MC_SHARED_BLACKOUT_CNTL 0x82B
+
+#define MC_HUB_MISC_HUB_CG 0x82E
+#define MC_HUB_MISC_VM_CG 0x82F
+
+#define MC_HUB_MISC_SIP_CG 0x830
+
+#define MC_XPB_CLK_GAT 0x91E
+
+#define MC_CITF_MISC_RD_CG 0x992
+#define MC_CITF_MISC_WR_CG 0x993
+#define MC_CITF_MISC_VM_CG 0x994
+
+#define MC_ARB_RAMCFG 0x9D8
+#define NOOFBANK_SHIFT 0
+#define NOOFBANK_MASK 0x00000003
+#define NOOFRANK_SHIFT 2
+#define NOOFRANK_MASK 0x00000004
+#define NOOFROWS_SHIFT 3
+#define NOOFROWS_MASK 0x00000038
+#define NOOFCOLS_SHIFT 6
+#define NOOFCOLS_MASK 0x000000C0
+#define CHANSIZE_SHIFT 8
+#define CHANSIZE_MASK 0x00000100
+#define CHANSIZE_OVERRIDE (1 << 11)
+#define NOOFGROUPS_SHIFT 12
+#define NOOFGROUPS_MASK 0x00001000
+
+#define MC_ARB_DRAM_TIMING 0x9DD
+#define MC_ARB_DRAM_TIMING2 0x9DE
+
+#define MC_ARB_BURST_TIME 0xA02
+#define STATE0(x) ((x) << 0)
+#define STATE0_MASK (0x1f << 0)
+#define STATE0_SHIFT 0
+#define STATE1(x) ((x) << 5)
+#define STATE1_MASK (0x1f << 5)
+#define STATE1_SHIFT 5
+#define STATE2(x) ((x) << 10)
+#define STATE2_MASK (0x1f << 10)
+#define STATE2_SHIFT 10
+#define STATE3(x) ((x) << 15)
+#define STATE3_MASK (0x1f << 15)
+#define STATE3_SHIFT 15
+
+#define MC_SEQ_TRAIN_WAKEUP_CNTL 0xA3A
+#define TRAIN_DONE_D0 (1 << 30)
+#define TRAIN_DONE_D1 (1 << 31)
+
+#define MC_SEQ_SUP_CNTL 0xA32
+#define RUN_MASK (1 << 0)
+#define MC_SEQ_SUP_PGM 0xA33
+#define MC_PMG_AUTO_CMD 0xA34
+
+#define MC_IO_PAD_CNTL_D0 0xA74
+#define MEM_FALL_OUT_CMD (1 << 8)
+
+#define MC_SEQ_RAS_TIMING 0xA28
+#define MC_SEQ_CAS_TIMING 0xA29
+#define MC_SEQ_MISC_TIMING 0xA2A
+#define MC_SEQ_MISC_TIMING2 0xA2B
+#define MC_SEQ_PMG_TIMING 0xA2C
+#define MC_SEQ_RD_CTL_D0 0xA2D
+#define MC_SEQ_RD_CTL_D1 0xA2E
+#define MC_SEQ_WR_CTL_D0 0xA2F
+#define MC_SEQ_WR_CTL_D1 0xA30
+
+#define MC_SEQ_MISC0 0xA80
+#define MC_SEQ_MISC0_VEN_ID_SHIFT 8
+#define MC_SEQ_MISC0_VEN_ID_MASK 0x00000f00
+#define MC_SEQ_MISC0_VEN_ID_VALUE 3
+#define MC_SEQ_MISC0_REV_ID_SHIFT 12
+#define MC_SEQ_MISC0_REV_ID_MASK 0x0000f000
+#define MC_SEQ_MISC0_REV_ID_VALUE 1
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+#define MC_SEQ_MISC1 0xA81
+#define MC_SEQ_RESERVE_M 0xA82
+#define MC_PMG_CMD_EMRS 0xA83
+
+#define MC_SEQ_IO_DEBUG_INDEX 0xA91
+#define MC_SEQ_IO_DEBUG_DATA 0xA92
+
+#define MC_SEQ_MISC5 0xA95
+#define MC_SEQ_MISC6 0xA96
+
+#define MC_SEQ_MISC7 0xA99
+
+#define MC_SEQ_RAS_TIMING_LP 0xA9B
+#define MC_SEQ_CAS_TIMING_LP 0xA9C
+#define MC_SEQ_MISC_TIMING_LP 0xA9D
+#define MC_SEQ_MISC_TIMING2_LP 0xA9E
+#define MC_SEQ_WR_CTL_D0_LP 0xA9F
+#define MC_SEQ_WR_CTL_D1_LP 0xAA0
+#define MC_SEQ_PMG_CMD_EMRS_LP 0xAA1
+#define MC_SEQ_PMG_CMD_MRS_LP 0xAA2
+
+#define MC_PMG_CMD_MRS 0xAAB
+
+#define MC_SEQ_RD_CTL_D0_LP 0xAC7
+#define MC_SEQ_RD_CTL_D1_LP 0xAC8
+
+#define MC_PMG_CMD_MRS1 0xAD1
+#define MC_SEQ_PMG_CMD_MRS1_LP 0xAD2
+#define MC_SEQ_PMG_TIMING_LP 0xAD3
+
+#define MC_SEQ_WR_CTL_2 0xAD5
+#define MC_SEQ_WR_CTL_2_LP 0xAD6
+#define MC_PMG_CMD_MRS2 0xAD7
+#define MC_SEQ_PMG_CMD_MRS2_LP 0xAD8
+
+#define MCLK_PWRMGT_CNTL 0xAE8
+# define DLL_SPEED(x) ((x) << 0)
+# define DLL_SPEED_MASK (0x1f << 0)
+# define DLL_READY (1 << 6)
+# define MC_INT_CNTL (1 << 7)
+# define MRDCK0_PDNB (1 << 8)
+# define MRDCK1_PDNB (1 << 9)
+# define MRDCK0_RESET (1 << 16)
+# define MRDCK1_RESET (1 << 17)
+# define DLL_READY_READ (1 << 24)
+#define DLL_CNTL 0xAE9
+# define MRDCK0_BYPASS (1 << 24)
+# define MRDCK1_BYPASS (1 << 25)
+
+#define MPLL_CNTL_MODE 0xAEC
+# define MPLL_MCLK_SEL (1 << 11)
+#define MPLL_FUNC_CNTL 0xAED
+#define BWCTRL(x) ((x) << 20)
+#define BWCTRL_MASK (0xff << 20)
+#define MPLL_FUNC_CNTL_1 0xAEE
+#define VCO_MODE(x) ((x) << 0)
+#define VCO_MODE_MASK (3 << 0)
+#define CLKFRAC(x) ((x) << 4)
+#define CLKFRAC_MASK (0xfff << 4)
+#define CLKF(x) ((x) << 16)
+#define CLKF_MASK (0xfff << 16)
+#define MPLL_FUNC_CNTL_2 0xAEF
+#define MPLL_AD_FUNC_CNTL 0xAF0
+#define YCLK_POST_DIV(x) ((x) << 0)
+#define YCLK_POST_DIV_MASK (7 << 0)
+#define MPLL_DQ_FUNC_CNTL 0xAF1
+#define YCLK_SEL(x) ((x) << 4)
+#define YCLK_SEL_MASK (1 << 4)
+
+#define MPLL_SS1 0xAF3
+#define CLKV(x) ((x) << 0)
+#define CLKV_MASK (0x3ffffff << 0)
+#define MPLL_SS2 0xAF4
+#define CLKS(x) ((x) << 0)
+#define CLKS_MASK (0xfff << 0)
+
+#define HDP_HOST_PATH_CNTL 0xB00
+#define CLOCK_GATING_DIS (1 << 23)
+#define HDP_NONSURFACE_BASE 0xB01
+#define HDP_NONSURFACE_INFO 0xB02
+#define HDP_NONSURFACE_SIZE 0xB03
+
+#define HDP_DEBUG0 0xBCC
+
+#define HDP_ADDR_CONFIG 0xBD2
+#define HDP_MISC_CNTL 0xBD3
+#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
+#define HDP_MEM_POWER_LS 0xBD4
+#define HDP_LS_ENABLE (1 << 0)
+
+#define ATC_MISC_CG 0xCD4
+
+#define IH_RB_CNTL 0xF80
+# define IH_RB_ENABLE (1 << 0)
+# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
+#define IH_RB_BASE 0xF81
+#define IH_RB_RPTR 0xF82
+#define IH_RB_WPTR 0xF83
+# define RB_OVERFLOW (1 << 0)
+# define WPTR_OFFSET_MASK 0x3fffc
+#define IH_RB_WPTR_ADDR_HI 0xF84
+#define IH_RB_WPTR_ADDR_LO 0xF85
+#define IH_CNTL 0xF86
+# define ENABLE_INTR (1 << 0)
+# define IH_MC_SWAP(x) ((x) << 1)
+# define IH_MC_SWAP_NONE 0
+# define IH_MC_SWAP_16BIT 1
+# define IH_MC_SWAP_32BIT 2
+# define IH_MC_SWAP_64BIT 3
+# define RPTR_REARM (1 << 4)
+# define MC_WRREQ_CREDIT(x) ((x) << 15)
+# define MC_WR_CLEAN_CNT(x) ((x) << 20)
+# define MC_VMID(x) ((x) << 25)
+
+#define CONFIG_MEMSIZE 0x150A
+
+#define INTERRUPT_CNTL 0x151A
+# define IH_DUMMY_RD_OVERRIDE (1 << 0)
+# define IH_DUMMY_RD_EN (1 << 1)
+# define IH_REQ_NONSNOOP_EN (1 << 3)
+# define GEN_IH_INT_EN (1 << 8)
+#define INTERRUPT_CNTL2 0x151B
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x1520
+
+#define BIF_FB_EN 0x1524
+#define FB_READ_EN (1 << 0)
+#define FB_WRITE_EN (1 << 1)
+
+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x1528
+
+/* DCE6 ELD audio interface */
+#define AZ_F0_CODEC_ENDPOINT_INDEX 0x1780
+# define AZ_ENDPOINT_REG_INDEX(x) (((x) & 0xff) << 0)
+# define AZ_ENDPOINT_REG_WRITE_EN (1 << 8)
+#define AZ_F0_CODEC_ENDPOINT_DATA 0x1781
+
+#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x25
+#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
+#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
+#define SPEAKER_ALLOCATION_SHIFT 0
+#define HDMI_CONNECTION (1 << 16)
+#define DP_CONNECTION (1 << 17)
+
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2B /* MP3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2C /* MPEG2 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2D /* AAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2E /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2F /* ATRAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32 /* DTS-HD */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34 /* DST */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35 /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x37
+# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
+# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0 = invalid
+ * x = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x38
+# define HBR_CAPABLE (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x3a
+# define MANUFACTURER_ID(x) (((x) & 0xffff) << 0)
+# define PRODUCT_ID(x) (((x) & 0xffff) << 16)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x3b
+# define SINK_DESCRIPTION_LEN(x) (((x) & 0xff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x3c
+# define PORT_ID0(x) (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x3d
+# define PORT_ID1(x) (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x3e
+# define DESCRIPTION0(x) (((x) & 0xff) << 0)
+# define DESCRIPTION1(x) (((x) & 0xff) << 8)
+# define DESCRIPTION2(x) (((x) & 0xff) << 16)
+# define DESCRIPTION3(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x3f
+# define DESCRIPTION4(x) (((x) & 0xff) << 0)
+# define DESCRIPTION5(x) (((x) & 0xff) << 8)
+# define DESCRIPTION6(x) (((x) & 0xff) << 16)
+# define DESCRIPTION7(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x40
+# define DESCRIPTION8(x) (((x) & 0xff) << 0)
+# define DESCRIPTION9(x) (((x) & 0xff) << 8)
+# define DESCRIPTION10(x) (((x) & 0xff) << 16)
+# define DESCRIPTION11(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x41
+# define DESCRIPTION12(x) (((x) & 0xff) << 0)
+# define DESCRIPTION13(x) (((x) & 0xff) << 8)
+# define DESCRIPTION14(x) (((x) & 0xff) << 16)
+# define DESCRIPTION15(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x42
+# define DESCRIPTION16(x) (((x) & 0xff) << 0)
+# define DESCRIPTION17(x) (((x) & 0xff) << 8)
+
+#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x54
+# define AUDIO_ENABLED (1 << 31)
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
+#define PORT_CONNECTIVITY_MASK (3 << 30)
+#define PORT_CONNECTIVITY_SHIFT 30
+
+#define DC_LB_MEMORY_SPLIT 0x1AC3
+#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
+
+#define PRIORITY_A_CNT 0x1AC6
+#define PRIORITY_MARK_MASK 0x7fff
+#define PRIORITY_OFF (1 << 16)
+#define PRIORITY_ALWAYS_ON (1 << 20)
+#define PRIORITY_B_CNT 0x1AC7
+
+#define DPG_PIPE_ARBITRATION_CONTROL3 0x1B32
+# define LATENCY_WATERMARK_MASK(x) ((x) << 16)
+#define DPG_PIPE_LATENCY_CONTROL 0x1B33
+# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
+# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS 0x1AEE
+# define VLINE_OCCURRED (1 << 0)
+# define VLINE_ACK (1 << 4)
+# define VLINE_STAT (1 << 12)
+# define VLINE_INTERRUPT (1 << 16)
+# define VLINE_INTERRUPT_TYPE (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS 0x1AEF
+# define VBLANK_OCCURRED (1 << 0)
+# define VBLANK_ACK (1 << 4)
+# define VBLANK_STAT (1 << 12)
+# define VBLANK_INTERRUPT (1 << 16)
+# define VBLANK_INTERRUPT_TYPE (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK 0x1AD0
+# define VBLANK_INT_MASK (1 << 0)
+# define VLINE_INT_MASK (1 << 4)
+
+#define DISP_INTERRUPT_STATUS 0x183D
+# define LB_D1_VLINE_INTERRUPT (1 << 2)
+# define LB_D1_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD1_INTERRUPT (1 << 17)
+# define DC_HPD1_RX_INTERRUPT (1 << 18)
+# define DACA_AUTODETECT_INTERRUPT (1 << 22)
+# define DACB_AUTODETECT_INTERRUPT (1 << 23)
+# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
+# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE 0x183E
+# define LB_D2_VLINE_INTERRUPT (1 << 2)
+# define LB_D2_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD2_INTERRUPT (1 << 17)
+# define DC_HPD2_RX_INTERRUPT (1 << 18)
+# define DISP_TIMER_INTERRUPT (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2 0x183F
+# define LB_D3_VLINE_INTERRUPT (1 << 2)
+# define LB_D3_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD3_INTERRUPT (1 << 17)
+# define DC_HPD3_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3 0x1840
+# define LB_D4_VLINE_INTERRUPT (1 << 2)
+# define LB_D4_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD4_INTERRUPT (1 << 17)
+# define DC_HPD4_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4 0x1853
+# define LB_D5_VLINE_INTERRUPT (1 << 2)
+# define LB_D5_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD5_INTERRUPT (1 << 17)
+# define DC_HPD5_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5 0x1854
+# define LB_D6_VLINE_INTERRUPT (1 << 2)
+# define LB_D6_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD6_INTERRUPT (1 << 17)
+# define DC_HPD6_RX_INTERRUPT (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS 0x1A16
+# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define GRPH_PFLIP_INT_CLEAR (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL 0x1A17
+# define GRPH_PFLIP_INT_MASK (1 << 0)
+# define GRPH_PFLIP_INT_TYPE (1 << 8)
+
+#define DAC_AUTODETECT_INT_CONTROL 0x19F2
+
+#define DC_HPD1_INT_STATUS 0x1807
+#define DC_HPD2_INT_STATUS 0x180A
+#define DC_HPD3_INT_STATUS 0x180D
+#define DC_HPD4_INT_STATUS 0x1810
+#define DC_HPD5_INT_STATUS 0x1813
+#define DC_HPD6_INT_STATUS 0x1816
+# define DC_HPDx_INT_STATUS (1 << 0)
+# define DC_HPDx_SENSE (1 << 1)
+# define DC_HPDx_RX_INT_STATUS (1 << 8)
+
+#define DC_HPD1_INT_CONTROL 0x1808
+#define DC_HPD2_INT_CONTROL 0x180B
+#define DC_HPD3_INT_CONTROL 0x180E
+#define DC_HPD4_INT_CONTROL 0x1811
+#define DC_HPD5_INT_CONTROL 0x1814
+#define DC_HPD6_INT_CONTROL 0x1817
+# define DC_HPDx_INT_ACK (1 << 0)
+# define DC_HPDx_INT_POLARITY (1 << 8)
+# define DC_HPDx_INT_EN (1 << 16)
+# define DC_HPDx_RX_INT_ACK (1 << 20)
+# define DC_HPDx_RX_INT_EN (1 << 24)
+
+#define DC_HPD1_CONTROL 0x1809
+#define DC_HPD2_CONTROL 0x180C
+#define DC_HPD3_CONTROL 0x180F
+#define DC_HPD4_CONTROL 0x1812
+#define DC_HPD5_CONTROL 0x1815
+#define DC_HPD6_CONTROL 0x1818
+# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
+# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
+# define DC_HPDx_EN (1 << 28)
+
+#define DPG_PIPE_STUTTER_CONTROL 0x1B35
+# define STUTTER_ENABLE (1 << 0)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT 0x1BA6
+
+/* Audio clocks */
+#define DCCG_AUDIO_DTO_SOURCE 0x05ac
+# define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
+# define DCCG_AUDIO_DTO_SEL (1 << 4) /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE 0x05b0
+#define DCCG_AUDIO_DTO0_MODULE 0x05b4
+#define DCCG_AUDIO_DTO1_PHASE 0x05c0
+#define DCCG_AUDIO_DTO1_MODULE 0x05c4
+
+#define AFMT_AUDIO_SRC_CONTROL 0x1c4f
+#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
+/* AFMT_AUDIO_SRC_SELECT
+ * 0 = stream0
+ * 1 = stream1
+ * 2 = stream2
+ * 3 = stream3
+ * 4 = stream4
+ * 5 = stream5
+ */
+
+#define GRBM_CNTL 0x2000
+#define GRBM_READ_TIMEOUT(x) ((x) << 0)
+
+#define GRBM_STATUS2 0x2002
+#define RLC_RQ_PENDING (1 << 0)
+#define RLC_BUSY (1 << 8)
+#define TC_BUSY (1 << 9)
+
+#define GRBM_STATUS 0x2004
+#define CMDFIFO_AVAIL_MASK 0x0000000F
+#define RING2_RQ_PENDING (1 << 4)
+#define SRBM_RQ_PENDING (1 << 5)
+#define RING1_RQ_PENDING (1 << 6)
+#define CF_RQ_PENDING (1 << 7)
+#define PF_RQ_PENDING (1 << 8)
+#define GDS_DMA_RQ_PENDING (1 << 9)
+#define GRBM_EE_BUSY (1 << 10)
+#define DB_CLEAN (1 << 12)
+#define CB_CLEAN (1 << 13)
+#define TA_BUSY (1 << 14)
+#define GDS_BUSY (1 << 15)
+#define VGT_BUSY (1 << 17)
+#define IA_BUSY_NO_DMA (1 << 18)
+#define IA_BUSY (1 << 19)
+#define SX_BUSY (1 << 20)
+#define SPI_BUSY (1 << 22)
+#define BCI_BUSY (1 << 23)
+#define SC_BUSY (1 << 24)
+#define PA_BUSY (1 << 25)
+#define DB_BUSY (1 << 26)
+#define CP_COHERENCY_BUSY (1 << 28)
+#define CP_BUSY (1 << 29)
+#define CB_BUSY (1 << 30)
+#define GUI_ACTIVE (1 << 31)
+#define GRBM_STATUS_SE0 0x2005
+#define GRBM_STATUS_SE1 0x2006
+#define SE_DB_CLEAN (1 << 1)
+#define SE_CB_CLEAN (1 << 2)
+#define SE_BCI_BUSY (1 << 22)
+#define SE_VGT_BUSY (1 << 23)
+#define SE_PA_BUSY (1 << 24)
+#define SE_TA_BUSY (1 << 25)
+#define SE_SX_BUSY (1 << 26)
+#define SE_SPI_BUSY (1 << 27)
+#define SE_SC_BUSY (1 << 29)
+#define SE_DB_BUSY (1 << 30)
+#define SE_CB_BUSY (1 << 31)
+
+#define GRBM_SOFT_RESET 0x2008
+#define SOFT_RESET_CP (1 << 0)
+#define SOFT_RESET_CB (1 << 1)
+#define SOFT_RESET_RLC (1 << 2)
+#define SOFT_RESET_DB (1 << 3)
+#define SOFT_RESET_GDS (1 << 4)
+#define SOFT_RESET_PA (1 << 5)
+#define SOFT_RESET_SC (1 << 6)
+#define SOFT_RESET_BCI (1 << 7)
+#define SOFT_RESET_SPI (1 << 8)
+#define SOFT_RESET_SX (1 << 10)
+#define SOFT_RESET_TC (1 << 11)
+#define SOFT_RESET_TA (1 << 12)
+#define SOFT_RESET_VGT (1 << 14)
+#define SOFT_RESET_IA (1 << 15)
+
+#define GRBM_GFX_INDEX 0x200B
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SH_INDEX(x) ((x) << 8)
+#define SE_INDEX(x) ((x) << 16)
+#define SH_BROADCAST_WRITES (1 << 29)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
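+/*
+ * Illustrative sketch: a GRBM_GFX_INDEX value selecting every shader
+ * engine, shader array and instance at once, as a driver would program
+ * before a chip-wide register write.  The macro name is hypothetical.
+ */
+#if 0 /* example only */
+#define EXAMPLE_GRBM_GFX_INDEX_BROADCAST \
+	(SE_BROADCAST_WRITES | SH_BROADCAST_WRITES | INSTANCE_BROADCAST_WRITES)
+#endif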
+
+#define GRBM_INT_CNTL 0x2018
+# define RDERR_INT_ENABLE (1 << 0)
+# define GUI_IDLE_INT_ENABLE (1 << 19)
+
+#define CP_STRMOUT_CNTL 0x213F
+#define SCRATCH_REG0 0x2140
+#define SCRATCH_REG1 0x2141
+#define SCRATCH_REG2 0x2142
+#define SCRATCH_REG3 0x2143
+#define SCRATCH_REG4 0x2144
+#define SCRATCH_REG5 0x2145
+#define SCRATCH_REG6 0x2146
+#define SCRATCH_REG7 0x2147
+
+#define SCRATCH_UMSK 0x2150
+#define SCRATCH_ADDR 0x2151
+
+#define CP_SEM_WAIT_TIMER 0x216F
+
+#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x2172
+
+#define CP_ME_CNTL 0x21B6
+#define CP_CE_HALT (1 << 24)
+#define CP_PFP_HALT (1 << 26)
+#define CP_ME_HALT (1 << 28)
+
+#define CP_COHER_CNTL2 0x217A
+
+#define CP_RB2_RPTR 0x21BE
+#define CP_RB1_RPTR 0x21BF
+#define CP_RB0_RPTR 0x21C0
+#define CP_RB_WPTR_DELAY 0x21C1
+
+#define CP_QUEUE_THRESHOLDS 0x21D8
+#define ROQ_IB1_START(x) ((x) << 0)
+#define ROQ_IB2_START(x) ((x) << 8)
+#define CP_MEQ_THRESHOLDS 0x21D9
+#define MEQ1_START(x) ((x) << 0)
+#define MEQ2_START(x) ((x) << 8)
+
+#define CP_PERFMON_CNTL 0x21FF
+
+#define VGT_VTX_VECT_EJECT_REG 0x222C
+
+#define VGT_CACHE_INVALIDATION 0x2231
+#define CACHE_INVALIDATION(x) ((x) << 0)
+#define VC_ONLY 0
+#define TC_ONLY 1
+#define VC_AND_TC 2
+#define AUTO_INVLD_EN(x) ((x) << 6)
+#define NO_AUTO 0
+#define ES_AUTO 1
+#define GS_AUTO 2
+#define ES_AND_GS_AUTO 3
+#define VGT_ESGS_RING_SIZE 0x2232
+#define VGT_GSVS_RING_SIZE 0x2233
+
+#define VGT_GS_VERTEX_REUSE 0x2235
+
+#define VGT_PRIMITIVE_TYPE 0x2256
+#define VGT_INDEX_TYPE 0x2257
+
+#define VGT_NUM_INDICES 0x225C
+#define VGT_NUM_INSTANCES 0x225D
+
+#define VGT_TF_RING_SIZE 0x2262
+
+#define VGT_HS_OFFCHIP_PARAM 0x226C
+
+#define VGT_TF_MEMORY_BASE 0x226E
+
+#define CC_GC_SHADER_ARRAY_CONFIG 0x226F
+#define INACTIVE_CUS_MASK 0xFFFF0000
+#define INACTIVE_CUS_SHIFT 16
+#define GC_USER_SHADER_ARRAY_CONFIG 0x2270
+
+#define PA_CL_ENHANCE 0x2285
+#define CLIP_VTX_REORDER_ENA (1 << 0)
+#define NUM_CLIP_SEQ(x) ((x) << 1)
+
+#define PA_SU_LINE_STIPPLE_VALUE 0x2298
+
+#define PA_SC_LINE_STIPPLE_STATE 0x22C4
+
+#define PA_SC_FORCE_EOV_MAX_CNTS 0x22C9
+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
+
+#define PA_SC_FIFO_SIZE 0x22F3
+#define SC_FRONTEND_PRIM_FIFO_SIZE(x) ((x) << 0)
+#define SC_BACKEND_PRIM_FIFO_SIZE(x) ((x) << 6)
+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 15)
+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 23)
+
+#define PA_SC_ENHANCE 0x22FC
+
+#define SQ_CONFIG 0x2300
+
+#define SQC_CACHES 0x2302
+
+#define SQ_POWER_THROTTLE 0x2396
+#define MIN_POWER(x) ((x) << 0)
+#define MIN_POWER_MASK (0x3fff << 0)
+#define MIN_POWER_SHIFT 0
+#define MAX_POWER(x) ((x) << 16)
+#define MAX_POWER_MASK (0x3fff << 16)
+#define MAX_POWER_SHIFT 16
+#define SQ_POWER_THROTTLE2 0x2397
+#define MAX_POWER_DELTA(x) ((x) << 0)
+#define MAX_POWER_DELTA_MASK (0x3fff << 0)
+#define MAX_POWER_DELTA_SHIFT 0
+#define STI_SIZE(x) ((x) << 16)
+#define STI_SIZE_MASK (0x3ff << 16)
+#define STI_SIZE_SHIFT 16
+#define LTI_RATIO(x) ((x) << 27)
+#define LTI_RATIO_MASK (0xf << 27)
+#define LTI_RATIO_SHIFT 27
+
+#define SX_DEBUG_1 0x2418
+
+#define SPI_STATIC_THREAD_MGMT_1 0x2438
+#define SPI_STATIC_THREAD_MGMT_2 0x2439
+#define SPI_STATIC_THREAD_MGMT_3 0x243A
+#define SPI_PS_MAX_WAVE_ID 0x243B
+
+#define SPI_CONFIG_CNTL 0x2440
+
+#define SPI_CONFIG_CNTL_1 0x244F
+#define VTX_DONE_DELAY(x) ((x) << 0)
+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
+
+#define CGTS_TCC_DISABLE 0x2452
+#define CGTS_USER_TCC_DISABLE 0x2453
+#define TCC_DISABLE_MASK 0xFFFF0000
+#define TCC_DISABLE_SHIFT 16
+#define CGTS_SM_CTRL_REG 0x2454
+#define OVERRIDE (1 << 21)
+#define LS_OVERRIDE (1 << 22)
+
+#define SPI_LB_CU_MASK 0x24D5
+
+#define TA_CNTL_AUX 0x2542
+
+#define CC_RB_BACKEND_DISABLE 0x263D
+#define BACKEND_DISABLE(x) ((x) << 16)
+#define GB_ADDR_CONFIG 0x263E
+#define NUM_PIPES(x) ((x) << 0)
+#define NUM_PIPES_MASK 0x00000007
+#define NUM_PIPES_SHIFT 0
+#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
+#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
+#define PIPE_INTERLEAVE_SIZE_SHIFT 4
+#define NUM_SHADER_ENGINES(x) ((x) << 12)
+#define NUM_SHADER_ENGINES_MASK 0x00003000
+#define NUM_SHADER_ENGINES_SHIFT 12
+#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
+#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
+#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
+#define NUM_GPUS(x) ((x) << 20)
+#define NUM_GPUS_MASK 0x00700000
+#define NUM_GPUS_SHIFT 20
+#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
+#define MULTI_GPU_TILE_SIZE_MASK 0x03000000
+#define MULTI_GPU_TILE_SIZE_SHIFT 24
+#define ROW_SIZE(x) ((x) << 28)
+#define ROW_SIZE_MASK 0x30000000
+#define ROW_SIZE_SHIFT 28
+
+#define GB_TILE_MODE0 0x2644
+# define MICRO_TILE_MODE(x) ((x) << 0)
+# define ADDR_SURF_DISPLAY_MICRO_TILING 0
+# define ADDR_SURF_THIN_MICRO_TILING 1
+# define ADDR_SURF_DEPTH_MICRO_TILING 2
+# define ARRAY_MODE(x) ((x) << 2)
+# define ARRAY_LINEAR_GENERAL 0
+# define ARRAY_LINEAR_ALIGNED 1
+# define ARRAY_1D_TILED_THIN1 2
+# define ARRAY_2D_TILED_THIN1 4
+# define PIPE_CONFIG(x) ((x) << 6)
+# define ADDR_SURF_P2 0
+# define ADDR_SURF_P4_8x16 4
+# define ADDR_SURF_P4_16x16 5
+# define ADDR_SURF_P4_16x32 6
+# define ADDR_SURF_P4_32x32 7
+# define ADDR_SURF_P8_16x16_8x16 8
+# define ADDR_SURF_P8_16x32_8x16 9
+# define ADDR_SURF_P8_32x32_8x16 10
+# define ADDR_SURF_P8_16x32_16x16 11
+# define ADDR_SURF_P8_32x32_16x16 12
+# define ADDR_SURF_P8_32x32_16x32 13
+# define ADDR_SURF_P8_32x64_32x32 14
+# define TILE_SPLIT(x) ((x) << 11)
+# define ADDR_SURF_TILE_SPLIT_64B 0
+# define ADDR_SURF_TILE_SPLIT_128B 1
+# define ADDR_SURF_TILE_SPLIT_256B 2
+# define ADDR_SURF_TILE_SPLIT_512B 3
+# define ADDR_SURF_TILE_SPLIT_1KB 4
+# define ADDR_SURF_TILE_SPLIT_2KB 5
+# define ADDR_SURF_TILE_SPLIT_4KB 6
+# define BANK_WIDTH(x) ((x) << 14)
+# define ADDR_SURF_BANK_WIDTH_1 0
+# define ADDR_SURF_BANK_WIDTH_2 1
+# define ADDR_SURF_BANK_WIDTH_4 2
+# define ADDR_SURF_BANK_WIDTH_8 3
+# define BANK_HEIGHT(x) ((x) << 16)
+# define ADDR_SURF_BANK_HEIGHT_1 0
+# define ADDR_SURF_BANK_HEIGHT_2 1
+# define ADDR_SURF_BANK_HEIGHT_4 2
+# define ADDR_SURF_BANK_HEIGHT_8 3
+# define MACRO_TILE_ASPECT(x) ((x) << 18)
+# define ADDR_SURF_MACRO_ASPECT_1 0
+# define ADDR_SURF_MACRO_ASPECT_2 1
+# define ADDR_SURF_MACRO_ASPECT_4 2
+# define ADDR_SURF_MACRO_ASPECT_8 3
+# define NUM_BANKS(x) ((x) << 20)
+# define ADDR_SURF_2_BANK 0
+# define ADDR_SURF_4_BANK 1
+# define ADDR_SURF_8_BANK 2
+# define ADDR_SURF_16_BANK 3
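+/*
+ * Illustrative sketch: one plausible GB_TILE_MODE encoding built from the
+ * fields above (display micro tiling, 2D tiled, P8 32x32 pipes, 256B tile
+ * split, 16 banks).  The combination is hypothetical, not a known golden
+ * setting.
+ */
+#if 0 /* example only */
+#define EXAMPLE_TILE_MODE (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | \
+			   ARRAY_MODE(ARRAY_2D_TILED_THIN1) | \
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | \
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | \
+			   NUM_BANKS(ADDR_SURF_16_BANK))
+#endif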
+#define GB_TILE_MODE1 0x2645
+#define GB_TILE_MODE2 0x2646
+#define GB_TILE_MODE3 0x2647
+#define GB_TILE_MODE4 0x2648
+#define GB_TILE_MODE5 0x2649
+#define GB_TILE_MODE6 0x264a
+#define GB_TILE_MODE7 0x264b
+#define GB_TILE_MODE8 0x264c
+#define GB_TILE_MODE9 0x264d
+#define GB_TILE_MODE10 0x264e
+#define GB_TILE_MODE11 0x264f
+#define GB_TILE_MODE12 0x2650
+#define GB_TILE_MODE13 0x2651
+#define GB_TILE_MODE14 0x2652
+#define GB_TILE_MODE15 0x2653
+#define GB_TILE_MODE16 0x2654
+#define GB_TILE_MODE17 0x2655
+#define GB_TILE_MODE18 0x2656
+#define GB_TILE_MODE19 0x2657
+#define GB_TILE_MODE20 0x2658
+#define GB_TILE_MODE21 0x2659
+#define GB_TILE_MODE22 0x265a
+#define GB_TILE_MODE23 0x265b
+#define GB_TILE_MODE24 0x265c
+#define GB_TILE_MODE25 0x265d
+#define GB_TILE_MODE26 0x265e
+#define GB_TILE_MODE27 0x265f
+#define GB_TILE_MODE28 0x2660
+#define GB_TILE_MODE29 0x2661
+#define GB_TILE_MODE30 0x2662
+#define GB_TILE_MODE31 0x2663
+
+#define CB_PERFCOUNTER0_SELECT0 0x2688
+#define CB_PERFCOUNTER0_SELECT1 0x2689
+#define CB_PERFCOUNTER1_SELECT0 0x268A
+#define CB_PERFCOUNTER1_SELECT1 0x268B
+#define CB_PERFCOUNTER2_SELECT0 0x268C
+#define CB_PERFCOUNTER2_SELECT1 0x268D
+#define CB_PERFCOUNTER3_SELECT0 0x268E
+#define CB_PERFCOUNTER3_SELECT1 0x268F
+
+#define CB_CGTT_SCLK_CTRL 0x2698
+
+#define GC_USER_RB_BACKEND_DISABLE 0x26DF
+#define BACKEND_DISABLE_MASK 0x00FF0000
+#define BACKEND_DISABLE_SHIFT 16
+
+#define TCP_CHAN_STEER_LO 0x2B03
+#define TCP_CHAN_STEER_HI 0x2B94
+
+#define CP_RB0_BASE 0x3040
+#define CP_RB0_CNTL 0x3041
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define BUF_SWAP_32BIT (2 << 16)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
+
+#define CP_RB0_RPTR_ADDR 0x3043
+#define CP_RB0_RPTR_ADDR_HI 0x3044
+#define CP_RB0_WPTR 0x3045
+
+#define CP_PFP_UCODE_ADDR 0x3054
+#define CP_PFP_UCODE_DATA 0x3055
+#define CP_ME_RAM_RADDR 0x3056
+#define CP_ME_RAM_WADDR 0x3057
+#define CP_ME_RAM_DATA 0x3058
+
+#define CP_CE_UCODE_ADDR 0x305A
+#define CP_CE_UCODE_DATA 0x305B
+
+#define CP_RB1_BASE 0x3060
+#define CP_RB1_CNTL 0x3061
+#define CP_RB1_RPTR_ADDR 0x3062
+#define CP_RB1_RPTR_ADDR_HI 0x3063
+#define CP_RB1_WPTR 0x3064
+#define CP_RB2_BASE 0x3065
+#define CP_RB2_CNTL 0x3066
+#define CP_RB2_RPTR_ADDR 0x3067
+#define CP_RB2_RPTR_ADDR_HI 0x3068
+#define CP_RB2_WPTR 0x3069
+#define CP_INT_CNTL_RING0 0x306A
+#define CP_INT_CNTL_RING1 0x306B
+#define CP_INT_CNTL_RING2 0x306C
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define WAIT_MEM_SEM_INT_ENABLE (1 << 21)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+# define CP_RINGID2_INT_ENABLE (1 << 29)
+# define CP_RINGID1_INT_ENABLE (1 << 30)
+# define CP_RINGID0_INT_ENABLE (1 << 31)
+#define CP_INT_STATUS_RING0 0x306D
+#define CP_INT_STATUS_RING1 0x306E
+#define CP_INT_STATUS_RING2 0x306F
+# define WAIT_MEM_SEM_INT_STAT (1 << 21)
+# define TIME_STAMP_INT_STAT (1 << 26)
+# define CP_RINGID2_INT_STAT (1 << 29)
+# define CP_RINGID1_INT_STAT (1 << 30)
+# define CP_RINGID0_INT_STAT (1 << 31)
+
+#define CP_MEM_SLP_CNTL 0x3079
+# define CP_MEM_LS_EN (1 << 0)
+
+#define CP_DEBUG 0x307F
+
+#define RLC_CNTL 0x30C0
+# define RLC_ENABLE (1 << 0)
+#define RLC_RL_BASE 0x30C1
+#define RLC_RL_SIZE 0x30C2
+#define RLC_LB_CNTL 0x30C3
+# define LOAD_BALANCE_ENABLE (1 << 0)
+#define RLC_SAVE_AND_RESTORE_BASE 0x30C4
+#define RLC_LB_CNTR_MAX 0x30C5
+#define RLC_LB_CNTR_INIT 0x30C6
+
+#define RLC_CLEAR_STATE_RESTORE_BASE 0x30C8
+
+#define RLC_UCODE_ADDR 0x30CB
+#define RLC_UCODE_DATA 0x30CC
+
+#define RLC_GPU_CLOCK_COUNT_LSB 0x30CE
+#define RLC_GPU_CLOCK_COUNT_MSB 0x30CF
+#define RLC_CAPTURE_GPU_CLOCK_COUNT 0x30D0
+#define RLC_MC_CNTL 0x30D1
+#define RLC_UCODE_CNTL 0x30D2
+#define RLC_STAT 0x30D3
+# define RLC_BUSY_STATUS (1 << 0)
+# define GFX_POWER_STATUS (1 << 1)
+# define GFX_CLOCK_STATUS (1 << 2)
+# define GFX_LS_STATUS (1 << 3)
+
+#define RLC_PG_CNTL 0x30D7
+# define GFX_PG_ENABLE (1 << 0)
+# define GFX_PG_SRC (1 << 1)
+
+#define RLC_CGTT_MGCG_OVERRIDE 0x3100
+#define RLC_CGCG_CGLS_CTRL 0x3101
+# define CGCG_EN (1 << 0)
+# define CGLS_EN (1 << 1)
+
+#define RLC_TTOP_D 0x3105
+# define RLC_PUD(x) ((x) << 0)
+# define RLC_PUD_MASK (0xff << 0)
+# define RLC_PDD(x) ((x) << 8)
+# define RLC_PDD_MASK (0xff << 8)
+# define RLC_TTPD(x) ((x) << 16)
+# define RLC_TTPD_MASK (0xff << 16)
+# define RLC_MSD(x) ((x) << 24)
+# define RLC_MSD_MASK (0xff << 24)
+
+#define RLC_LB_INIT_CU_MASK 0x3107
+
+#define RLC_PG_AO_CU_MASK 0x310B
+#define RLC_MAX_PG_CU 0x310C
+# define MAX_PU_CU(x) ((x) << 0)
+# define MAX_PU_CU_MASK (0xff << 0)
+#define RLC_AUTO_PG_CTRL 0x310D
+# define AUTO_PG_EN (1 << 0)
+# define GRBM_REG_SGIT(x) ((x) << 3)
+# define GRBM_REG_SGIT_MASK (0xffff << 3)
+# define PG_AFTER_GRBM_REG_ST(x) ((x) << 19)
+# define PG_AFTER_GRBM_REG_ST_MASK (0x1fff << 19)
+
+#define RLC_SERDES_WR_MASTER_MASK_0 0x3115
+#define RLC_SERDES_WR_MASTER_MASK_1 0x3116
+#define RLC_SERDES_WR_CTRL 0x3117
+
+#define RLC_SERDES_MASTER_BUSY_0 0x3119
+#define RLC_SERDES_MASTER_BUSY_1 0x311A
+
+#define RLC_GCPM_GENERAL_3 0x311E
+
+#define DB_RENDER_CONTROL 0xA000
+
+#define DB_DEPTH_INFO 0xA00F
+
+#define PA_SC_RASTER_CONFIG 0xA0D4
+# define RB_MAP_PKR0(x) ((x) << 0)
+# define RB_MAP_PKR0_MASK (0x3 << 0)
+# define RB_MAP_PKR1(x) ((x) << 2)
+# define RB_MAP_PKR1_MASK (0x3 << 2)
+# define RASTER_CONFIG_RB_MAP_0 0
+# define RASTER_CONFIG_RB_MAP_1 1
+# define RASTER_CONFIG_RB_MAP_2 2
+# define RASTER_CONFIG_RB_MAP_3 3
+# define RB_XSEL2(x) ((x) << 4)
+# define RB_XSEL2_MASK (0x3 << 4)
+# define RB_XSEL (1 << 6)
+# define RB_YSEL (1 << 7)
+# define PKR_MAP(x) ((x) << 8)
+# define PKR_MAP_MASK (0x3 << 8)
+# define RASTER_CONFIG_PKR_MAP_0 0
+# define RASTER_CONFIG_PKR_MAP_1 1
+# define RASTER_CONFIG_PKR_MAP_2 2
+# define RASTER_CONFIG_PKR_MAP_3 3
+# define PKR_XSEL(x) ((x) << 10)
+# define PKR_XSEL_MASK (0x3 << 10)
+# define PKR_YSEL(x) ((x) << 12)
+# define PKR_YSEL_MASK (0x3 << 12)
+# define SC_MAP(x) ((x) << 16)
+# define SC_MAP_MASK (0x3 << 16)
+# define SC_XSEL(x) ((x) << 18)
+# define SC_XSEL_MASK (0x3 << 18)
+# define SC_YSEL(x) ((x) << 20)
+# define SC_YSEL_MASK (0x3 << 20)
+# define SE_MAP(x) ((x) << 24)
+# define SE_MAP_MASK (0x3 << 24)
+# define RASTER_CONFIG_SE_MAP_0 0
+# define RASTER_CONFIG_SE_MAP_1 1
+# define RASTER_CONFIG_SE_MAP_2 2
+# define RASTER_CONFIG_SE_MAP_3 3
+# define SE_XSEL(x) ((x) << 26)
+# define SE_XSEL_MASK (0x3 << 26)
+# define SE_YSEL(x) ((x) << 28)
+# define SE_YSEL_MASK (0x3 << 28)
+
+#define VGT_EVENT_INITIATOR 0xA2A4
+# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
+# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
+# define SAMPLE_STREAMOUTSTATS3 (3 << 0)
+# define CACHE_FLUSH_TS (4 << 0)
+# define CACHE_FLUSH (6 << 0)
+# define CS_PARTIAL_FLUSH (7 << 0)
+# define VGT_STREAMOUT_RESET (10 << 0)
+# define END_OF_PIPE_INCR_DE (11 << 0)
+# define END_OF_PIPE_IB_END (12 << 0)
+# define RST_PIX_CNT (13 << 0)
+# define VS_PARTIAL_FLUSH (15 << 0)
+# define PS_PARTIAL_FLUSH (16 << 0)
+# define CACHE_FLUSH_AND_INV_TS_EVENT (20 << 0)
+# define ZPASS_DONE (21 << 0)
+# define CACHE_FLUSH_AND_INV_EVENT (22 << 0)
+# define PERFCOUNTER_START (23 << 0)
+# define PERFCOUNTER_STOP (24 << 0)
+# define PIPELINESTAT_START (25 << 0)
+# define PIPELINESTAT_STOP (26 << 0)
+# define PERFCOUNTER_SAMPLE (27 << 0)
+# define SAMPLE_PIPELINESTAT (30 << 0)
+# define SAMPLE_STREAMOUTSTATS (32 << 0)
+# define RESET_VTX_CNT (33 << 0)
+# define VGT_FLUSH (36 << 0)
+# define BOTTOM_OF_PIPE_TS (40 << 0)
+# define DB_CACHE_FLUSH_AND_INV (42 << 0)
+# define FLUSH_AND_INV_DB_DATA_TS (43 << 0)
+# define FLUSH_AND_INV_DB_META (44 << 0)
+# define FLUSH_AND_INV_CB_DATA_TS (45 << 0)
+# define FLUSH_AND_INV_CB_META (46 << 0)
+# define CS_DONE (47 << 0)
+# define PS_DONE (48 << 0)
+# define FLUSH_AND_INV_CB_PIXEL_DATA (49 << 0)
+# define THREAD_TRACE_START (51 << 0)
+# define THREAD_TRACE_STOP (52 << 0)
+# define THREAD_TRACE_FLUSH (54 << 0)
+# define THREAD_TRACE_FINISH (55 << 0)
+
+/* PIF PHY0 registers idx/data 0x8/0xc */
+#define PB0_PIF_CNTL 0x10
+# define LS2_EXIT_TIME(x) ((x) << 17)
+# define LS2_EXIT_TIME_MASK (0x7 << 17)
+# define LS2_EXIT_TIME_SHIFT 17
+#define PB0_PIF_PAIRING 0x11
+# define MULTI_PIF (1 << 25)
+#define PB0_PIF_PWRDOWN_0 0x12
+# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
+# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_0_SHIFT 24
+#define PB0_PIF_PWRDOWN_1 0x13
+# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
+# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_1_SHIFT 24
+
+#define PB0_PIF_PWRDOWN_2 0x17
+# define PLL_POWER_STATE_IN_TXS2_2(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_2_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_2_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_2(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_2_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_2_SHIFT 10
+# define PLL_RAMP_UP_TIME_2(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_2_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_2_SHIFT 24
+#define PB0_PIF_PWRDOWN_3 0x18
+# define PLL_POWER_STATE_IN_TXS2_3(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_3_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_3_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_3(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_3_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_3_SHIFT 10
+# define PLL_RAMP_UP_TIME_3(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_3_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_3_SHIFT 24
+/* PIF PHY1 registers idx/data 0x10/0x14 */
+#define PB1_PIF_CNTL 0x10
+#define PB1_PIF_PAIRING 0x11
+#define PB1_PIF_PWRDOWN_0 0x12
+#define PB1_PIF_PWRDOWN_1 0x13
+
+#define PB1_PIF_PWRDOWN_2 0x17
+#define PB1_PIF_PWRDOWN_3 0x18
+/* PCIE registers idx/data 0x30/0x34 */
+#define PCIE_CNTL2 0x1c /* PCIE */
+# define SLV_MEM_LS_EN (1 << 16)
+# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
+# define MST_MEM_LS_EN (1 << 18)
+# define REPLAY_MEM_LS_EN (1 << 19)
+#define PCIE_LC_STATUS1 0x28 /* PCIE */
+# define LC_REVERSE_RCVR (1 << 0)
+# define LC_REVERSE_XMIT (1 << 1)
+# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
+# define LC_OPERATING_LINK_WIDTH_SHIFT 2
+# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
+# define LC_DETECTED_LINK_WIDTH_SHIFT 5
+
+#define PCIE_P_CNTL 0x40 /* PCIE */
+# define P_IGNORE_EDB_ERR (1 << 6)
+
+/* PCIE PORT registers idx/data 0x38/0x3c */
+#define PCIE_LC_CNTL 0xa0
+# define LC_L0S_INACTIVITY(x) ((x) << 8)
+# define LC_L0S_INACTIVITY_MASK (0xf << 8)
+# define LC_L0S_INACTIVITY_SHIFT 8
+# define LC_L1_INACTIVITY(x) ((x) << 12)
+# define LC_L1_INACTIVITY_MASK (0xf << 12)
+# define LC_L1_INACTIVITY_SHIFT 12
+# define LC_PMI_TO_L1_DIS (1 << 16)
+# define LC_ASPM_TO_L1_DIS (1 << 24)
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
+# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
+# define LC_DYN_LANES_PWR_STATE_SHIFT 21
+#define PCIE_LC_N_FTS_CNTL 0xa3 /* PCIE_P */
+# define LC_XMIT_N_FTS(x) ((x) << 0)
+# define LC_XMIT_N_FTS_MASK (0xff << 0)
+# define LC_XMIT_N_FTS_SHIFT 0
+# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
+# define LC_N_FTS_MASK (0xff << 24)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_GEN3_EN_STRAP (1 << 1)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
+# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
+# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
+# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
+# define LC_CURRENT_DATA_RATE_SHIFT 13
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
+# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
+# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
+
+#define PCIE_LC_CNTL2 0xb1
+# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
+# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
+
+#define PCIE_LC_CNTL3 0xb5 /* PCIE_P */
+# define LC_GO_TO_RECOVERY (1 << 30)
+#define PCIE_LC_CNTL4 0xb6 /* PCIE_P */
+# define LC_REDO_EQ (1 << 5)
+# define LC_SET_QUIESCE (1 << 13)
+
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG 0x3bd3
+#define UVD_UDEC_DB_ADDR_CONFIG 0x3bd4
+#define UVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
+#define UVD_RBC_RB_RPTR 0x3da4
+#define UVD_RBC_RB_WPTR 0x3da5
+#define UVD_STATUS 0x3daf
+
+#define UVD_CGC_CTRL 0x3dc2
+# define DCM (1 << 0)
+# define CG_DT(x) ((x) << 2)
+# define CG_DT_MASK (0xf << 2)
+# define CLK_OD(x) ((x) << 6)
+# define CLK_OD_MASK (0x1f << 6)
+
+/* UVD CTX indirect */
+#define UVD_CGC_MEM_CTRL 0xC0
+#define UVD_CGC_CTRL2 0xC1
+# define DYN_OR_EN (1 << 0)
+# define DYN_RR_EN (1 << 1)
+# define G_DIV_ID(x) ((x) << 2)
+# define G_DIV_ID_MASK (0x7 << 2)
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
+ (((reg) >> 2) & 0xFFFF) | \
+ ((n) & 0x3FFF) << 16)
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define RADEON_PACKET_TYPE3 3
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
+
+#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
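+/*
+ * Illustrative sketch: how PACKET3() composes a type-3 PM4 header.  The
+ * opcode (0x37, WRITE_DATA) and dword count are hypothetical.
+ */
+#if 0 /* example only */
+static inline u32 example_packet3_header(void)
+{
+	/* type 3 in bits 31:30, count in bits 29:16, opcode in bits 15:8 */
+	return PACKET3(0x37, 3); /* == 0xc0033700 */
+}
+#endif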
+
+/* Packet 3 types */
+#define PACKET3_NOP 0x10
+#define PACKET3_SET_BASE 0x11
+#define PACKET3_BASE_INDEX(x) ((x) << 0)
+#define GDS_PARTITION_BASE 2
+#define CE_PARTITION_BASE 3
+#define PACKET3_CLEAR_STATE 0x12
+#define PACKET3_INDEX_BUFFER_SIZE 0x13
+#define PACKET3_DISPATCH_DIRECT 0x15
+#define PACKET3_DISPATCH_INDIRECT 0x16
+#define PACKET3_ALLOC_GDS 0x1B
+#define PACKET3_WRITE_GDS_RAM 0x1C
+#define PACKET3_ATOMIC_GDS 0x1D
+#define PACKET3_ATOMIC 0x1E
+#define PACKET3_OCCLUSION_QUERY 0x1F
+#define PACKET3_SET_PREDICATION 0x20
+#define PACKET3_REG_RMW 0x21
+#define PACKET3_COND_EXEC 0x22
+#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_DRAW_INDIRECT 0x24
+#define PACKET3_DRAW_INDEX_INDIRECT 0x25
+#define PACKET3_INDEX_BASE 0x26
+#define PACKET3_DRAW_INDEX_2 0x27
+#define PACKET3_CONTEXT_CONTROL 0x28
+#define PACKET3_INDEX_TYPE 0x2A
+#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
+#define PACKET3_DRAW_INDEX_AUTO 0x2D
+#define PACKET3_DRAW_INDEX_IMMD 0x2E
+#define PACKET3_NUM_INSTANCES 0x2F
+#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
+#define PACKET3_INDIRECT_BUFFER_CONST 0x31
+#define PACKET3_INDIRECT_BUFFER 0x3F
+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
+#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
+#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
+#define PACKET3_WRITE_DATA 0x37
+#define WRITE_DATA_DST_SEL(x) ((x) << 8)
+ /* 0 - register
+ * 1 - memory (sync - via GRBM)
+ * 2 - tc/l2
+ * 3 - gds
+ * 4 - reserved
+ * 5 - memory (async - direct)
+ */
+#define WR_ONE_ADDR (1 << 16)
+#define WR_CONFIRM (1 << 20)
+#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
+ /* 0 - me
+ * 1 - pfp
+ * 2 - ce
+ */
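+/*
+ * Illustrative sketch: the control dword of a WRITE_DATA packet for one
+ * confirmed write to memory through the micro engine.  The macro name is
+ * hypothetical.
+ */
+#if 0 /* example only */
+#define EXAMPLE_WRITE_DATA_CONTROL (WRITE_DATA_DST_SEL(5) | /* memory, async */ \
+				    WR_CONFIRM | /* wait for confirmation */ \
+				    WRITE_DATA_ENGINE_SEL(0)) /* me */
+#endif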
+#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
+#define PACKET3_MEM_SEMAPHORE 0x39
+#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_COPY_DW 0x3B
+#define PACKET3_WAIT_REG_MEM 0x3C
+#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
+ /* 0 - always
+ * 1 - <
+ * 2 - <=
+ * 3 - ==
+ * 4 - !=
+ * 5 - >=
+ * 6 - >
+ */
+#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
+ /* 0 - reg
+ * 1 - mem
+ */
+#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
+ /* 0 - me
+ * 1 - pfp
+ */
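+/*
+ * Illustrative sketch: a WAIT_REG_MEM control word meaning "poll a memory
+ * dword from the ME until it is >= the reference value".  The macro name
+ * is hypothetical.
+ */
+#if 0 /* example only */
+#define EXAMPLE_WAIT_REG_MEM_CONTROL (WAIT_REG_MEM_FUNCTION(5) | /* >= */ \
+				      WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ \
+				      WAIT_REG_MEM_ENGINE(0)) /* me */
+#endif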
+#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_COPY_DATA 0x40
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ * SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+ /* 0 - DST_ADDR
+ * 1 - GDS
+ */
+# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+ /* 0 - ME
+ * 1 - PFP
+ */
+# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ * 2 - DATA
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_DIS_WC (1 << 21)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
+# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
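+/*
+ * Illustrative sketch: dword 3 of a CP_DMA packet for a plain synchronous
+ * memory-to-memory copy, following the six-dword layout commented above.
+ * src_hi is a hypothetical variable holding bits 39:32 of the source
+ * address.
+ */
+#if 0 /* example only */
+static inline u32 example_cp_dma_dw3(u32 src_hi)
+{
+	return PACKET3_CP_DMA_CP_SYNC |    /* raise the sync after the copy */
+	       PACKET3_CP_DMA_SRC_SEL(0) | /* source is SRC_ADDR */
+	       PACKET3_CP_DMA_DST_SEL(0) | /* destination is DST_ADDR */
+	       (src_hi & 0xff);            /* SRC_ADDR_HI [7:0] */
+}
+#endif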
+#define PACKET3_PFP_SYNC_ME 0x42
+#define PACKET3_SURFACE_SYNC 0x43
+# define PACKET3_DEST_BASE_0_ENA (1 << 0)
+# define PACKET3_DEST_BASE_1_ENA (1 << 1)
+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
+# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
+# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
+# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
+# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
+# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
+# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
+# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
+# define PACKET3_DEST_BASE_2_ENA (1 << 19)
+# define PACKET3_DEST_BASE_3_ENA (1 << 21)
+# define PACKET3_TCL1_ACTION_ENA (1 << 22)
+# define PACKET3_TC_ACTION_ENA (1 << 23)
+# define PACKET3_CB_ACTION_ENA (1 << 25)
+# define PACKET3_DB_ACTION_ENA (1 << 26)
+# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
+# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
+#define PACKET3_ME_INITIALIZE 0x44
+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define PACKET3_COND_WRITE 0x45
+#define PACKET3_EVENT_WRITE 0x46
+#define EVENT_TYPE(x) ((x) << 0)
+#define EVENT_INDEX(x) ((x) << 8)
+ /* 0 - any non-TS event
+ * 1 - ZPASS_DONE
+ * 2 - SAMPLE_PIPELINESTAT
+ * 3 - SAMPLE_STREAMOUTSTAT*
+ * 4 - *S_PARTIAL_FLUSH
+ * 5 - EOP events
+ * 6 - EOS events
+ * 7 - CACHE_FLUSH, CACHE_FLUSH_AND_INV_EVENT
+ */
+#define INV_L2 (1 << 20)
+ /* INV TC L2 cache when EVENT_INDEX = 7 */
+#define PACKET3_EVENT_WRITE_EOP 0x47
+#define DATA_SEL(x) ((x) << 29)
+ /* 0 - discard
+ * 1 - send low 32bit data
+ * 2 - send 64bit data
+ * 3 - send 64bit counter value
+ */
+#define INT_SEL(x) ((x) << 24)
+ /* 0 - none
+ * 1 - interrupt only (DATA_SEL = 0)
+ * 2 - interrupt when data write is confirmed
+ */
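+/*
+ * Illustrative sketch: the two control fields of an EVENT_WRITE_EOP fence:
+ * a bottom-of-pipe timestamp event (EVENT_INDEX 5 = EOP), then a 64-bit
+ * data write with an interrupt once the write is confirmed.  The macro
+ * names are hypothetical.
+ */
+#if 0 /* example only */
+#define EXAMPLE_EOP_EVENT     (EVENT_TYPE(BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5))
+#define EXAMPLE_EOP_DATA_CNTL (DATA_SEL(2) | INT_SEL(2))
+#endif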
+#define PACKET3_EVENT_WRITE_EOS 0x48
+#define PACKET3_PREAMBLE_CNTL 0x4A
+# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
+# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
+#define PACKET3_ONE_REG_WRITE 0x57
+#define PACKET3_LOAD_CONFIG_REG 0x5F
+#define PACKET3_LOAD_CONTEXT_REG 0x60
+#define PACKET3_LOAD_SH_REG 0x61
+#define PACKET3_SET_CONFIG_REG 0x68
+#define PACKET3_SET_CONFIG_REG_START 0x00002000
+#define PACKET3_SET_CONFIG_REG_END 0x00002c00
+#define PACKET3_SET_CONTEXT_REG 0x69
+#define PACKET3_SET_CONTEXT_REG_START 0x0000a000
+#define PACKET3_SET_CONTEXT_REG_END 0x0000a400
+#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
+#define PACKET3_SET_RESOURCE_INDIRECT 0x74
+#define PACKET3_SET_SH_REG 0x76
+#define PACKET3_SET_SH_REG_START 0x00002c00
+#define PACKET3_SET_SH_REG_END 0x00003000
+#define PACKET3_SET_SH_REG_OFFSET 0x77
+#define PACKET3_ME_WRITE 0x7A
+#define PACKET3_SCRATCH_RAM_WRITE 0x7D
+#define PACKET3_SCRATCH_RAM_READ 0x7E
+#define PACKET3_CE_WRITE 0x7F
+#define PACKET3_LOAD_CONST_RAM 0x80
+#define PACKET3_WRITE_CONST_RAM 0x81
+#define PACKET3_WRITE_CONST_RAM_OFFSET 0x82
+#define PACKET3_DUMP_CONST_RAM 0x83
+#define PACKET3_INCREMENT_CE_COUNTER 0x84
+#define PACKET3_INCREMENT_DE_COUNTER 0x85
+#define PACKET3_WAIT_ON_CE_COUNTER 0x86
+#define PACKET3_WAIT_ON_DE_COUNTER 0x87
+#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
+#define PACKET3_SET_CE_DE_COUNTERS 0x89
+#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
+#define PACKET3_SWITCH_BUFFER 0x8B
+
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET 0x200 /* not a register */
+
+#define DMA_RB_CNTL 0x3400
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0x3401
+#define DMA_RB_RPTR 0x3402
+#define DMA_RB_WPTR 0x3403
+
+#define DMA_RB_RPTR_ADDR_HI 0x3407
+#define DMA_RB_RPTR_ADDR_LO 0x3408
+
+#define DMA_IB_CNTL 0x3409
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+# define CMD_VMID_FORCE (1 << 31)
+#define DMA_IB_RPTR 0x340a
+#define DMA_CNTL 0x340b
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0x340d
+# define DMA_IDLE (1 << 0)
+#define DMA_TILING_CONFIG 0x342e
+
+#define DMA_POWER_CNTL 0x342f
+# define MEM_POWER_OVERRIDE (1 << 8)
+#define DMA_CLK_CTRL 0x3430
+
+#define DMA_PG 0x3435
+# define PG_CNTL_ENABLE (1 << 0)
+#define DMA_PGFSM_CONFIG 0x3436
+#define DMA_PGFSM_WRITE 0x3437
+
+#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((b) & 0x1) << 26) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
+ (((vmid) & 0xF) << 20) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
+ (1 << 26) | \
+ (1 << 21) | \
+ (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_POLL_REG_MEM 0xe
+#define DMA_PACKET_NOP 0xf
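+/*
+ * Illustrative sketch: the header dword of an async DMA write of 16 dwords,
+ * built with DMA_PACKET() above.  The count is hypothetical.
+ */
+#if 0 /* example only */
+#define EXAMPLE_DMA_WRITE_HEADER DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 16)
+#endif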
+
+#define VCE_STATUS 0x20004
+#define VCE_VCPU_CNTL 0x20014
+#define VCE_CLK_EN (1 << 0)
+#define VCE_VCPU_CACHE_OFFSET0 0x20024
+#define VCE_VCPU_CACHE_SIZE0 0x20028
+#define VCE_VCPU_CACHE_OFFSET1 0x2002c
+#define VCE_VCPU_CACHE_SIZE1 0x20030
+#define VCE_VCPU_CACHE_OFFSET2 0x20034
+#define VCE_VCPU_CACHE_SIZE2 0x20038
+#define VCE_SOFT_RESET 0x20120
+#define VCE_ECPU_SOFT_RESET (1 << 0)
+#define VCE_FME_SOFT_RESET (1 << 2)
+#define VCE_RB_BASE_LO2 0x2016c
+#define VCE_RB_BASE_HI2 0x20170
+#define VCE_RB_SIZE2 0x20174
+#define VCE_RB_RPTR2 0x20178
+#define VCE_RB_WPTR2 0x2017c
+#define VCE_RB_BASE_LO 0x20180
+#define VCE_RB_BASE_HI 0x20184
+#define VCE_RB_SIZE 0x20188
+#define VCE_RB_RPTR 0x2018c
+#define VCE_RB_WPTR 0x20190
+#define VCE_CLOCK_GATING_A 0x202f8
+#define VCE_CLOCK_GATING_B 0x202fc
+#define VCE_UENC_CLOCK_GATING 0x205bc
+#define VCE_UENC_REG_CLOCK_GATING 0x205c0
+#define VCE_FW_REG_STATUS 0x20e10
+# define VCE_FW_REG_STATUS_BUSY (1 << 0)
+# define VCE_FW_REG_STATUS_PASS (1 << 3)
+# define VCE_FW_REG_STATUS_DONE (1 << 11)
+#define VCE_LMI_FW_START_KEYSEL 0x20e18
+#define VCE_LMI_FW_PERIODIC_CTRL 0x20e20
+#define VCE_LMI_CTRL2 0x20e74
+#define VCE_LMI_CTRL 0x20e98
+#define VCE_LMI_VM_CTRL 0x20ea0
+#define VCE_LMI_SWAP_CNTL 0x20eb4
+#define VCE_LMI_SWAP_CNTL1 0x20eb8
+#define VCE_LMI_CACHE_CTRL 0x20ef4
+
+#define VCE_CMD_NO_OP 0x00000000
+#define VCE_CMD_END 0x00000001
+#define VCE_CMD_IB 0x00000002
+#define VCE_CMD_FENCE 0x00000003
+#define VCE_CMD_TRAP 0x00000004
+#define VCE_CMD_IB_AUTO 0x00000005
+#define VCE_CMD_SEMAPHORE 0x00000006
+
+/* DCE: display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
+#define SI_CRTC0_REGISTER_OFFSET 0 //(0x6df0 - 0x6df0)/4
+#define SI_CRTC1_REGISTER_OFFSET 0x300 //(0x79f0 - 0x6df0)/4
+#define SI_CRTC2_REGISTER_OFFSET 0x2600 //(0x105f0 - 0x6df0)/4
+#define SI_CRTC3_REGISTER_OFFSET 0x2900 //(0x111f0 - 0x6df0)/4
+#define SI_CRTC4_REGISTER_OFFSET 0x2c00 //(0x11df0 - 0x6df0)/4
+#define SI_CRTC5_REGISTER_OFFSET 0x2f00 //(0x129f0 - 0x6df0)/4
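+/*
+ * Illustrative sketch: per-CRTC registers are addressed by adding one of
+ * the offsets above to the register's dword address, e.g. the CRTC2 status
+ * register (EVERGREEN_CRTC_STATUS is defined later in this file; RREG32 is
+ * the driver's MMIO read helper).
+ */
+#if 0 /* example only */
+static inline u32 example_crtc2_status(struct amdgpu_device *adev)
+{
+	return RREG32(EVERGREEN_CRTC_STATUS + SI_CRTC2_REGISTER_OFFSET);
+}
+#endif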
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+#define AMDGPU_MM_INDEX 0x0000
+#define AMDGPU_MM_DATA 0x0001
+
+#define VERDE_NUM_CRTC 6
+#define BLACKOUT_MODE_MASK 0x00000007
+#define VGA_RENDER_CONTROL 0xC0
+#define R_000300_VGA_RENDER_CONTROL 0xC0
+#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
+#define EVERGREEN_CRTC_STATUS 0x1BA3
+#define EVERGREEN_CRTC_V_BLANK (1 << 0)
+#define EVERGREEN_CRTC_STATUS_POSITION 0x1BA4
+/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END 0x1b8d
+#define EVERGREEN_CRTC_CONTROL 0x1b9c
+#define EVERGREEN_CRTC_MASTER_EN (1 << 0)
+#define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL 0x1b9d
+#define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
+#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x1ba8
+#define EVERGREEN_CRTC_UPDATE_LOCK 0x1bb5
+#define EVERGREEN_MASTER_UPDATE_LOCK 0x1bbd
+#define EVERGREEN_MASTER_UPDATE_MODE 0x1bbe
+#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x1a04
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x1a05
+#define EVERGREEN_GRPH_UPDATE 0x1a11
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0xc4
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0xc9
+#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
+
+#define EVERGREEN_DATA_FORMAT 0x1ac0
+# define EVERGREEN_INTERLEAVE_EN (1 << 0)
+
+#define MC_SHARED_CHMAP__NOOFCHAN_MASK 0xf000
+#define MC_SHARED_CHMAP__NOOFCHAN__SHIFT 0xc
+
+#define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20)
+#define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20)
+#define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20)
+#define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1 (4 << 20)
+
+#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a45
+#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1845
+
+#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1847
+#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a47
+
+#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK 0x8
+
+#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK 0x4
+
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x20000
+
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK 0x1
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK 0x100
+
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK 0x1
+
+#define R600_D1GRPH_SWAP_CONTROL 0x1843
+#define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0)
+
+#define AVIVO_D1VGA_CONTROL 0x00cc
+# define AVIVO_DVGA_CONTROL_MODE_ENABLE (1 << 0)
+# define AVIVO_DVGA_CONTROL_TIMING_SELECT (1 << 8)
+# define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1 << 9)
+# define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1 << 10)
+# define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1 << 16)
+# define AVIVO_DVGA_CONTROL_ROTATE (1 << 24)
+#define AVIVO_D2VGA_CONTROL 0x00ce
+
+#define R600_BUS_CNTL 0x1508
+# define R600_BIOS_ROM_DIS (1 << 1)
+
+#define R600_ROM_CNTL 0x580
+# define R600_SCK_OVERWRITE (1 << 1)
+# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
+# define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK (0xf << 28)
+
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK 0x1
+
+#define FMT_BIT_DEPTH_CONTROL 0x1bf2
+#define FMT_TRUNCATE_EN (1 << 0)
+#define FMT_TRUNCATE_DEPTH (1 << 4)
+#define FMT_SPATIAL_DITHER_EN (1 << 8)
+#define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
+#define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
+#define FMT_FRAME_RANDOM_ENABLE (1 << 13)
+#define FMT_RGB_RANDOM_ENABLE (1 << 14)
+#define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
+#define FMT_TEMPORAL_DITHER_EN (1 << 16)
+#define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
+#define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#define FMT_TEMPORAL_LEVEL (1 << 24)
+#define FMT_TEMPORAL_DITHER_RESET (1 << 25)
+#define FMT_25FRC_SEL(x) ((x) << 26)
+#define FMT_50FRC_SEL(x) ((x) << 28)
+#define FMT_75FRC_SEL(x) ((x) << 30)
+
+#define EVERGREEN_DC_LUT_CONTROL 0x1a80
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x1a81
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x1a82
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x1a83
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x1a84
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x1a85
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x1a86
+#define EVERGREEN_DC_LUT_30_COLOR 0x1a7c
+#define EVERGREEN_DC_LUT_RW_INDEX 0x1a79
+#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x1a7e
+#define EVERGREEN_DC_LUT_RW_MODE 0x1a78
+
+#define EVERGREEN_GRPH_ENABLE 0x1a00
+#define EVERGREEN_GRPH_CONTROL 0x1a01
+#define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0)
+#define EVERGREEN_GRPH_DEPTH_8BPP 0
+#define EVERGREEN_GRPH_DEPTH_16BPP 1
+#define EVERGREEN_GRPH_DEPTH_32BPP 2
+#define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+#define EVERGREEN_ADDR_SURF_2_BANK 0
+#define EVERGREEN_ADDR_SURF_4_BANK 1
+#define EVERGREEN_ADDR_SURF_8_BANK 2
+#define EVERGREEN_ADDR_SURF_16_BANK 3
+#define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
+#define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
+#define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
+
+/* 8 BPP */
+#define EVERGREEN_GRPH_FORMAT_INDEXED 0
+/* 16 BPP */
+#define EVERGREEN_GRPH_FORMAT_ARGB1555 0
+#define EVERGREEN_GRPH_FORMAT_ARGB565 1
+#define EVERGREEN_GRPH_FORMAT_ARGB4444 2
+#define EVERGREEN_GRPH_FORMAT_AI88 3
+#define EVERGREEN_GRPH_FORMAT_MONO16 4
+#define EVERGREEN_GRPH_FORMAT_BGRA5551 5
+
+/* 32 BPP */
+#define EVERGREEN_GRPH_FORMAT_ARGB8888 0
+#define EVERGREEN_GRPH_FORMAT_ARGB2101010 1
+#define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2
+#define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3
+#define EVERGREEN_GRPH_FORMAT_BGRA1010102 4
+#define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
+#define EVERGREEN_GRPH_FORMAT_RGB111110 6
+#define EVERGREEN_GRPH_FORMAT_BGR101111 7
+#define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
+#define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
+#define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
+#define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
+#define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
+#define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
+#define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
+#define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
+
+#define EVERGREEN_GRPH_SWAP_CONTROL 0x1a03
+#define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
+# define EVERGREEN_GRPH_ENDIAN_NONE 0
+# define EVERGREEN_GRPH_ENDIAN_8IN16 1
+# define EVERGREEN_GRPH_ENDIAN_8IN32 2
+# define EVERGREEN_GRPH_ENDIAN_8IN64 3
+
+#define EVERGREEN_D3VGA_CONTROL 0xf8
+#define EVERGREEN_D4VGA_CONTROL 0xf9
+#define EVERGREEN_D5VGA_CONTROL 0xfa
+#define EVERGREEN_D6VGA_CONTROL 0xfb
+
+#define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00
+
+#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL 0x1a02
+#define EVERGREEN_LUT_10BIT_BYPASS_EN (1 << 8)
+
+#define EVERGREEN_GRPH_PITCH 0x1a06
+#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x1a09
+#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x1a0a
+#define EVERGREEN_GRPH_X_START 0x1a0b
+#define EVERGREEN_GRPH_Y_START 0x1a0c
+#define EVERGREEN_GRPH_X_END 0x1a0d
+#define EVERGREEN_GRPH_Y_END 0x1a0e
+#define EVERGREEN_GRPH_FLIP_CONTROL 0x1a12
+#define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
+
+#define EVERGREEN_VIEWPORT_START 0x1b5c
+#define EVERGREEN_VIEWPORT_SIZE 0x1b5d
+#define EVERGREEN_DESKTOP_HEIGHT 0x1ac1
+
+/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
+#define EVERGREEN_CUR_CONTROL 0x1a66
+# define EVERGREEN_CURSOR_EN (1 << 0)
+# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8)
+# define EVERGREEN_CURSOR_MONO 0
+# define EVERGREEN_CURSOR_24_1 1
+# define EVERGREEN_CURSOR_24_8_PRE_MULT 2
+# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3
+# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16)
+# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20)
+# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
+# define EVERGREEN_CURSOR_URGENT_ALWAYS 0
+# define EVERGREEN_CURSOR_URGENT_1_8 1
+# define EVERGREEN_CURSOR_URGENT_1_4 2
+# define EVERGREEN_CURSOR_URGENT_3_8 3
+# define EVERGREEN_CURSOR_URGENT_1_2 4
+#define EVERGREEN_CUR_SURFACE_ADDRESS 0x1a67
+# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000
+#define EVERGREEN_CUR_SIZE 0x1a68
+#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x1a69
+#define EVERGREEN_CUR_POSITION 0x1a6a
+#define EVERGREEN_CUR_HOT_SPOT 0x1a6b
+#define EVERGREEN_CUR_COLOR1 0x1a6c
+#define EVERGREEN_CUR_COLOR2 0x1a6d
+#define EVERGREEN_CUR_UPDATE 0x1a6e
+# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0)
+# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1)
+# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16)
+# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
+#define NI_INPUT_CSC_CONTROL 0x1a35
+# define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0)
+# define NI_INPUT_CSC_BYPASS 0
+# define NI_INPUT_CSC_PROG_COEFF 1
+# define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2
+# define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_OUTPUT_CSC_CONTROL 0x1a3c
+# define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0)
+# define NI_OUTPUT_CSC_BYPASS 0
+# define NI_OUTPUT_CSC_TV_RGB 1
+# define NI_OUTPUT_CSC_YCBCR_601 2
+# define NI_OUTPUT_CSC_YCBCR_709 3
+# define NI_OUTPUT_CSC_PROG_COEFF 4
+# define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5
+# define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4)
+
+#define NI_DEGAMMA_CONTROL 0x1a58
+# define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0)
+# define NI_DEGAMMA_BYPASS 0
+# define NI_DEGAMMA_SRGB_24 1
+# define NI_DEGAMMA_XVYCC_222 2
+# define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4)
+# define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
+# define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12)
+
+#define NI_GAMUT_REMAP_CONTROL 0x1a59
+# define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0)
+# define NI_GAMUT_REMAP_BYPASS 0
+# define NI_GAMUT_REMAP_PROG_COEFF 1
+# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2
+# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3
+# define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_REGAMMA_CONTROL 0x1aa0
+# define NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0)
+# define NI_REGAMMA_BYPASS 0
+# define NI_REGAMMA_SRGB_24 1
+# define NI_REGAMMA_XVYCC_222 2
+# define NI_REGAMMA_PROG_A 3
+# define NI_REGAMMA_PROG_B 4
+# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
+
+#define NI_PRESCALE_GRPH_CONTROL 0x1a2d
+# define NI_GRPH_PRESCALE_BYPASS (1 << 4)
+
+#define NI_PRESCALE_OVL_CONTROL 0x1a31
+# define NI_OVL_PRESCALE_BYPASS (1 << 4)
+
+#define NI_INPUT_GAMMA_CONTROL 0x1a10
+# define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0)
+# define NI_INPUT_GAMMA_USE_LUT 0
+# define NI_INPUT_GAMMA_BYPASS 1
+# define NI_INPUT_GAMMA_SRGB_24 2
+# define NI_INPUT_GAMMA_XVYCC_222 3
+# define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4)
+
+#define IH_RB_WPTR__RB_OVERFLOW_MASK 0x1
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000
+#define SRBM_STATUS__IH_BUSY_MASK 0x20000
+#define SRBM_SOFT_RESET__SOFT_RESET_IH_MASK 0x400
+
+#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10
+#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
+#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80
+#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
+#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x400
+#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x2000
+#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xd
+#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10000
+#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80000
+#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x13
+
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID_MASK 0x1e000000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID__SHIFT 0x19
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS_MASK 0xff
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS__SHIFT 0x0
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID_MASK 0xff000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID__SHIFT 0xc
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW_MASK 0x1000000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW__SHIFT 0x18
+
+#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE_MASK 0x7
+#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE__SHIFT 0x0
+
+#define mmBIF_FB_EN__xxFB_READ_EN_MASK 0x1
+#define mmBIF_FB_EN__xxFB_READ_EN__SHIFT 0x0
+#define mmBIF_FB_EN__xxFB_WRITE_EN_MASK 0x2
+#define mmBIF_FB_EN__xxFB_WRITE_EN__SHIFT 0x1
+
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC_MASK 0x20000
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC__SHIFT 0x11
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC_MASK 0x800
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC__SHIFT 0xb
+
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x3
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x6
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x200
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x1000
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xc
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8000
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40000
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x12
+
+#define MC_SEQ_MISC0__MT__MASK 0xf0000000
+#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
+#define MC_SEQ_MISC0__MT__DDR2 0x20000000
+#define MC_SEQ_MISC0__MT__GDDR3 0x30000000
+#define MC_SEQ_MISC0__MT__GDDR4 0x40000000
+#define MC_SEQ_MISC0__MT__GDDR5 0x50000000
+#define MC_SEQ_MISC0__MT__HBM 0x60000000
+#define MC_SEQ_MISC0__MT__DDR3 0xB0000000
+
+#define SRBM_STATUS__MCB_BUSY_MASK 0x200
+#define SRBM_STATUS__MCB_BUSY__SHIFT 0x9
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK 0x400
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY__SHIFT 0xa
+#define SRBM_STATUS__MCC_BUSY_MASK 0x800
+#define SRBM_STATUS__MCC_BUSY__SHIFT 0xb
+#define SRBM_STATUS__MCD_BUSY_MASK 0x1000
+#define SRBM_STATUS__MCD_BUSY__SHIFT 0xc
+#define SRBM_STATUS__VMC_BUSY_MASK 0x100
+#define SRBM_STATUS__VMC_BUSY__SHIFT 0x8
+
+
+#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000
+#define CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK 0x4000000
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x800000
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x400000
+#define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
+#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
+#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
+
+#define CONFIG_CNTL 0x1509
+#define CC_DRM_ID_STRAPS 0x1559
+#define AMDGPU_PCIE_INDEX 0xc
+#define AMDGPU_PCIE_DATA 0xd
+
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0x3411
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0x3412
+#define DMA_MODE 0x342f
+#define DMA_RB_RPTR_ADDR_HI 0x3407
+#define DMA_RB_RPTR_ADDR_LO 0x3408
+#define DMA_BUSY_MASK 0x20
+#define DMA1_BUSY_MASK 0x40
+#define SDMA_MAX_INSTANCE 2
+
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+#define CC_DRM_ID_STRAPS__ATI_REV_ID_MASK 0xf0000000
+#define CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT 0x1c
+#define PCIE_PORT_INDEX 0xe
+#define PCIE_PORT_DATA 0xf
+#define EVERGREEN_PIF_PHY0_INDEX 0x8
+#define EVERGREEN_PIF_PHY0_DATA 0xc
+#define EVERGREEN_PIF_PHY1_INDEX 0x10
+#define EVERGREEN_PIF_PHY1_DATA 0x14
+
+#define MC_VM_FB_OFFSET 0x81a
+
+#endif
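
[Editor's note] The xx-prefixed MASK/__SHIFT pairs above follow the generated-header convention: extract a field by masking, then shifting right. A decode sketch (not part of the patch; the helper name and printk are illustrative) for the fault-status fields just defined:

	/* Hypothetical helper: decode a VM fault from the raw status dword. */
	static void decode_vm_fault(u32 status)
	{
		u32 vmid = (status & mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID_MASK)
			   >> mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID__SHIFT;
		u32 client = (status & mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID_MASK)
			     >> mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID__SHIFT;
		bool write = status & mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW_MASK;

		printk(KERN_ERR "VM fault: vmid %u, client 0x%x, %s\n",
		       vmid, client, write ? "write" : "read");
	}
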
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h
index f3e53b118361..19802e96417e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h
@@ -34,6 +34,7 @@
#define mmUVD_UDEC_ADDR_CONFIG 0x3bd3
#define mmUVD_UDEC_DB_ADDR_CONFIG 0x3bd4
#define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
+#define mmUVD_NO_OP 0x3bff
#define mmUVD_SEMA_CNTL 0x3d00
#define mmUVD_LMI_EXT40_ADDR 0x3d26
#define mmUVD_CTX_INDEX 0x3d28
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h
index eb4cf53427da..cc972d237a7e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h
@@ -34,6 +34,7 @@
#define mmUVD_UDEC_ADDR_CONFIG 0x3bd3
#define mmUVD_UDEC_DB_ADDR_CONFIG 0x3bd4
#define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
+#define mmUVD_NO_OP 0x3bff
#define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x3c69
#define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x3c68
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x3c67
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
index ec69869c55ff..378f4b6b43da 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
@@ -35,6 +35,7 @@
#define mmUVD_UDEC_DB_ADDR_CONFIG 0x3bd4
#define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
#define mmUVD_POWER_STATUS_U 0x3bfd
+#define mmUVD_NO_OP 0x3bff
#define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x3c69
#define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x3c68
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x3c67
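
[Editor's note] The mmUVD_NO_OP register added to all three UVD generations gives the ring code a harmless padding target. A sketch of how padding might look, assuming the usual PACKET0 register-write encoding and a 16-dword alignment; writing zero to mmUVD_NO_OP has no hardware effect, which is the point:

	/* Sketch: fill the UVD ring up to the next alignment boundary. */
	while (ring->wptr & 15)
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
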
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 3493da5c8f0e..4a4d3797a6d3 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -494,6 +494,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
union
{
ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ULONG ulClockParams; //ULONG access for BE
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
};
UCHAR ucRefDiv; //Output Parameter
@@ -526,6 +527,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
union
{
ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ULONG ulClockParams; //ULONG access for BE
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
};
UCHAR ucRefDiv; //Output Parameter
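
[Editor's note] The new ulClockParams union member exists for big-endian hosts: the ATOM interpreter byte-swaps whole ULONGs, so poking the sub-fields of ulClock individually would scramble the bytes. A caller-side sketch, assuming the usual ATOM encoding (action flag in the top byte, 10 kHz clock in the low 24 bits):

	COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 args;

	/* Build the dword once and swap it as a unit. */
	args.ulClockParams = cpu_to_le32((COMPUTE_ENGINE_PLL_PARAM << 24) | clock);
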
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index b86aba9d019f..df7c18b6a02a 100644..100755
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -119,6 +119,8 @@ enum cgs_system_info_id {
CGS_SYSTEM_INFO_PG_FLAGS,
CGS_SYSTEM_INFO_GFX_CU_INFO,
CGS_SYSTEM_INFO_GFX_SE_INFO,
+ CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID,
+ CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID,
CGS_SYSTEM_INFO_ID_MAXIMUM,
};
@@ -159,6 +161,7 @@ struct cgs_clock_limits {
*/
struct cgs_firmware_info {
uint16_t version;
+ uint16_t fw_version;
uint16_t feature_version;
uint32_t image_size;
uint64_t mc_addr;
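
[Editor's note] A sketch of how a hwmgr might query the new subsystem IDs, assuming the existing cgs_query_system_info() entry point and the hwmgr->device handle used elsewhere in this patch:

	struct cgs_system_info sys_info = {0};
	int result;

	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
	result = cgs_query_system_info(hwmgr->device, &sys_info);
	if (!result)
		sub_sys_id = (uint32_t)sys_info.value;
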
diff --git a/drivers/gpu/drm/amd/powerplay/Kconfig b/drivers/gpu/drm/amd/powerplay/Kconfig
deleted file mode 100644
index af380335b425..000000000000
--- a/drivers/gpu/drm/amd/powerplay/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config DRM_AMD_POWERPLAY
- bool "Enable AMD powerplay component"
- depends on DRM_AMDGPU
- default n
- help
- select this option will enable AMD powerplay component.
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index abbb658bdc1e..7174f7a68266 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -31,6 +31,7 @@
#include "eventmanager.h"
#include "pp_debug.h"
+
#define PP_CHECK(handle) \
do { \
if ((handle) == NULL || (handle)->pp_valid != PP_VALID) \
@@ -162,12 +163,12 @@ static int pp_hw_fini(void *handle)
pp_handle = (struct pp_instance *)handle;
eventmgr = pp_handle->eventmgr;
- if (eventmgr != NULL || eventmgr->pp_eventmgr_fini != NULL)
+ if (eventmgr != NULL && eventmgr->pp_eventmgr_fini != NULL)
eventmgr->pp_eventmgr_fini(eventmgr);
smumgr = pp_handle->smu_mgr;
- if (smumgr != NULL || smumgr->smumgr_funcs != NULL ||
+ if (smumgr != NULL && smumgr->smumgr_funcs != NULL &&
smumgr->smumgr_funcs->smu_fini != NULL)
smumgr->smumgr_funcs->smu_fini(smumgr);
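
[Editor's note] Worth spelling out why the two || -> && fixes above matter: with ||, a NULL pointer fails the left-hand test and forces evaluation of the dereference on the right — exactly the oops the check exists to prevent. A minimal illustration with a hypothetical struct:

	struct obj { void (*fini)(struct obj *); };
	struct obj *p = NULL;

	if (p != NULL || p->fini != NULL)	/* p == NULL: left false, right oopses */
		p->fini(p);

	if (p != NULL && p->fini != NULL)	/* && short-circuits: right never reached */
		p->fini(p);
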
@@ -190,11 +191,9 @@ static int pp_sw_reset(void *handle)
}
-static int pp_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
+int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
struct pp_hwmgr *hwmgr;
- uint32_t msg_id, pp_state;
if (handle == NULL)
return -EINVAL;
@@ -208,76 +207,7 @@ static int pp_set_clockgating_state(void *handle,
return 0;
}
- if (state == AMD_CG_STATE_UNGATE)
- pp_state = 0;
- else
- pp_state = PP_STATE_CG | PP_STATE_LS;
-
- /* Enable/disable GFX blocks clock gating through SMU */
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_CG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_3D,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_RLC,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_CP,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_MG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-
- /* Enable/disable System blocks clock gating through SMU */
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_BIF,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_BIF,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_MC,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_ROM,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_DRM,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_HDP,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_SDMA,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-
- return 0;
+ return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}
static int pp_set_powergating_state(void *handle,
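
[Editor's note] With the per-block loop removed, amd_set_clockgating_by_smu() forwards exactly one message per call and callers encode the target block themselves. A caller-side sketch, assuming the same PP_CG_MSG_ID encoding the removed loop used:

	uint32_t pp_state = PP_STATE_CG | PP_STATE_LS;	/* gate */
	uint32_t msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, PP_BLOCK_GFX_CG,
				       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
				       pp_state);

	amd_set_clockgating_by_smu(pp_handle, msg_id);
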
@@ -361,7 +291,7 @@ const struct amd_ip_funcs pp_ip_funcs = {
.is_idle = pp_is_idle,
.wait_for_idle = pp_wait_for_idle,
.soft_reset = pp_sw_reset,
- .set_clockgating_state = pp_set_clockgating_state,
+ .set_clockgating_state = NULL,
.set_powergating_state = pp_set_powergating_state,
};
@@ -537,7 +467,6 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
break;
case AMD_PP_EVENT_READJUST_POWER_STATE:
- pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps;
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
break;
default:
@@ -576,28 +505,6 @@ enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
}
}
-static void
-pp_debugfs_print_current_performance_level(void *handle,
- struct seq_file *m)
-{
- struct pp_hwmgr *hwmgr;
-
- if (handle == NULL)
- return;
-
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
-
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL)
- return;
-
- if (hwmgr->hwmgr_func->print_current_perforce_level == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
- return;
- }
-
- hwmgr->hwmgr_func->print_current_perforce_level(hwmgr, m);
-}
-
static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
struct pp_hwmgr *hwmgr;
@@ -764,15 +671,12 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
PP_CHECK_HW(hwmgr);
if (!hwmgr->hardcode_pp_table) {
- hwmgr->hardcode_pp_table =
- kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+ hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size,
+ GFP_KERNEL);
if (!hwmgr->hardcode_pp_table)
return -ENOMEM;
-
- /* to avoid powerplay crash when hardcode pptable is empty */
- memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size);
}
memcpy(hwmgr->hardcode_pp_table, buf, size);
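
[Editor's note] The kmemdup() conversion above is behaviour-preserving; the zeroing kzalloc() did was dead, since every byte was immediately overwritten. kmemdup(src, len, gfp) is shorthand for:

	void *p = kmalloc(len, gfp);

	if (p)
		memcpy(p, src, len);
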
@@ -897,6 +801,25 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
}
+static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->read_sensor == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value);
+}
+
const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw,
@@ -909,7 +832,6 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.powergate_vce = pp_dpm_powergate_vce,
.powergate_uvd = pp_dpm_powergate_uvd,
.dispatch_tasks = pp_dpm_dispatch_tasks,
- .print_current_performance_level = pp_debugfs_print_current_performance_level,
.set_fan_control_mode = pp_dpm_set_fan_control_mode,
.get_fan_control_mode = pp_dpm_get_fan_control_mode,
.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
@@ -923,6 +845,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.set_sclk_od = pp_dpm_set_sclk_od,
.get_mclk_od = pp_dpm_get_mclk_od,
.set_mclk_od = pp_dpm_set_mclk_od,
+ .read_sensor = pp_dpm_read_sensor,
};
static int amd_pp_instance_init(struct amd_pp_init *pp_init,
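
[Editor's note] pp_dpm_read_sensor() replaces the one-shot seq_file printer with per-sensor queries. A sketch of a consumer going through the new table entry, assuming a valid powerplay handle and one of the AMDGPU_PP_SENSOR_* indices the cz backend below understands:

	int32_t value;
	int ret;

	ret = pp_dpm_funcs.read_sensor(pp_handle, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
	if (!ret)
		seq_printf(m, "GPU load: %d %%\n", value);
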
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 635fc4b48184..92b117843875 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -262,6 +262,8 @@ static const pem_event_action * const display_config_change_event[] = {
unblock_adjust_power_state_tasks,
set_cpu_power_state,
notify_hw_power_source_tasks,
+ get_2d_performance_state_tasks,
+ set_performance_state_tasks,
/* updateDALConfigurationTasks,
variBrightDisplayConfigurationChangeTasks, */
adjust_power_state_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
index a46225c0fc01..489908887e9c 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
@@ -70,11 +70,12 @@ int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id)
int i;
table_entries = hwmgr->num_ps;
+
state = hwmgr->ps;
for (i = 0; i < table_entries; i++) {
if (state->id == *state_id) {
- hwmgr->request_ps = state;
+ memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
return 0;
}
state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
@@ -100,13 +101,14 @@ int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip)
if (requested == NULL)
return 0;
+ phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
+
if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal)))
equal = false;
if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
- phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
- hwmgr->current_ps = requested;
+ memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
}
return 0;
}
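
[Editor's note] The switch from pointer assignment to memcpy() above assumes request_ps and current_ps now point at hwmgr-owned buffers of ps_size bytes, turning them into stable snapshots rather than aliases into the power-state table:

	/* Before: request_ps aliased a table entry and silently changed
	 * whenever phm_apply_state_adjust_rules() rewrote it in place. */
	hwmgr->request_ps = state;

	/* After: request_ps keeps a private copy that survives later
	 * edits to the table entry it was taken from. */
	memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
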
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index f7ce4cb71346..5fff1d636ab7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -3,14 +3,12 @@
# It provides the hardware management services for the driver.
HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
- hardwaremanager.o pp_acpi.o cz_hwmgr.o \
- cz_clockpowergating.o \
- tonga_processpptables.o ppatomctrl.o \
- tonga_hwmgr.o pppcielanes.o tonga_thermal.o\
- fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \
- fiji_clockpowergating.o fiji_thermal.o \
- polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \
- polaris10_clockpowergating.o
+ hardwaremanager.o pp_acpi.o cz_hwmgr.o \
+		cz_clockpowergating.o pppcielanes.o \
+ process_pptables_v1_0.o ppatomctrl.o \
+ smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
+ smu7_clockpowergating.o
+
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 8cc0df9b534a..7e4fcbbbe086 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -178,7 +178,6 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
int result;
cz_hwmgr->gfx_ramp_step = 256*25/100;
-
cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */
for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
@@ -186,33 +185,19 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
cz_hwmgr->mgcg_cgtt_local0 = 0x00000000;
cz_hwmgr->mgcg_cgtt_local1 = 0x00000000;
-
cz_hwmgr->clock_slow_down_freq = 25000;
-
cz_hwmgr->skip_clock_slow_down = 1;
-
cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
-
cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
-
cz_hwmgr->voting_rights_clients = 0x00C00033;
-
cz_hwmgr->static_screen_threshold = 8;
-
cz_hwmgr->ddi_power_gating_disabled = 0;
-
cz_hwmgr->bapm_enabled = 1;
-
cz_hwmgr->voltage_drop_threshold = 0;
-
cz_hwmgr->gfx_power_gating_threshold = 500;
-
cz_hwmgr->vce_slow_sclk_threshold = 20000;
-
cz_hwmgr->dce_slow_sclk_threshold = 30000;
-
cz_hwmgr->disable_driver_thermal_policy = 1;
-
cz_hwmgr->disable_nb_ps3_in_battery = 0;
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -221,9 +206,6 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_NonABMSupportInPPLib);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep);
-
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicM3Arbiter);
@@ -233,9 +215,7 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_DynamicPatchPowerState);
cz_hwmgr->thermal_auto_throttling_treshold = 0;
-
cz_hwmgr->tdr_clock = 0;
-
cz_hwmgr->disable_gfx_power_gating_in_uvd = 0;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -450,19 +430,12 @@ static int cz_construct_boot_state(struct pp_hwmgr *hwmgr)
(uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index;
cz_hwmgr->boot_power_level.dsDividerIndex = 0;
-
cz_hwmgr->boot_power_level.ssDividerIndex = 0;
-
cz_hwmgr->boot_power_level.allowGnbSlow = 1;
-
cz_hwmgr->boot_power_level.forceNBPstate = 0;
-
cz_hwmgr->boot_power_level.hysteresis_up = 0;
-
cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0;
-
cz_hwmgr->boot_power_level.display_wm = 0;
-
cz_hwmgr->boot_power_level.vce_wm = 0;
return 0;
@@ -749,7 +722,6 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
clock = hwmgr->display_config.min_core_set_clock;
-;
if (clock == 0)
printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n");
@@ -832,7 +804,7 @@ static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetWatermarkFrequency,
- cz_hwmgr->sclk_dpm.soft_max_clk);
+ cz_hwmgr->sclk_dpm.soft_max_clk);
return 0;
}
@@ -858,9 +830,9 @@ static int cz_tf_enable_nb_dpm(struct pp_hwmgr *hwmgr,
PP_DBG_LOG("enabling ALL SMU features.\n");
dpm_features |= NB_DPM_MASK;
ret = smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_EnableAllSmuFeatures,
- dpm_features);
+ hwmgr->smumgr,
+ PPSMC_MSG_EnableAllSmuFeatures,
+ dpm_features);
if (ret == 0)
cz_hwmgr->is_nb_dpm_enabled = true;
}
@@ -1246,7 +1218,7 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- if (hwmgr != NULL || hwmgr->backend != NULL) {
+ if (hwmgr != NULL && hwmgr->backend != NULL) {
kfree(hwmgr->backend);
kfree(hwmgr);
}
@@ -1402,10 +1374,12 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
PPSMC_MSG_SetUvdHardMin));
cz_enable_disable_uvd_dpm(hwmgr, true);
- } else
+ } else {
cz_enable_disable_uvd_dpm(hwmgr, true);
- } else
+ }
+ } else {
cz_enable_disable_uvd_dpm(hwmgr, false);
+ }
return 0;
}
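
[Editor's note] The braces added above matter because an unbraced else binds to the nearest if, not the one the indentation suggests. A minimal illustration of the hazard:

	if (a)
		if (b)
			f();
	else		/* indentation lies: this else belongs to "if (b)" */
		g();

	/* With braces, the intent is unambiguous: */
	if (a) {
		if (b)
			f();
	} else {
		g();
	}
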
@@ -1564,78 +1538,6 @@ int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
return sizeof(struct cz_power_state);
}
-static void
-cz_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
- struct phm_clock_voltage_dependency_table *table =
- hwmgr->dyn_state.vddc_dependency_on_sclk;
-
- struct phm_vce_clock_voltage_dependency_table *vce_table =
- hwmgr->dyn_state.vce_clock_voltage_dependency_table;
-
- struct phm_uvd_clock_voltage_dependency_table *uvd_table =
- hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
-
- uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
- TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
- uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
- TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
- uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
- TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
-
- uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
- uint16_t vddnb, vddgfx;
- int result;
-
- if (sclk_index >= NUM_SCLK_LEVELS) {
- seq_printf(m, "\n invalid sclk dpm profile %d\n", sclk_index);
- } else {
- sclk = table->entries[sclk_index].clk;
- seq_printf(m, "\n index: %u sclk: %u MHz\n", sclk_index, sclk/100);
- }
-
- tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
- CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
- vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
- tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
- CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
- vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
- seq_printf(m, "\n vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
-
- seq_printf(m, "\n uvd %sabled\n", cz_hwmgr->uvd_power_gated ? "dis" : "en");
- if (!cz_hwmgr->uvd_power_gated) {
- if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
- seq_printf(m, "\n invalid uvd dpm level %d\n", uvd_index);
- } else {
- vclk = uvd_table->entries[uvd_index].vclk;
- dclk = uvd_table->entries[uvd_index].dclk;
- seq_printf(m, "\n index: %u uvd vclk: %u MHz dclk: %u MHz\n", uvd_index, vclk/100, dclk/100);
- }
- }
-
- seq_printf(m, "\n vce %sabled\n", cz_hwmgr->vce_power_gated ? "dis" : "en");
- if (!cz_hwmgr->vce_power_gated) {
- if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
- seq_printf(m, "\n invalid vce dpm level %d\n", vce_index);
- } else {
- ecclk = vce_table->entries[vce_index].ecclk;
- seq_printf(m, "\n index: %u vce ecclk: %u MHz\n", vce_index, ecclk/100);
- }
- }
-
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
- if (0 == result) {
- activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
- activity_percent = activity_percent > 100 ? 100 : activity_percent;
- } else {
- activity_percent = 50;
- }
-
- seq_printf(m, "\n [GPU load]: %u %%\n\n", activity_percent);
-}
-
static void cz_hw_print_display_cfg(
const struct cc6_settings *cc6_settings)
{
@@ -1690,13 +1592,10 @@ static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
if (separation_time !=
- hw_data->cc6_settings.cpu_pstate_separation_time
- || cc6_disable !=
- hw_data->cc6_settings.cpu_cc6_disable
- || pstate_disable !=
- hw_data->cc6_settings.cpu_pstate_disable
- || pstate_switch_disable !=
- hw_data->cc6_settings.nb_pstate_switch_disable) {
+ hw_data->cc6_settings.cpu_pstate_separation_time ||
+ cc6_disable != hw_data->cc6_settings.cpu_cc6_disable ||
+ pstate_disable != hw_data->cc6_settings.cpu_pstate_disable ||
+ pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) {
hw_data->cc6_settings.cc6_setting_changed = true;
@@ -1799,8 +1698,7 @@ static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p
ps = cast_const_PhwCzPowerState(state);
level_index = index > ps->level - 1 ? ps->level - 1 : index;
-
- level->coreClock = ps->levels[level_index].engineClock;
+ level->coreClock = ps->levels[level_index].engineClock;
if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
for (i = 1; i < ps->level; i++) {
@@ -1887,6 +1785,107 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
return 0;
}
+static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
+{
+ struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+ struct phm_clock_voltage_dependency_table *table =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+
+ struct phm_vce_clock_voltage_dependency_table *vce_table =
+ hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+
+ struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+
+ uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
+ TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
+ uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+ TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
+ uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+ TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
+
+ uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
+ uint16_t vddnb, vddgfx;
+ int result;
+
+ switch (idx) {
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ if (sclk_index < NUM_SCLK_LEVELS) {
+ sclk = table->entries[sclk_index].clk;
+ *value = sclk;
+ return 0;
+ }
+ return -EINVAL;
+ case AMDGPU_PP_SENSOR_VDDNB:
+ tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
+ CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+ vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
+ *value = vddnb;
+ return 0;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
+ CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
+ vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
+ *value = vddgfx;
+ return 0;
+ case AMDGPU_PP_SENSOR_UVD_VCLK:
+ if (!cz_hwmgr->uvd_power_gated) {
+ if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ return -EINVAL;
+ } else {
+ vclk = uvd_table->entries[uvd_index].vclk;
+ *value = vclk;
+ return 0;
+ }
+ }
+ *value = 0;
+ return 0;
+ case AMDGPU_PP_SENSOR_UVD_DCLK:
+ if (!cz_hwmgr->uvd_power_gated) {
+ if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ return -EINVAL;
+ } else {
+ dclk = uvd_table->entries[uvd_index].dclk;
+ *value = dclk;
+ return 0;
+ }
+ }
+ *value = 0;
+ return 0;
+ case AMDGPU_PP_SENSOR_VCE_ECCLK:
+ if (!cz_hwmgr->vce_power_gated) {
+ if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ return -EINVAL;
+ } else {
+ ecclk = vce_table->entries[vce_index].ecclk;
+ *value = ecclk;
+ return 0;
+ }
+ }
+ *value = 0;
+ return 0;
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
+ if (0 == result) {
+ activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
+ activity_percent = activity_percent > 100 ? 100 : activity_percent;
+ } else {
+ activity_percent = 50;
+ }
+ *value = activity_percent;
+ return 0;
+ case AMDGPU_PP_SENSOR_UVD_POWER:
+ *value = cz_hwmgr->uvd_power_gated ? 0 : 1;
+ return 0;
+ case AMDGPU_PP_SENSOR_VCE_POWER:
+ *value = cz_hwmgr->vce_power_gated ? 0 : 1;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.backend_init = cz_hwmgr_backend_init,
.backend_fini = cz_hwmgr_backend_fini,
@@ -1902,7 +1901,6 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.patch_boot_state = cz_dpm_patch_boot_state,
.get_pp_table_entry = cz_dpm_get_pp_table_entry,
.get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries,
- .print_current_perforce_level = cz_print_current_perforce_level,
.set_cpu_power_state = cz_set_cpu_power_state,
.store_cc6_data = cz_store_cc6_data,
.force_clock_level = cz_force_clock_level,
@@ -1912,6 +1910,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
.get_clock_by_type = cz_get_clock_by_type,
.get_max_high_clocks = cz_get_max_high_clocks,
+ .read_sensor = cz_read_sensor,
};
int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
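
[Editor's note] One unit caveat for cz_read_sensor() above: the removed debugfs printer divided the table clocks by 100 before labelling them MHz, which suggests the tables store clocks in 10 kHz units, while read_sensor returns the raw value. A consumer sketch under that assumption:

	int32_t sclk;

	if (!cz_read_sensor(hwmgr, AMDGPU_PP_SENSOR_GFX_SCLK, &sclk))
		printk(KERN_INFO "sclk: %d MHz\n", sclk / 100);
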
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
deleted file mode 100644
index 5afe82068b29..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "fiji_clockpowergating.h"
-#include "fiji_ppsmc.h"
-#include "fiji_hwmgr.h"
-
-int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- data->uvd_power_gated = false;
- data->vce_power_gated = false;
- data->samu_power_gated = false;
- data->acp_power_gated = false;
-
- return 0;
-}
-
-int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->uvd_power_gated == bgate)
- return 0;
-
- data->uvd_power_gated = bgate;
-
- if (bgate) {
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
- fiji_update_uvd_dpm(hwmgr, true);
- } else {
- fiji_update_uvd_dpm(hwmgr, false);
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
- }
-
- return 0;
-}
-
-int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_set_power_state_input states;
- const struct pp_power_state *pcurrent;
- struct pp_power_state *requested;
-
- if (data->vce_power_gated == bgate)
- return 0;
-
- data->vce_power_gated = bgate;
-
- pcurrent = hwmgr->current_ps;
- requested = hwmgr->request_ps;
-
- states.pcurrent_state = &(pcurrent->hardware);
- states.pnew_state = &(requested->hardware);
-
- fiji_update_vce_dpm(hwmgr, &states);
- fiji_enable_disable_vce_dpm(hwmgr, !bgate);
-
- return 0;
-}
-
-int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->samu_power_gated == bgate)
- return 0;
-
- data->samu_power_gated = bgate;
-
- if (bgate)
- fiji_update_samu_dpm(hwmgr, true);
- else
- fiji_update_samu_dpm(hwmgr, false);
-
- return 0;
-}
-
-int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->acp_power_gated == bgate)
- return 0;
-
- data->acp_power_gated = bgate;
-
- if (bgate)
- fiji_update_acp_dpm(hwmgr, true);
- else
- fiji_update_acp_dpm(hwmgr, false);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h
deleted file mode 100644
index 32d43e8fecb2..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef FIJI_DYN_DEFAULTS_H
-#define FIJI_DYN_DEFAULTS_H
-
-/** \file
-* Volcanic Islands Dynamic default parameters.
-*/
-
-enum FIJIdpm_TrendDetection
-{
- FIJIAdpm_TrendDetection_AUTO,
- FIJIAdpm_TrendDetection_UP,
- FIJIAdpm_TrendDetection_DOWN
-};
-typedef enum FIJIdpm_TrendDetection FIJIdpm_TrendDetection;
-
-/* We need to fill in the default values!!!!!!!!!!!!!!!!!!!!!!! */
-
-/* Bit vector representing same fields as hardware register. */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy ????
- * HDP_busy
- * IH_busy
- * UVD_busy
- * VCE_busy
- * ACP_busy
- * SAMU_busy
- * SDMA enabled */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. ????
- * SH_Gfx_busy
- * RB_Gfx_busy
- * VCE_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility.
- * FE_Gfx_busy
- * RB_Gfx_busy
- * ACP_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility.
- * FE_Gfx_busy
- * SH_Gfx_busy
- * UVD_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy
- * VCE_busy
- * ACP_busy
- * SAMU_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP */
-
-
-/* thermal protection counter (units). */
-#define PPFIJI_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */
-
-/* static screen threshold unit */
-#define PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT 0
-
-/* static screen threshold */
-#define PPFIJI_STATICSCREENTHRESHOLD_DFLT 0x00C8
-
-/* gfx idle clock stop threshold */
-#define PPFIJI_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */
-
-/* Fixed reference divider to use when building baby stepping tables. */
-#define PPFIJI_REFERENCEDIVIDER_DFLT 4
-
-/* ULV voltage change delay time
- * Used to be delay_vreg in N.I. split for S.I.
- * Using N.I. delay_vreg value as default
- * ReferenceClock = 2700
- * VoltageResponseTime = 1000
- * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
- */
-#define PPFIJI_ULVVOLTAGECHANGEDELAY_DFLT 1687
-
-#define PPFIJI_CGULVPARAMETER_DFLT 0x00040035
-#define PPFIJI_CGULVCONTROL_DFLT 0x00007450
-#define PPFIJI_TARGETACTIVITY_DFLT 30 /* 30%*/
-#define PPFIJI_MCLK_TARGETACTIVITY_DFLT 10 /* 10% */
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
deleted file mode 100644
index 120a9e2c3152..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ /dev/null
@@ -1,5599 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include "linux/delay.h"
-
-#include "hwmgr.h"
-#include "fiji_smumgr.h"
-#include "atombios.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "atombios.h"
-#include "cgs_common.h"
-#include "fiji_dyn_defaults.h"
-#include "fiji_powertune.h"
-#include "smu73.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "pppcielanes.h"
-#include "fiji_hwmgr.h"
-#include "tonga_processpptables.h"
-#include "tonga_pptable.h"
-#include "pp_debug.h"
-#include "pp_acpi.h"
-#include "amd_pcie_helpers.h"
-#include "cgs_linux.h"
-#include "ppinterrupt.h"
-
-#include "fiji_clockpowergating.h"
-#include "fiji_thermal.h"
-
-#define VOLTAGE_SCALE 4
-#define SMC_RAM_END 0x40000
-#define VDDC_VDDCI_DELTA 300
-
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-
-#define MC_CG_ARB_FREQ_F0 0x0a /* boot-up default */
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define MC_CG_ARB_FREQ_F2 0x0c
-#define MC_CG_ARB_FREQ_F3 0x0d
-
-/* From smc_reg.h */
-#define SMC_CG_IND_START 0xc0030000
-#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND */
-
-#define VOLTAGE_SCALE 4
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-
-#define VDDC_VDDCI_DELTA 300
-
-#define ixSWRST_COMMAND_1 0x1400103
-#define MC_SEQ_CNTL__CAC_EN_MASK 0x40000000
-
-/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
-enum DPM_EVENT_SRC {
- DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */
- DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */
- DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */
- DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */
- DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */
-};
-
-
-/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
- * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
- */
-static const uint16_t fiji_clock_stretcher_lookup_table[2][4] =
-{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
-
-/* [FF, SS] type, [] 4 voltage ranges, and
- * [Floor Freq, Boundary Freq, VID min , VID max]
- */
-static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] =
-{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
- { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
- * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
- */
-static const uint8_t fiji_clock_stretch_amount_conversion[2][6] =
-{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
-
-static const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
-
-struct fiji_power_state *cast_phw_fiji_power_state(
- struct pp_hw_power_state *hw_ps)
-{
- PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL;);
-
- return (struct fiji_power_state *)hw_ps;
-}
-
-const struct fiji_power_state *cast_const_phw_fiji_power_state(
- const struct pp_hw_power_state *hw_ps)
-{
- PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL;);
-
- return (const struct fiji_power_state *)hw_ps;
-}
-
-static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
-}
-
-static void fiji_init_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_ulv_parm *ulv = &data->ulv;
-
- ulv->cg_ulv_parameter = PPFIJI_CGULVPARAMETER_DFLT;
- data->voting_rights_clients0 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0;
- data->voting_rights_clients1 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1;
- data->voting_rights_clients2 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2;
- data->voting_rights_clients3 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3;
- data->voting_rights_clients4 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4;
- data->voting_rights_clients5 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5;
- data->voting_rights_clients6 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6;
- data->voting_rights_clients7 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7;
-
- data->static_screen_threshold_unit =
- PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT;
- data->static_screen_threshold =
- PPFIJI_STATICSCREENTHRESHOLD_DFLT;
-
- /* Unset ABM cap as it moved to DAL.
- * Add PHM_PlatformCaps_NonABMSupportInPPLib
- * for re-direct ABM related request to DAL
- */
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ABM);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_NonABMSupportInPPLib);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicACTiming);
-
- fiji_initialize_power_tune_defaults(hwmgr);
-
- data->mclk_stutter_mode_threshold = 60000;
- data->pcie_gen_performance.max = PP_PCIEGen1;
- data->pcie_gen_performance.min = PP_PCIEGen3;
- data->pcie_gen_power_saving.max = PP_PCIEGen1;
- data->pcie_gen_power_saving.min = PP_PCIEGen3;
- data->pcie_lane_performance.max = 0;
- data->pcie_lane_performance.min = 16;
- data->pcie_lane_power_saving.max = 0;
- data->pcie_lane_power_saving.min = 16;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicUVDState);
-}
-
-static int fiji_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table,
- uint16_t virtual_voltage_id, int32_t *sclk)
-{
- uint8_t entryId;
- uint8_t voltageId;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
-
- /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
- for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) {
- voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd;
- if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
- break;
- }
-
- PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count,
- "Can't find requested voltage id in vdd_dep_on_sclk table!",
- return -EINVAL;
- );
-
- *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk;
-
- return 0;
-}
-
-/**
-* Get Leakage VDDC based on leakage ID.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_get_evv_voltages(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint16_t vv_id;
- uint16_t vddc = 0;
- uint16_t evv_default = 1150;
- uint16_t i, j;
- uint32_t sclk = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
- int result;
-
- for (i = 0; i < FIJI_MAX_LEAKAGE_COUNT; i++) {
- vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
- if (!fiji_get_sclk_for_voltage_evv(hwmgr,
- table_info->vddc_lookup_table, vv_id, &sclk)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- for (j = 1; j < sclk_table->count; j++) {
- if (sclk_table->entries[j].clk == sclk &&
- sclk_table->entries[j].cks_enable == 0) {
- sclk += 5000;
- break;
- }
- }
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableDriverEVV))
- result = atomctrl_calculate_voltage_evv_on_sclk(hwmgr,
- VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc, i, true);
- else
- result = -EINVAL;
-
- if (result)
- result = atomctrl_get_voltage_evv_on_sclk(hwmgr,
- VOLTAGE_TYPE_VDDC, sclk,vv_id, &vddc);
-
- /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
- PP_ASSERT_WITH_CODE((vddc < 2000),
- "Invalid VDDC value, greater than 2v!", result = -EINVAL;);
-
- if (result)
- /* 1.15V is the default safe value for Fiji */
- vddc = evv_default;
-
- /* the voltage should not be zero nor equal to leakage ID */
- if (vddc != 0 && vddc != vv_id) {
- data->vddc_leakage.actual_voltage
- [data->vddc_leakage.count] = vddc;
- data->vddc_leakage.leakage_id
- [data->vddc_leakage.count] = vv_id;
- data->vddc_leakage.count++;
- }
- }
- }
- return 0;
-}
-
-/**
- * Change virtual leakage voltage to actual value.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param pointer to changing voltage
- * @param pointer to leakage table
- */
-static void fiji_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
- uint16_t *voltage, struct fiji_leakage_voltage *leakage_table)
-{
- uint32_t index;
-
- /* search for leakage voltage ID 0xff01 ~ 0xff08 */
- for (index = 0; index < leakage_table->count; index++) {
- /* if this voltage matches a leakage voltage ID */
- /* patch with actual leakage voltage */
- if (leakage_table->leakage_id[index] == *voltage) {
- *voltage = leakage_table->actual_voltage[index];
- break;
- }
- }
-
- if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
- printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
-}
-
-/**
-* Patch voltage lookup table by EVV leakages.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pointer to voltage lookup table
-* @param pointer to leakage table
-* @return always 0
-*/
-static int fiji_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table,
- struct fiji_leakage_voltage *leakage_table)
-{
- uint32_t i;
-
- for (i = 0; i < lookup_table->count; i++)
- fiji_patch_with_vdd_leakage(hwmgr,
- &lookup_table->entries[i].us_vdd, leakage_table);
-
- return 0;
-}
-
-static int fiji_patch_clock_voltage_limits_with_vddc_leakage(
- struct pp_hwmgr *hwmgr, struct fiji_leakage_voltage *leakage_table,
- uint16_t *vddc)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- fiji_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
- hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
- table_info->max_clock_voltage_on_dc.vddc;
- return 0;
-}
-
-static int fiji_patch_voltage_dependency_tables_with_lookup_table(
- struct pp_hwmgr *hwmgr)
-{
- uint8_t entryId;
- uint8_t voltageId;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
- table_info->vdd_dep_on_mclk;
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- for (entryId = 0; entryId < sclk_table->count; ++entryId) {
- voltageId = sclk_table->entries[entryId].vddInd;
- sclk_table->entries[entryId].vddc =
- table_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- for (entryId = 0; entryId < mclk_table->count; ++entryId) {
- voltageId = mclk_table->entries[entryId].vddInd;
- mclk_table->entries[entryId].vddc =
- table_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- for (entryId = 0; entryId < mm_table->count; ++entryId) {
- voltageId = mm_table->entries[entryId].vddcInd;
- mm_table->entries[entryId].vddc =
- table_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- return 0;
-
-}
-
-static int fiji_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
-{
- /* Need to determine if we need calculated voltage. */
- return 0;
-}
-
-static int fiji_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
-{
- /* Need to determine if we need calculated voltage from mm table. */
- return 0;
-}
-
-static int fiji_sort_lookup_table(struct pp_hwmgr *hwmgr,
- struct phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
- uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
- table_size = lookup_table->count;
-
- PP_ASSERT_WITH_CODE(0 != lookup_table->count,
- "Lookup table is empty", return -EINVAL);
-
- /* Sorting voltages */
- for (i = 0; i < table_size - 1; i++) {
- for (j = i + 1; j > 0; j--) {
- if (lookup_table->entries[j].us_vdd <
- lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
- }
- }
- }
-
- return 0;
-}
-
-static int fiji_complete_dependency_tables(struct pp_hwmgr *hwmgr)
-{
- int result = 0;
- int tmp_result;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- tmp_result = fiji_patch_lookup_table_with_leakage(hwmgr,
- table_info->vddc_lookup_table, &(data->vddc_leakage));
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = fiji_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
- &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = fiji_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = fiji_calc_voltage_dependency_tables(hwmgr);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = fiji_calc_mm_voltage_dependency_table(hwmgr);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = fiji_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
- if(tmp_result)
- result = tmp_result;
-
- return result;
-}
-
-static int fiji_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
- table_info->vdd_dep_on_mclk;
-
- PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
- "VDD dependency on SCLK table is missing. \
- This table is mandatory", return -EINVAL);
- PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
- "VDD dependency on SCLK table has to have is missing. \
- This table is mandatory", return -EINVAL);
-
- PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
- "VDD dependency on MCLK table is missing. \
- This table is mandatory", return -EINVAL);
- PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
- "VDD dependency on MCLK table has to have is missing. \
- This table is mandatory", return -EINVAL);
-
- data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc;
- data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->
- entries[allowed_sclk_vdd_table->count - 1].vddc;
-
- table_info->max_clock_voltage_on_ac.sclk =
- allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
- table_info->max_clock_voltage_on_ac.mclk =
- allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
- table_info->max_clock_voltage_on_ac.vddc =
- allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
- table_info->max_clock_voltage_on_ac.vddci =
- allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
-
- hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
- table_info->max_clock_voltage_on_ac.sclk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
- table_info->max_clock_voltage_on_ac.mclk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
- table_info->max_clock_voltage_on_ac.vddc;
- hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
- table_info->max_clock_voltage_on_ac.vddci;
-
- return 0;
-}
-
-static uint16_t fiji_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
-{
- uint32_t speedCntl = 0;
-
- /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
- speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
- ixPCIE_LC_SPEED_CNTL);
- return((uint16_t)PHM_GET_FIELD(speedCntl,
- PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-static int fiji_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
-{
- uint32_t link_width;
-
- /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
- link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
- PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
-
- PP_ASSERT_WITH_CODE((7 >= link_width),
- "Invalid PCIe lane width!", return 0);
-
- return decode_pcie_lane_width(link_width);
-}
-
-/** Patch the Boot State to match VBIOS boot clocks and voltage.
-*
-* @param hwmgr Pointer to the hardware manager.
-* @param pPowerState The address of the PowerState instance being created.
-*
-*/
-static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr,
- struct pp_hw_power_state *hw_ps)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_power_state *ps = (struct fiji_power_state *)hw_ps;
- ATOM_FIRMWARE_INFO_V2_2 *fw_info;
- uint16_t size;
- uint8_t frev, crev;
- int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
- /* First retrieve the Boot clocks and VDDC from the firmware info table.
- * We assume here that fw_info is unchanged if this call fails.
- */
- fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
- hwmgr->device, index,
- &size, &frev, &crev);
- if (!fw_info)
- /* During a test, there is no firmware info table. */
- return 0;
-
- /* Patch the state. */
- data->vbios_boot_state.sclk_bootup_value =
- le32_to_cpu(fw_info->ulDefaultEngineClock);
- data->vbios_boot_state.mclk_bootup_value =
- le32_to_cpu(fw_info->ulDefaultMemoryClock);
- data->vbios_boot_state.mvdd_bootup_value =
- le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
- data->vbios_boot_state.vddc_bootup_value =
- le16_to_cpu(fw_info->usBootUpVDDCVoltage);
- data->vbios_boot_state.vddci_bootup_value =
- le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
- data->vbios_boot_state.pcie_gen_bootup_value =
- fiji_get_current_pcie_speed(hwmgr);
- data->vbios_boot_state.pcie_lane_bootup_value =
- (uint16_t)fiji_get_current_pcie_lane_number(hwmgr);
-
- /* set boot power state */
- ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
- ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
- ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
- ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
-
- return 0;
-}
-
-static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
- return phm_hwmgr_backend_fini(hwmgr);
-}
-
-static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data;
- uint32_t i;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- bool stay_in_boot;
- int result;
-
- data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- hwmgr->backend = data;
-
- data->dll_default_on = false;
- data->sram_end = SMC_RAM_END;
-
- for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
- data->activity_target[i] = FIJI_AT_DFLT;
-
- data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
-
- data->mclk_activity_target = PPFIJI_MCLK_TARGETACTIVITY_DFLT;
- data->mclk_dpm0_activity_target = 0xa;
-
- data->sclk_dpm_key_disabled = 0;
- data->mclk_dpm_key_disabled = 0;
- data->pcie_dpm_key_disabled = 0;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UnTabledHardwareInterface);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep);
-
- data->gpio_debug = 0;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicPatchPowerState);
-
- /* need to set voltage control types before EVV patching */
- data->voltage_control = FIJI_VOLTAGE_CONTROL_NONE;
- data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
- data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;
-
- data->force_pcie_gen = PP_PCIEGenInvalid;
-
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
- data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableMVDDControl))
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
- data->mvdd_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;
-
-	if (data->mvdd_control == FIJI_VOLTAGE_CONTROL_NONE)
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_EnableMVDDControl);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
- data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
- data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
- }
-
- if (data->vddci_control == FIJI_VOLTAGE_CONTROL_NONE)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI);
-
- if (table_info && table_info->cac_dtp_table->usClockStretchAmount)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
-
- fiji_init_dpm_defaults(hwmgr);
-
- /* Get leakage voltage based on leakage ID. */
- fiji_get_evv_voltages(hwmgr);
-
- /* Patch our voltage dependency table with actual leakage voltage
- * We need to perform leakage translation before it's used by other functions
- */
- fiji_complete_dependency_tables(hwmgr);
-
- /* Parse pptable data read from VBIOS */
- fiji_set_private_data_based_on_pptable(hwmgr);
-
- /* ULV Support */
- data->ulv.ulv_supported = true; /* ULV feature is enabled by default */
-
-	/* Initialize Dynamic State Adjustment Rule Settings */
- result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
-
- if (!result) {
- data->uvd_enabled = false;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableSMU7ThermalManagement);
- data->vddc_phase_shed_control = false;
- }
-
- stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StayInBootState);
-
- if (0 == result) {
- struct cgs_system_info sys_info = {0};
-
- data->is_tlu_enabled = false;
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
- FIJI_MAX_HARDWARE_POWERLEVELS;
- hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
- hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM);
-
- if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp &&
- hwmgr->thermal_controller.
- advanceFanControlParameters.ucFanControlMode) {
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
- hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
- hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
- hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
- table_info->cac_dtp_table->usOperatingTempMinLimit;
- hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
- table_info->cac_dtp_table->usOperatingTempMaxLimit;
- hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
- table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
- hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
- table_info->cac_dtp_table->usOperatingTempStep;
- hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
- table_info->cac_dtp_table->usTargetOperatingTemp;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODFuzzyFanControlSupport);
- }
-
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
- else
- data->pcie_gen_cap = (uint32_t)sys_info.value;
- if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- data->pcie_spc_cap = 20;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
- else
- data->pcie_lane_cap = (uint32_t)sys_info.value;
- } else {
- /* Ignore return value in here, we are cleaning up a mess. */
- fiji_hwmgr_backend_fini(hwmgr);
- }
-
- return 0;
-}
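/* Editorial sketch (not part of the original patch): the two CGS queries at
 * the end of fiji_hwmgr_backend_init() follow the same fill/query/fallback
 * pattern. A hypothetical helper like the one below captures it; only
 * cgs_query_system_info() and struct cgs_system_info are real names here.
 */
static uint32_t fiji_query_u32_or_default(void *device, uint32_t info_id,
		uint32_t fallback)
{
	struct cgs_system_info sys_info = {0};

	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = info_id;
	/* On failure, fall back to the conservative default capability mask. */
	if (cgs_query_system_info(device, &sys_info))
		return fallback;
	return (uint32_t)sys_info.value;
}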
-
-/**
- * Read clock related registers.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int fiji_read_clock_registers(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- data->clock_registers.vCG_SPLL_FUNC_CNTL =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_FUNC_CNTL);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_FUNC_CNTL_2);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_FUNC_CNTL_3);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_FUNC_CNTL_4);
- data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_SPREAD_SPECTRUM);
- data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_SPREAD_SPECTRUM_2);
-
- return 0;
-}
-
-/**
- * Find out if memory is GDDR5.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int fiji_get_memory_type(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t temp;
-
- temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
- data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
- ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
- MC_SEQ_MISC0_GDDR5_SHIFT));
-
- return 0;
-}
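/* Illustrative note (editorial): the GDDR5 test above is the usual
 * read/mask/shift idiom for extracting a register field. A generic form,
 * with hypothetical naming, would look like:
 */
static inline uint32_t fiji_reg_field(uint32_t reg, uint32_t mask,
		uint32_t shift)
{
	return (reg & mask) >> shift;	/* e.g. the GDDR5 field of MC_SEQ_MISC0 */
}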
-
-/**
-* Enables ACPI (static) power management by the SMC
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int fiji_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, STATIC_PM_EN, 1);
-
- return 0;
-}
-
-/**
- * Initialize PowerGating States for different engines
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int fiji_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- data->uvd_power_gated = false;
- data->vce_power_gated = false;
- data->samu_power_gated = false;
- data->acp_power_gated = false;
- data->pg_acp_init = true;
-
- return 0;
-}
-
-static int fiji_init_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- data->low_sclk_interrupt_threshold = 0;
-
- return 0;
-}
-
-static int fiji_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = fiji_read_clock_registers(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to read clock registers!", result = tmp_result);
-
- tmp_result = fiji_get_memory_type(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get memory type!", result = tmp_result);
-
- tmp_result = fiji_enable_acpi_power_management(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable ACPI power management!", result = tmp_result);
-
- tmp_result = fiji_init_power_gate_state(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to init power gate state!", result = tmp_result);
-
- tmp_result = tonga_get_mc_microcode_version(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get MC microcode version!", result = tmp_result);
-
- tmp_result = fiji_init_sclk_threshold(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to init sclk threshold!", result = tmp_result);
-
- return result;
-}
-
-/**
-* Checks if we want to support voltage control
-*
-* @param hwmgr the address of the powerplay hardware manager.
-*/
-static bool fiji_voltage_control(const struct pp_hwmgr *hwmgr)
-{
- const struct fiji_hwmgr *data =
- (const struct fiji_hwmgr *)(hwmgr->backend);
-
- return (FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control);
-}
-
-/**
-* Enable voltage control
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_enable_voltage_control(struct pp_hwmgr *hwmgr)
-{
- /* enable voltage control */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
-
- return 0;
-}
-
-/**
-* Remove repeated voltage values and create table with unique values.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param vol_table the pointer to changing voltage table
-* @return 0 on success
-*/
-
-static int fiji_trim_voltage_table(struct pp_hwmgr *hwmgr,
- struct pp_atomctrl_voltage_table *vol_table)
-{
- uint32_t i, j;
- uint16_t vvalue;
- bool found = false;
- struct pp_atomctrl_voltage_table *table;
-
- PP_ASSERT_WITH_CODE((NULL != vol_table),
- "Voltage Table empty.", return -EINVAL);
- table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
- GFP_KERNEL);
-
- if (NULL == table)
- return -ENOMEM;
-
- table->mask_low = vol_table->mask_low;
- table->phase_delay = vol_table->phase_delay;
-
- for (i = 0; i < vol_table->count; i++) {
- vvalue = vol_table->entries[i].value;
- found = false;
-
- for (j = 0; j < table->count; j++) {
- if (vvalue == table->entries[j].value) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- table->entries[table->count].value = vvalue;
- table->entries[table->count].smio_low =
- vol_table->entries[i].smio_low;
- table->count++;
- }
- }
-
- memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
- kfree(table);
-
- return 0;
-}
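/* Worked sketch (editorial, hypothetical helper): the loop above is an
 * order-preserving de-duplication. Applied to plain values it behaves like
 * {900, 900, 950, 1000, 950} -> {900, 950, 1000}.
 */
static unsigned int fiji_dedup_u16(uint16_t *vals, unsigned int count)
{
	unsigned int i, j, out = 0;
	bool found;

	for (i = 0; i < count; i++) {
		found = false;
		for (j = 0; j < out; j++)
			if (vals[j] == vals[i])
				found = true;
		if (!found)
			vals[out++] = vals[i];	/* keep the first occurrence only */
	}
	return out;
}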
-
-static int fiji_get_svi2_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
- uint32_t i;
- int result;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_voltage_table *vol_table = &(data->mvdd_voltage_table);
-
- PP_ASSERT_WITH_CODE((0 != dep_table->count),
- "Voltage Dependency Table empty.", return -EINVAL);
-
- vol_table->mask_low = 0;
- vol_table->phase_delay = 0;
- vol_table->count = dep_table->count;
-
- for (i = 0; i < dep_table->count; i++) {
- vol_table->entries[i].value = dep_table->entries[i].mvdd;
- vol_table->entries[i].smio_low = 0;
- }
-
- result = fiji_trim_voltage_table(hwmgr, vol_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to trim MVDD table.", return result);
-
- return 0;
-}
-
-static int fiji_get_svi2_vddci_voltage_table(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
- uint32_t i;
- int result;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_voltage_table *vol_table = &(data->vddci_voltage_table);
-
- PP_ASSERT_WITH_CODE((0 != dep_table->count),
- "Voltage Dependency Table empty.", return -EINVAL);
-
- vol_table->mask_low = 0;
- vol_table->phase_delay = 0;
- vol_table->count = dep_table->count;
-
- for (i = 0; i < dep_table->count; i++) {
- vol_table->entries[i].value = dep_table->entries[i].vddci;
- vol_table->entries[i].smio_low = 0;
- }
-
- result = fiji_trim_voltage_table(hwmgr, vol_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to trim VDDCI table.", return result);
-
- return 0;
-}
-
-static int fiji_get_svi2_vdd_voltage_table(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
- int i = 0;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_voltage_table *vol_table = &(data->vddc_voltage_table);
-
- PP_ASSERT_WITH_CODE((0 != lookup_table->count),
- "Voltage Lookup Table empty.", return -EINVAL);
-
- vol_table->mask_low = 0;
- vol_table->phase_delay = 0;
-
- vol_table->count = lookup_table->count;
-
- for (i = 0; i < vol_table->count; i++) {
- vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
- vol_table->entries[i].smio_low = 0;
- }
-
- return 0;
-}
-
-/* ---- Voltage Tables ----
- * If the voltage table would be bigger than
- * what will fit into the state table on
- * the SMC keep only the higher entries.
- */
-static void fiji_trim_voltage_table_to_fit_state_table(struct pp_hwmgr *hwmgr,
- uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table)
-{
- unsigned int i, diff;
-
- if (vol_table->count <= max_vol_steps)
- return;
-
- diff = vol_table->count - max_vol_steps;
-
- for (i = 0; i < max_vol_steps; i++)
- vol_table->entries[i] = vol_table->entries[i + diff];
-
- vol_table->count = max_vol_steps;
-
- return;
-}
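/* Worked example (editorial): with vol_table->count = 10 and
 * max_vol_steps = 8, diff = 2, so entries[0..7] take the old entries[2..9];
 * the two lowest voltage steps are dropped and count becomes 8, keeping
 * only the higher entries as the block comment above describes.
 */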
-
-/**
-* Create Voltage Tables.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_construct_voltage_tables(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- int result;
-
- if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
- &(data->mvdd_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve MVDD table.",
- return result);
- } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- result = fiji_get_svi2_mvdd_voltage_table(hwmgr,
- table_info->vdd_dep_on_mclk);
- PP_ASSERT_WITH_CODE((0 == result),
-			"Failed to retrieve SVI2 MVDD table from dependency table.",
-			return result);
- }
-
- if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
- &(data->vddci_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve VDDCI table.",
- return result);
- } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- result = fiji_get_svi2_vddci_voltage_table(hwmgr,
- table_info->vdd_dep_on_mclk);
- PP_ASSERT_WITH_CODE((0 == result),
-			"Failed to retrieve SVI2 VDDCI table from dependency table.",
- return result);
- }
-
-	if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- result = fiji_get_svi2_vdd_voltage_table(hwmgr,
- table_info->vddc_lookup_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDC table from lookup table.",
- return result);
- }
-
- PP_ASSERT_WITH_CODE(
- (data->vddc_voltage_table.count <= (SMU73_MAX_LEVELS_VDDC)),
- "Too many voltage values for VDDC. Trimming to fit state table.",
- fiji_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU73_MAX_LEVELS_VDDC, &(data->vddc_voltage_table)));
-
- PP_ASSERT_WITH_CODE(
- (data->vddci_voltage_table.count <= (SMU73_MAX_LEVELS_VDDCI)),
- "Too many voltage values for VDDCI. Trimming to fit state table.",
- fiji_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU73_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table)));
-
- PP_ASSERT_WITH_CODE(
- (data->mvdd_voltage_table.count <= (SMU73_MAX_LEVELS_MVDD)),
- "Too many voltage values for MVDD. Trimming to fit state table.",
- fiji_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU73_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table)));
-
- return 0;
-}
-
-static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- /* Program additional LP registers
- * that are no longer programmed by VBIOS
- */
- cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
-
- return 0;
-}
-
-/**
-* Programs static screen detection parameters
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_program_static_screen_threshold_parameters(
- struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* Set static screen threshold unit */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
- data->static_screen_threshold_unit);
- /* Set static screen threshold */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
- data->static_screen_threshold);
-
- return 0;
-}
-
-/**
-* Setup display gap for glitch free memory clock switching.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_enable_display_gap(struct pp_hwmgr *hwmgr)
-{
- uint32_t displayGap =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL);
-
- displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
- DISP_GAP, DISPLAY_GAP_IGNORE);
-
- displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
- DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL, displayGap);
-
- return 0;
-}
-
-/**
-* Programs activity state transition voting clients
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* Clear reset for voting clients before enabling DPM */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
- return 0;
-}
-
-static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr)
-{
- /* Reset voting clients before disabling DPM */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, 0);
-
- return 0;
-}
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, DpmTable),
- &tmp, data->sram_end);
-
- if (0 == result)
- data->dpm_table_start = tmp;
-
- error |= (0 != result);
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, SoftRegisters),
- &tmp, data->sram_end);
-
- if (!result) {
- data->soft_regs_start = tmp;
- smu_data->soft_regs_start = tmp;
- }
-
- error |= (0 != result);
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, mcRegisterTable),
- &tmp, data->sram_end);
-
- if (!result)
-		data->mc_reg_table_start = tmp;
-
-	error |= (0 != result);
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, FanTable),
- &tmp, data->sram_end);
-
- if (!result)
- data->fan_table_start = tmp;
-
- error |= (0 != result);
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
- &tmp, data->sram_end);
-
- if (!result)
- data->arb_table_start = tmp;
-
- error |= (0 != result);
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, Version),
- &tmp, data->sram_end);
-
- if (!result)
- hwmgr->microcode_version_info.SMC = tmp;
-
- error |= (0 != result);
-
- return error ? -1 : 0;
-}
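/* Editorial sketch: every read above is the same "fetch a dword at
 * SMU7_FIRMWARE_HEADER_LOCATION + offsetof(...) and accumulate failure"
 * step. A hypothetical helper factoring that out might look like:
 */
static int fiji_read_header_field(struct pp_smumgr *smumgr,
		uint32_t field_offset, uint32_t *dest, uint32_t sram_end,
		bool *error)
{
	uint32_t tmp;
	int result;

	result = fiji_read_smc_sram_dword(smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION + field_offset,
			&tmp, sram_end);
	if (!result)
		*dest = tmp;	/* only commit the value on success */
	*error |= (0 != result);
	return result;
}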
-
-/* Copy one arb setting to another and then switch the active set.
- * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
- */
-static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
- uint32_t arb_src, uint32_t arb_dest)
-{
- uint32_t mc_arb_dram_timing;
- uint32_t mc_arb_dram_timing2;
- uint32_t burst_time;
- uint32_t mc_cg_config;
-
- switch (arb_src) {
- case MC_CG_ARB_FREQ_F0:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
- break;
- case MC_CG_ARB_FREQ_F1:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
- break;
- default:
- return -EINVAL;
- }
-
- switch (arb_dest) {
- case MC_CG_ARB_FREQ_F0:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
- break;
- case MC_CG_ARB_FREQ_F1:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
- break;
- default:
- return -EINVAL;
- }
-
- mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
- mc_cg_config |= 0x0000000F;
- cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
-
- return 0;
-}
-
-/**
-* Call SMC to reset S0/S1 to S1 and reset SMIO to its initial value.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return 0 on success
-*/
-static int fiji_reset_to_default(struct pp_hwmgr *hwmgr)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
-}
-
-/**
-* Initial switch from ARB F0->F1
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-* This function is to be called from the SetPowerState table.
-*/
-static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
-{
- return fiji_copy_and_switch_arb_sets(hwmgr,
- MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
-{
- uint32_t tmp;
-
- tmp = (cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
- 0x0000ff00) >> 8;
-
- if (tmp == MC_CG_ARB_FREQ_F0)
- return 0;
-
- return fiji_copy_and_switch_arb_sets(hwmgr,
- tmp, MC_CG_ARB_FREQ_F0);
-}
-
-static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr,
- struct fiji_single_dpm_table *dpm_table, uint32_t count)
-{
- int i;
- PP_ASSERT_WITH_CODE(count <= MAX_REGULAR_DPM_NUMBER,
- "Fatal error, can not set up single DPM table entries "
- "to exceed max number!",);
-
- dpm_table->count = count;
- for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
- dpm_table->dpm_levels[i].enabled = false;
-
- return 0;
-}
-
-static void fiji_setup_pcie_table_entry(
- struct fiji_single_dpm_table *dpm_table,
- uint32_t index, uint32_t pcie_gen,
- uint32_t pcie_lanes)
-{
- dpm_table->dpm_levels[index].value = pcie_gen;
- dpm_table->dpm_levels[index].param1 = pcie_lanes;
- dpm_table->dpm_levels[index].enabled = true;
-}
-
-static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint32_t i, max_entry;
-
- PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
- data->use_pcie_power_saving_levels), "No pcie performance levels!",
- return -EINVAL);
-
- if (data->use_pcie_performance_levels &&
- !data->use_pcie_power_saving_levels) {
- data->pcie_gen_power_saving = data->pcie_gen_performance;
- data->pcie_lane_power_saving = data->pcie_lane_performance;
- } else if (!data->use_pcie_performance_levels &&
- data->use_pcie_power_saving_levels) {
- data->pcie_gen_performance = data->pcie_gen_power_saving;
- data->pcie_lane_performance = data->pcie_lane_power_saving;
- }
-
- fiji_reset_single_dpm_table(hwmgr,
- &data->dpm_table.pcie_speed_table, SMU73_MAX_LEVELS_LINK);
-
- if (pcie_table != NULL) {
-		/* max_entry is used to make sure we reserve one PCIE level
-		 * for the boot level (fix for A+A PSPP issue).
-		 * If the PCIE table from the PPTable has a ULV entry plus
-		 * 8 entries, then ignore the last entry. */
- max_entry = (SMU73_MAX_LEVELS_LINK < pcie_table->count) ?
- SMU73_MAX_LEVELS_LINK : pcie_table->count;
- for (i = 1; i < max_entry; i++) {
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
- get_pcie_gen_support(data->pcie_gen_cap,
- pcie_table->entries[i].gen_speed),
- get_pcie_lane_support(data->pcie_lane_cap,
- pcie_table->entries[i].lane_width));
- }
- data->dpm_table.pcie_speed_table.count = max_entry - 1;
- } else {
- /* Hardcode Pcie Table */
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
-
- data->dpm_table.pcie_speed_table.count = 6;
- }
- /* Populate last level for boot PCIE level, but do not increment count. */
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
- data->dpm_table.pcie_speed_table.count,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
-
- return 0;
-}
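/* Worked layout (editorial), assuming the hardcoded path above is taken:
 *   index 0..1: min PCIe gen, max lanes  (low-power levels)
 *   index 2..5: max PCIe gen, max lanes  (performance levels)
 *   index 6   : min gen, max lanes       (boot level; == count, not counted)
 * The PPTable path instead skips entry 0 and reserves the slot at
 * pcie_speed_table.count for the boot level in the same way.
 */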
-
-/*
- * This function initializes all DPM state tables for SMU7 based on the
- * dependency table. The dynamic state patching function will then trim
- * these state tables to the allowed range based on the power policy or
- * external client requests, such as UVD requests.
- */
-static int fiji_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i;
-
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
- table_info->vdd_dep_on_mclk;
-
- PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
- "SCLK dependency table is missing. This table is mandatory",
- return -EINVAL);
-	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
-			"SCLK dependency table must have at least one entry. "
-			"This table is mandatory",
-			return -EINVAL);
-
- PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
- "MCLK dependency table is missing. This table is mandatory",
- return -EINVAL);
-	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
-			"MCLK dependency table must have at least one entry. "
-			"This table is mandatory",
-			return -EINVAL);
-
- /* clear the state table to reset everything to default */
- fiji_reset_single_dpm_table(hwmgr,
- &data->dpm_table.sclk_table, SMU73_MAX_LEVELS_GRAPHICS);
- fiji_reset_single_dpm_table(hwmgr,
- &data->dpm_table.mclk_table, SMU73_MAX_LEVELS_MEMORY);
-
- /* Initialize Sclk DPM table based on allow Sclk values */
- data->dpm_table.sclk_table.count = 0;
- for (i = 0; i < dep_sclk_table->count; i++) {
- if (i == 0 || data->dpm_table.sclk_table.dpm_levels
- [data->dpm_table.sclk_table.count - 1].value !=
- dep_sclk_table->entries[i].clk) {
- data->dpm_table.sclk_table.dpm_levels
- [data->dpm_table.sclk_table.count].value =
- dep_sclk_table->entries[i].clk;
- data->dpm_table.sclk_table.dpm_levels
- [data->dpm_table.sclk_table.count].enabled =
- (i == 0) ? true : false;
- data->dpm_table.sclk_table.count++;
- }
- }
-
- /* Initialize Mclk DPM table based on allow Mclk values */
- data->dpm_table.mclk_table.count = 0;
-	for (i = 0; i < dep_mclk_table->count; i++) {
-		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
- [data->dpm_table.mclk_table.count - 1].value !=
- dep_mclk_table->entries[i].clk) {
- data->dpm_table.mclk_table.dpm_levels
- [data->dpm_table.mclk_table.count].value =
- dep_mclk_table->entries[i].clk;
- data->dpm_table.mclk_table.dpm_levels
- [data->dpm_table.mclk_table.count].enabled =
- (i == 0) ? true : false;
- data->dpm_table.mclk_table.count++;
- }
- }
-
- /* setup PCIE gen speed levels */
- fiji_setup_default_pcie_table(hwmgr);
-
- /* save a copy of the default DPM table */
- memcpy(&(data->golden_dpm_table), &(data->dpm_table),
- sizeof(struct fiji_dpm_table));
-
- return 0;
-}
-
-/**
- * @brief fiji_get_voltage_index
- * Returns the index of the requested voltage record in the lookup table.
- * @param lookup_table - lookup list to search in
- * @param voltage - voltage to look for
- * @return index of the first entry with a voltage >= the request
- *         (the last index if the request exceeds the table maximum)
- */
-uint8_t fiji_get_voltage_index(
-		struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
-{
-	uint8_t count;
-	uint8_t i;
-
-	PP_ASSERT_WITH_CODE((NULL != lookup_table),
-			"Lookup Table empty.", return 0);
-	count = (uint8_t)(lookup_table->count);
-	PP_ASSERT_WITH_CODE((0 != count),
-			"Lookup Table empty.", return 0);
-
- for (i = 0; i < lookup_table->count; i++) {
- /* find first voltage equal or bigger than requested */
- if (lookup_table->entries[i].us_vdd >= voltage)
- return i;
- }
- /* voltage is bigger than max voltage in the table */
- return i - 1;
-}
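/* Usage sketch (editorial, hypothetical values): with lookup entries of
 * {800, 850, 900} mV, fiji_get_voltage_index(table, 840) returns 1 (the
 * first entry >= 840), while fiji_get_voltage_index(table, 950) falls off
 * the loop and returns 2, clamping to the last (highest) entry.
 */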
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *lookup_table =
- table_info->vddc_lookup_table;
-	/* The table is already byte-swapped, so to use a value from it we
-	 * would need to swap it back.
-	 * Here we populate the vddc CAC data into the BapmVddc table
-	 * for both split and merged mode.
-	 */
-	for (count = 0; count < lookup_table->count; count++) {
- index = fiji_get_voltage_index(lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] = (uint8_t) ((6200 -
- (lookup_table->entries[index].us_cac_low *
- VOLTAGE_SCALE)) / 25);
- table->BapmVddcVidHiSidd[count] = (uint8_t) ((6200 -
- (lookup_table->entries[index].us_cac_high *
- VOLTAGE_SCALE)) / 25);
- }
-
- return 0;
-}
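/* Worked VID example (editorial; assumes VOLTAGE_SCALE == 4, so the formula
 * encodes (1550 mV - V) / 6.25 mV as an SVI2-style VID): for
 * us_cac_low = 1100 (mV), (6200 - 1100 * 4) / 25 = 72, so
 * BapmVddcVidLoSidd holds VID 72 for a 1.100 V CAC-low voltage.
 */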
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-
-int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result;
-
- result = fiji_populate_cac_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "can not populate CAC voltage tables to SMC",
- return -EINVAL);
-
- return 0;
-}
-
-static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_Ulv *state)
-{
- int result = 0;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
-	state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
-			VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
- state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
-
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
- }
- return result;
-}
-
-static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- return fiji_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int32_t fiji_get_dpm_level_enable_mask_value(
-		struct fiji_single_dpm_table *dpm_table)
-{
- int32_t i;
- int32_t mask = 0;
-
- for (i = dpm_table->count; i > 0; i--) {
- mask = mask << 1;
- if (dpm_table->dpm_levels[i - 1].enabled)
- mask |= 0x1;
- else
- mask &= 0xFFFFFFFE;
- }
- return mask;
-}
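/* Worked example (editorial): for a 4-level table with levels 0, 2 and 3
 * enabled, the loop walks i = 4..1, shifting the mask left and OR-ing in
 * each enabled level from the top down:
 *   i=4 -> 0b1, i=3 -> 0b11, i=2 -> 0b110, i=1 -> 0b1101 = 0xD,
 * i.e. bit n is set iff dpm_levels[n].enabled.
 */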
-
-static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
- int i;
-
- /* Index (dpm_table->pcie_speed_table.count)
- * is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
- dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity = 1;
- table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
- }
-
- data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
-{
- const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t ref_clock;
- uint32_t ref_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
- ref_clock = atomctrl_get_reference_clock(hwmgr);
- ref_divider = 1 + dividers.uc_pll_ref_div;
-
-	/* the low 14 bits are the fraction and the high 12 bits the divider */
- fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
- /* SPLL_FUNC_CNTL setup */
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_REF_DIV, dividers.uc_pll_ref_div);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PDIV_A, dividers.uc_pll_post_div);
-
- /* SPLL_FUNC_CNTL_3 setup*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
- SPLL_FB_DIV, fbdiv);
-
- /* set to use fractional accumulation*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
- SPLL_DITHEN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
- struct pp_atomctrl_internal_ss_info ssInfo;
-
- uint32_t vco_freq = clock * dividers.uc_pll_post_div;
- if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
- vco_freq, &ssInfo)) {
- /*
- * ss_info.speed_spectrum_percentage -- in unit of 0.01%
- * ss_info.speed_spectrum_rate -- in unit of khz
- *
- * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
- */
- uint32_t clk_s = ref_clock * 5 /
- (ref_divider * ssInfo.speed_spectrum_rate);
- /* clkv = 2 * D * fbdiv / NS */
- uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
- fbdiv / (clk_s * 10000);
-
- cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
- CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
- cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
- CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
- cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
- CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
- }
- }
-
- sclk->SclkFrequency = clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
-
- return 0;
-}
-
-static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
-{
- uint32_t i;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_voltage_table *vddci_table =
- &(data->vddci_voltage_table);
-
- for (i = 0; i < vddci_table->count; i++) {
- if (vddci_table->entries[i].value >= vddci)
- return vddci_table->entries[i].value;
- }
-
- PP_ASSERT_WITH_CODE(false,
- "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i-1].value);
-}
-
-static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
-		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
- uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
- uint32_t i;
- uint16_t vddci;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- *voltage = *mvdd = 0;
-
- /* clock - voltage dependency table is empty table */
- if (dep_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < dep_table->count; i++) {
- /* find first sclk bigger than request */
- if (dep_table->entries[i].clk >= clock) {
- *voltage |= (dep_table->entries[i].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i].vddci)
- *voltage |= (dep_table->entries[i].vddci *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else {
- vddci = fiji_find_closest_vddci(hwmgr,
- (dep_table->entries[i].vddc -
- (uint16_t)data->vddc_vddci_delta));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i].mvdd *
- VOLTAGE_SCALE;
-
- *voltage |= 1 << PHASES_SHIFT;
- return 0;
- }
- }
-
- /* sclk is bigger than max sclk in the dependence table */
- *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i-1].vddci) {
- vddci = fiji_find_closest_vddci(hwmgr,
-				(dep_table->entries[i - 1].vddc -
- (uint16_t)data->vddc_vddci_delta));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
-	else if (dep_table->entries[i - 1].mvdd)
-		*mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
- return 0;
-}
-
-static uint8_t fiji_get_sleep_divider_id_from_clock(uint32_t clock,
- uint32_t clock_insr)
-{
- uint8_t i;
- uint32_t temp;
- uint32_t min = max(clock_insr, (uint32_t)FIJI_MINIMUM_ENGINE_CLOCK);
-
- PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
- for (i = FIJI_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
- temp = clock >> i;
-
- if (temp >= min || i == 0)
- break;
- }
- return i;
-}
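/* Worked example (editorial; FIJI_MAX_DEEPSLEEP_DIVIDER_ID is assumed to
 * be 5 here): with clock = 60000 (600 MHz in 10 kHz units) and
 * min = 10000, the loop tests 60000 >> 5 = 1875, >> 4 = 3750 and
 * >> 3 = 7500 (all below min) before 60000 >> 2 = 15000 passes, so the
 * deepest usable sleep divider ID is 2.
 */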
-/**
-* Populates a single SMC SCLK level structure using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk_al_threshold the activity level threshold for this level
-* @param level the SMC graphics level structure to be populated
-*/
-
-static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, uint16_t sclk_al_threshold,
- struct SMU73_Discrete_GraphicsLevel *level)
-{
- int result;
- /* PP_Clocks minClocks; */
- uint32_t threshold, mvdd;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	result = fiji_calculate_sclk_params(hwmgr, clock, level);
-	PP_ASSERT_WITH_CODE((0 == result),
-			"Error calculating SCLK params for the graphics level.",
-			return result);
-
- /* populate graphics levels */
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk, clock,
- &level->MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for "
- "VDDC engine clock dependency table",
- return result);
-
- level->SclkFrequency = clock;
- level->ActivityLevel = sclk_al_threshold;
- level->CcPwrDynRm = 0;
- level->CcPwrDynRm1 = 0;
- level->EnabledForActivity = 0;
- level->EnabledForThrottle = 1;
- level->UpHyst = 10;
- level->DownHyst = 0;
- level->VoltageDownHyst = 0;
- level->PowerThrottle = 0;
-
- threshold = clock * data->fast_watermark_threshold / 100;
-
-
- data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
- level->DeepSleepDivId = fiji_get_sleep_divider_id_from_clock(clock,
- hwmgr->display_config.min_core_set_clock_in_sr);
-
-
- /* Default to slow, highest DPM level will be
- * set to PPSMC_DISPLAY_WATERMARK_LOW later.
- */
- level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
-
- return 0;
-}
-/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
- int result = 0;
- uint32_t array = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
- SMU73_MAX_LEVELS_GRAPHICS;
- struct SMU73_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- uint32_t i, max_entry;
-	uint8_t highest_pcie_level_enabled = 0,
-		lowest_pcie_level_enabled = 0,
-		mid_pcie_level_enabled = 0,
-		count = 0;
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- result = fiji_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)data->activity_target[i],
- &levels[i]);
- if (result)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- levels[i].DeepSleepDivId = 0;
- }
-
- /* Only enable level 0 for now.*/
- levels[0].EnabledForActivity = 1;
-
- /* set highest level watermark to high */
- levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
- max_entry = pcie_entry_cnt - 1;
- for (i = 0; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel =
- (uint8_t) ((i < max_entry)? i : max_entry);
-	} else {
-		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-						(1 << (highest_pcie_level_enabled + 1))) != 0))
-			highest_pcie_level_enabled++;
-
-		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-						(1 << lowest_pcie_level_enabled)) == 0))
-			lowest_pcie_level_enabled++;
-
-		while ((count < highest_pcie_level_enabled) &&
-				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
-			count++;
-
-		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
-				highest_pcie_level_enabled ?
-				(lowest_pcie_level_enabled + 1 + count) :
-				highest_pcie_level_enabled;
-
-		/* set pcieDpmLevel to highest_pcie_level_enabled */
-		for (i = 2; i < dpm_table->sclk_table.count; i++)
-			levels[i].pcieDpmLevel = highest_pcie_level_enabled;
-
-		/* set pcieDpmLevel to lowest_pcie_level_enabled */
-		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
-		/* set pcieDpmLevel to mid_pcie_level_enabled */
-		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
-	}
- /* level count will send to smc once at init smc table and never change */
- result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, data->sram_end);
-
- return result;
-}
-
-/**
- * MCLK Frequency Ratio
- * SEQ_CG_RESP Bit[31:24] - 0x0
- * Bit[27:24] - DDR3 Frequency ratio
- * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
- * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
- * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
- * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
- * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
- * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
- * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
- * 400 < 0x7 <= 450MHz, 800 < 0xF
- */
-static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
-{
- if (mem_clock <= 10000) return 0x0;
- if (mem_clock <= 15000) return 0x1;
- if (mem_clock <= 20000) return 0x2;
- if (mem_clock <= 25000) return 0x3;
- if (mem_clock <= 30000) return 0x4;
- if (mem_clock <= 35000) return 0x5;
- if (mem_clock <= 40000) return 0x6;
- if (mem_clock <= 45000) return 0x7;
- if (mem_clock <= 50000) return 0x8;
- if (mem_clock <= 55000) return 0x9;
- if (mem_clock <= 60000) return 0xa;
- if (mem_clock <= 65000) return 0xb;
- if (mem_clock <= 70000) return 0xc;
- if (mem_clock <= 75000) return 0xd;
- if (mem_clock <= 80000) return 0xe;
- /* mem_clock > 800MHz */
- return 0xf;
-}
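/* Worked example (editorial): mem_clock is in 10 kHz units, so a 400 MHz
 * memory clock arrives as 40000; the chain falls through the 35000 check
 * and returns 0x6, matching the "350 < 0x6 <= 400MHz" row of the table
 * in the comment above.
 */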
-
-/**
-* Populates the SMC MCLK structure using the provided memory clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the memory clock to use to populate the structure
-* @param mclk the SMC MCLK structure to be populated
-*/
-static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
-{
- struct pp_atomctrl_memory_clock_param mem_param;
- int result;
-
- result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to get Memory PLL Dividers.",);
-
-	/* Save the result data to the output memory level structure */
- mclk->MclkFrequency = clock;
- mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
- mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
-
- return result;
-}
-
-static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
-
- if (table_info->vdd_dep_on_mclk) {
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk, clock,
- &mem_level->MinVoltage, &mem_level->MinMvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find MinVddc voltage value from memory "
- "VDDC voltage dependency table", return result);
- }
-
- mem_level->EnabledForThrottle = 1;
- mem_level->EnabledForActivity = 0;
- mem_level->UpHyst = 0;
- mem_level->DownHyst = 100;
- mem_level->VoltageDownHyst = 0;
- mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- mem_level->StutterEnable = false;
-
- mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-	/* Enable stutter mode if all of the following conditions apply.
-	 * The display count would normally come from
-	 * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
-	 *    &(data->DisplayTiming.numExistingDisplays));
-	 */
- data->display_timing.num_existing_displays = 1;
-
- if ((data->mclk_stutter_mode_threshold) &&
- (clock <= data->mclk_stutter_mode_threshold) &&
- (!data->is_uvd_enabled) &&
- (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
- STUTTER_ENABLE) & 0x1))
- mem_level->StutterEnable = true;
-
- result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
- }
- return result;
-}
-
-/**
-* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
- int result;
- /* populate MCLK dpm table to SMU7 */
- uint32_t array = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
- uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
- SMU73_MAX_LEVELS_MEMORY;
- struct SMU73_Discrete_MemoryLevel *levels =
- data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero",
- return -EINVAL);
- result = fiji_populate_single_memory_level(hwmgr,
- dpm_table->mclk_table.dpm_levels[i].value,
- &levels[i]);
- if (result)
- return result;
- }
-
- /* Only enable level 0 for now. */
- levels[0].EnabledForActivity = 1;
-
-	/* Raise the activity target on MCLK DPM level 0 to prevent MC
-	 * activity in stutter mode from pushing DPM up. The UVD change
-	 * complements this by putting the MCLK in a higher state by default,
-	 * so that we are not affected by the up threshold or MCLK DPM latency.
-	 */
- levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
- CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
- data->smc_state_table.MemoryDpmLevelCount =
- (uint8_t)dpm_table->mclk_table.count;
- data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
- /* set highest level watermark to high */
- levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- /* level count will send to smc once at init smc table and never change */
- result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, data->sram_end);
-
- return result;
-}
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param hwmgr the address of the hardware manager
-* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
-* @param smio_pat the SMIO pattern to be populated with the MVDD value
-*/
-int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
- uint32_t mclk, SMIO_Pattern *smio_pat)
-{
- const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (FIJI_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-		/* find the first MVDD entry whose clock is at least the request */
- for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
- smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
- PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.",
- return -EINVAL);
- } else
- return -EINVAL;
-
- return 0;
-}
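/* Usage sketch (editorial, hypothetical values): with vdd_dep_on_mclk
 * clocks of {30000, 50000}, a request of mclk = 40000 matches index 1
 * (the first entry with clk >= mclk), so smio_pat->Voltage is taken from
 * mvdd_voltage_table.entries[1]; a request above 50000 leaves i == count
 * and the function returns -EINVAL.
 */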
-
-static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = 0;
- const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct pp_atomctrl_clock_dividers_vi dividers;
- SMIO_Pattern vol_level;
- uint32_t mvdd;
- uint16_t us_mvdd;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
-
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- if (!data->sclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0,
- * already converted to SMC_UL */
- table->ACPILevel.SclkFrequency =
- data->dpm_table.sclk_table.dpm_levels[0].value;
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk,
- table->ACPILevel.SclkFrequency,
- &table->ACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDC voltage value "
- "in Clock Dependency Table",);
- } else {
- table->ACPILevel.SclkFrequency =
- data->vbios_boot_state.sclk_bootup_value;
- table->ACPILevel.MinVoltage =
- data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
- }
-
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
- table->ACPILevel.SclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
- table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- table->ACPILevel.DeepSleepDivId = 0;
-
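- /* For the ACPI state the SPLL is powered down and held in reset, and
- * the SCLK mux is pointed away from the SPLL output (mux select 4),
- * presumably so that SCLK runs from an alternate source while the PLL
- * is off.
- */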
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PWRON, 0);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_RESET, 1);
- spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
- SCLK_MUX_SEL, 4);
-
- table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
- table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
- table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
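- /* The SMC consumes big-endian data, so every 32-bit field is
- * byte-swapped from host order before the table is uploaded.
- */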
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- if (!data->mclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency =
- data->dpm_table.mclk_table.dpm_levels[0].value;
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk,
- table->MemoryACPILevel.MclkFrequency,
- &table->MemoryACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDCI voltage value "
- "in Clock Dependency Table",);
- } else {
- table->MemoryACPILevel.MclkFrequency =
- data->vbios_boot_state.mclk_bootup_value;
- table->MemoryACPILevel.MinVoltage =
- data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
- }
-
- us_mvdd = 0;
- if ((FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled)) {
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- } else {
- if (!fiji_populate_mvdd_value(hwmgr,
- data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
-
- table->MemoryACPILevel.MinMvdd =
- PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- table->MemoryACPILevel.StutterEnable = false;
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
- return result;
-}
-
-static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->VceLevelCount = (uint8_t)(mm_table->count);
- table->VceBootLevel = 0;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage = 0;
- table->VceLevel[count].MinVoltage |=
- (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
- table->VceLevel[count].MinVoltage |=
- ((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
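- /* MinVoltage is a packed word: the VDDC and VDDCI targets (in
- * VOLTAGE_SCALE units) sit at VDDC_SHIFT and VDDCI_SHIFT, with the
- * power-phase count at PHASES_SHIFT; VDDCI is derived from VDDC by
- * subtracting the platform's VDDC/VDDCI delta.
- */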
-
- /*retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "cannot find divider ID for the VCE engine clock",
- return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->AcpLevelCount = (uint8_t)(mm_table->count);
- table->AcpBootLevel = 0;
-
- for (count = 0; count < table->AcpLevelCount; count++) {
- table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
- table->AcpLevel[count].MinVoltage = 0;
- table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve the divider value from the VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->AcpLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "cannot find divider ID for the ACP clock", return result);
-
- table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve the divider value from the VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "cannot find divider ID for the SAMU clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
- int32_t eng_clock, int32_t mem_clock,
- struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
- uint32_t dram_timing;
- uint32_t dram_timing2;
- uint32_t burstTime;
- ULONG state, trrds, trrdl;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- eng_clock, mem_clock);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
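- /* The VBIOS call above reprograms the MC arbiter for this
- * engine/memory clock pair; snapshot the resulting registers so the
- * SMC can restore them when it switches to this ARB set.
- */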
- dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
-
- state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
- trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
- trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
- arb_regs->McArbBurstTime = (uint8_t)burstTime;
- arb_regs->TRRDS = (uint8_t)trrds;
- arb_regs->TRRDL = (uint8_t)trrdl;
-
- return 0;
-}
-
-static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
- int result = 0;
-
- for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = fiji_populate_memory_timing_parameters(hwmgr,
- data->dpm_table.sclk_table.dpm_levels[i].value,
- data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
- if (result)
- break;
- }
- }
-
- if (!result)
- result = fiji_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU73_Discrete_MCArbDramTimingTable),
- data->sram_end);
- return result;
-}
-
-static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->UvdLevelCount = (uint8_t)(mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].MinVoltage = 0;
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve the divider value from the VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].VclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "cannot find divider ID for the VCLK", return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "cannot find divider ID for the DCLK", return result);
-
- table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_find_boot_level(struct fiji_single_dpm_table *table,
- uint32_t value, uint32_t *boot_level)
-{
- int result = -EINVAL;
- uint32_t i;
-
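- /* Linear scan for an exact clock match; if duplicate values exist,
- * the last matching index wins. Returns -EINVAL when no level matches.
- */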
- for (i = 0; i < table->count; i++) {
- if (value == table->dpm_levels[i].value) {
- *boot_level = i;
- result = 0;
- }
- }
- return result;
-}
-
-static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result = 0;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- /* find boot level from dpm table */
- result = fiji_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(table->GraphicsBootLevel));
-
- result = fiji_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(table->MemoryBootLevel));
-
- table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
- VOLTAGE_SCALE;
- table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE;
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return 0;
-}
-
-static int fiji_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint8_t count, level;
-
- count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_sclk->entries[level].clk >=
- data->vbios_boot_state.sclk_bootup_value) {
- data->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_mclk->entries[level].clk >=
- data->vbios_boot_state.mclk_bootup_value) {
- data->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-
- return 0;
-}
-
-static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
- volt_with_cks, value;
- uint16_t clock_freq_u16;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
- volt_offset = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
- stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
- /* Read the SMU efuse to calculate RO and determine whether
- * the part is SS or FF. If RO >= 1660 MHz, the part is FF.
- */
- efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (146 * 4));
- efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (148 * 4));
- efuse &= 0xFF000000;
- efuse = efuse >> 24;
- efuse2 &= 0xF;
-
- if (efuse2 == 1)
- ro = (2300 - 1350) * efuse / 255 + 1350;
- else
- ro = (2500 - 1000) * efuse / 255 + 1000;
-
- if (ro >= 1660)
- type = 0;
- else
- type = 1;
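-
- /* Example: with efuse2 == 1 and an efuse byte of 0x80 (128),
- * ro = (2300 - 1350) * 128 / 255 + 1350 = 1826 >= 1660,
- * so the part is classified as FF (type 0).
- */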
-
- /* Populate Stretch amount */
- data->smc_state_table.ClockStretcherAmount = stretch_amount;
-
- /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
- for (i = 0; i < sclk_table->count; i++) {
- data->smc_state_table.Sclk_CKS_masterEn0_7 |=
- sclk_table->entries[i].cks_enable << i;
- volt_without_cks = (uint32_t)((14041 *
- (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
- (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
- volt_with_cks = (uint32_t)((13946 *
- (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
- (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
- if (volt_without_cks >= volt_with_cks)
- volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
- data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
- }
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- STRETCH_ENABLE, 0x0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- staticEnable, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x0);
-
- /* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
- PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported",
- return -EINVAL);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL);
- value &= 0xFFC2FF87;
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
- fiji_clock_stretcher_lookup_table[stretch_amount2][0];
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
- fiji_clock_stretcher_lookup_table[stretch_amount2][1];
- clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
- GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].
- SclkFrequency) / 100);
- if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
- clock_freq_u16 &&
- fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
- clock_freq_u16) {
- /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
- value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
- /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
- value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
- /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
- value |= (fiji_clock_stretch_amount_conversion
- [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
- [stretch_amount]) << 3;
- }
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].minFreq);
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].maxFreq);
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
- fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
- (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL, value);
-
- /* Populate DDT Lookup Table */
- for (i = 0; i < 4; i++) {
- /* Assign the minimum and maximum VID stored
- * in the last row of Clock Stretcher Voltage Table.
- */
- data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].minVID =
- (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
- data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].maxVID =
- (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
- /* Loop through each SCLK and check whether its frequency
- * lies within the clock stretcher frequency range.
- */
- for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
- cks_setting = 0;
- clock_freq = PP_SMC_TO_HOST_UL(
- data->smc_state_table.GraphicsLevel[j].SclkFrequency);
- /* Check the allowed frequency against sclk level[j].
- * The sclk's endianness has already been converted back,
- * and it is in 10 kHz units, as opposed to the DDT table,
- * which is in MHz units.
- */
- if (clock_freq >=
- (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
- cks_setting |= 0x2;
- if (clock_freq <
- (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
- cks_setting |= 0x1;
- }
- data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
- }
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.
- ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
- value &= 0xFFFFFFFE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
- return 0;
-}
-
-/**
-* Populates the SMC VRConfig field in the DPM table.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint16_t config;
-
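- /* VRConfig packs one regulator-type code per rail, each at its
- * VRCONF_*_SHIFT offset: VDDGFX (merged with VDDC here), VDDC, VDDCI
- * and MVDD. The codes select SVI2 planes, SMIO patterns or a static
- * voltage, as programmed below.
- */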
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
- if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
- PP_ASSERT_WITH_CODE(false,
- "VDDC should be on SVI2 control in merged mode!",);
- }
- /* Set Vddci Voltage Controller */
- if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- }
- /* Set Mvdd Voltage Controller */
- if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- } else if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- config = VR_SMIO_PATTERN_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
-
-/**
-* Initializes the SMC table and uploads it to the SMC.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return 0 on success, otherwise an error code.
-*/
-static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct SMU73_Discrete_DpmTable *table = &(data->smc_state_table);
- const struct fiji_ulv_parm *ulv = &(data->ulv);
- uint8_t i;
- struct pp_atomctrl_gpio_pin_assignment gpio_pin;
-
- result = fiji_setup_default_dpm_tables(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to setup default DPM tables!", return result);
-
- if (FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
- fiji_populate_smc_voltage_tables(hwmgr, table);
-
- table->SystemFlags = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (data->is_memory_gddr5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
- result = fiji_populate_ulv_state(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
- }
-
- result = fiji_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result);
-
- result = fiji_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result);
-
- result = fiji_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result);
-
- result = fiji_populate_smc_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result);
-
- result = fiji_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result);
-
- result = fiji_populate_smc_acp_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACP Level!", return result);
-
- result = fiji_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
- /* Since only the initial state is completely set up at this point
- * (the other states are just copies of the boot state) we only
- * need to populate the ARB settings for the initial state.
- */
- result = fiji_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result);
-
- result = fiji_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result);
-
- result = fiji_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result);
-
- result = fiji_populate_smc_initial_state(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot State!", return result);
-
- result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate BAPM Parameters!", return result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = fiji_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate Clock Stretcher Data Table!",
- return result);
- }
-
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- table_info->cac_dtp_table->usTargetOperatingTemp *
- FIJI_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- FIJI_Q88_FORMAT_CONVERSION_UNIT;
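- /* Temperature limits are in Q8.8 fixed point (the conversion unit is
- * presumably 256), e.g. a 90C target becomes 90 * 256 = 23040.
- */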
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0; /* 0: Gen1, 1: Gen2, 2: Gen3 */
- table->PCIeGenInterval = 1;
- table->VRConfig = 0;
-
- result = fiji_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate VRConfig setting!", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
- table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- } else {
- table->VRHotGpio = FIJI_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin)) {
- table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = FIJI_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- /* Thermal Output GPIO */
- if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
- &gpio_pin)) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
- /* For polarity, read GPIOPAD_A for the assigned GPIO pin:
- * the VBIOS programs this register to the 'inactive state',
- * so the driver can derive the 'active state' from it and
- * program the SMU with the correct polarity.
- */
- table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
- (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot) &&
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CombinePCCWithThermalSignal))
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- } else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
- /* Upload all DPM data to SMC memory (DPM levels, DPM level count, etc.) */
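- /* The copy starts at SystemFlags and skips the three
- * SMU73_PIDController blocks, which presumably sit in front of
- * SystemFlags, hence the sizeof adjustment below.
- */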
- result = fiji_copy_bytes_to_smc(hwmgr->smumgr,
- data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
- data->sram_end);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload dpm data to SMC memory!", return result);
-
- return 0;
-}
-
-/**
-* Initialize the ARB DRAM timing table's index field.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return 0 on success, otherwise an error code.
-*/
-static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
- const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t tmp;
- int result;
-
- /* This is a read-modify-write on the first byte of the ARB table.
- * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
- * is the field 'current'.
- * This solution is ugly, but we never write the whole table,
- * only individual fields in it.
- * In reality this field should not be in that structure
- * but in a soft register.
- */
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- data->arb_table_start, &tmp, data->sram_end);
-
- if (result)
- return result;
-
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
- return fiji_write_smc_sram_dword(hwmgr->smumgr,
- data->arb_table_start, tmp, data->sram_end);
-}
-
-static int fiji_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot))
- return smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_EnableVRHotGPIOInterrupt);
-
- return 0;
-}
-
-static int fiji_enable_sclk_control(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
- SCLK_PWRMGT_OFF, 0);
- return 0;
-}
-
-static int fiji_enable_ulv(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_ulv_parm *ulv = &(data->ulv);
-
- if (ulv->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
-
- return 0;
-}
-
-static int fiji_disable_ulv(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_ulv_parm *ulv = &(data->ulv);
-
- if (ulv->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
-
- return 0;
-}
-
-static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to enable Master Deep Sleep switch failed!",
- return -1);
- } else {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
- PP_ASSERT_WITH_CODE(false,
- "Attempt to disable Master Deep Sleep switch failed!",
- return -1);
- }
- }
-
- return 0;
-}
-
-static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
- PP_ASSERT_WITH_CODE(false,
- "Attempt to disable Master Deep Sleep switch failed!",
- return -1);
- }
- }
-
- return 0;
-}
-
-static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t val, val0, val2;
- uint32_t i, cpl_cntl, cpl_threshold, mc_threshold;
-
- /* enable SCLK dpm */
- if (!data->sclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
- "Failed to enable SCLK DPM during DPM Start Function!",
- return -1);
-
- /* enable MCLK dpm */
- if (0 == data->mclk_dpm_key_disabled) {
- cpl_threshold = 0;
- mc_threshold = 0;
-
- /* Read per MCD tile (0 - 7) */
- for (i = 0; i < 8; i++) {
- PHM_WRITE_FIELD(hwmgr->device, MC_CONFIG_MCD, MC_RD_ENABLE, i);
- val = cgs_read_register(hwmgr->device, mmMC_SEQ_RESERVE_0_S) & 0xf0000000;
- if (0xf0000000 != val) {
- /* count the number of MCQs that have channel(s) enabled */
- cpl_threshold++;
- /* only harvested (3) or full (4) channel configs are supported */
- mc_threshold = val ? 3 : 4;
- }
- }
- PP_ASSERT_WITH_CODE(0 != cpl_threshold,
- "Number of MCQ is zero!", return -EINVAL);
-
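- /* Two-pass LCAC programming: first write the enable bits and
- * thresholds, then, after a short delay, write the same registers
- * again with the SIGNAL_ID fields set.
- */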
- mc_threshold = ((mc_threshold & LCAC_MC0_CNTL__MC0_THRESHOLD_MASK) <<
- LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT) |
- LCAC_MC0_CNTL__MC0_ENABLE_MASK;
- cpl_cntl = ((cpl_threshold & LCAC_CPL_CNTL__CPL_THRESHOLD_MASK) <<
- LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT) |
- LCAC_CPL_CNTL__CPL_ENABLE_MASK;
- cpl_cntl = (cpl_cntl | (8 << LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT));
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC0_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC1_CNTL, mc_threshold);
- if (8 == cpl_threshold) {
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC2_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC3_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC4_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC5_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC6_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC7_CNTL, mc_threshold);
- }
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_CPL_CNTL, cpl_cntl);
-
- udelay(5);
-
- mc_threshold = mc_threshold |
- (1 << LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT);
- cpl_cntl = cpl_cntl | (1 << LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC0_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC1_CNTL, mc_threshold);
- if (8 == cpl_threshold) {
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC2_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC3_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC4_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC5_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC6_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC7_CNTL, mc_threshold);
- }
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_CPL_CNTL, cpl_cntl);
-
- /* Program CAC_EN per MCD (0-7) Tile */
- val0 = val = cgs_read_register(hwmgr->device, mmMC_CONFIG_MCD);
- val &= ~(MC_CONFIG_MCD__MCD0_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD1_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD2_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD3_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD4_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD5_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD6_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD7_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MC_RD_ENABLE_MASK);
-
- for (i = 0; i < 8; i++) {
- /* Enable MCD i Tile read & write */
- val2 = (val | (i << MC_CONFIG_MCD__MC_RD_ENABLE__SHIFT) |
- (1 << i));
- cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val2);
- /* Enable CAC_ON for MCD tile i */
- val2 = cgs_read_register(hwmgr->device, mmMC_SEQ_CNTL);
- val2 |= MC_SEQ_CNTL__CAC_EN_MASK;
- cgs_write_register(hwmgr->device, mmMC_SEQ_CNTL, val2);
- }
- /* Set MC_CONFIG_MCD back to its default setting val0 */
- cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val0);
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Enable)),
- "Failed to enable MCLK DPM during DPM Start Function!",
- return -1);
- }
- return 0;
-}
-
-static int fiji_start_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* enable general power management */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- GLOBAL_PWRMGT_EN, 1);
- /* enable sclk deep sleep */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
- DYNAMIC_PM_EN, 1);
- /* prepare for PCIE DPM */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start + offsetof(SMU73_SoftRegisters,
- VoltageChangeTimeout), 0x1000);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
- SWRST_COMMAND_1, RESETLC, 0x0);
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_Voltage_Cntl_Enable)),
- "Failed to enable voltage DPM during DPM Start Function!",
- return -1);
-
- if (fiji_enable_sclk_mclk_dpm(hwmgr)) {
- printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!\n");
- return -1;
- }
-
- /* enable PCIE dpm */
- if (!data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_Enable)),
- "Failed to enable pcie DPM during DPM Start Function!",
- return -1);
- }
-
- return 0;
-}
-
-static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* disable SCLK dpm */
- if (!data->sclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_DPM_Disable) == 0),
- "Failed to disable SCLK DPM!",
- return -1);
-
- /* disable MCLK dpm */
- if (!data->mclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0),
- "Failed to force MCLK DPM0!",
- return -1);
-
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Disable) == 0),
- "Failed to disable MCLK DPM!",
- return -1);
- }
-
- return 0;
-}
-
-static int fiji_stop_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* disable general power management */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- GLOBAL_PWRMGT_EN, 0);
- /* disable sclk deep sleep */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
- DYNAMIC_PM_EN, 0);
-
- /* disable PCIE dpm */
- if (!data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_Disable) == 0),
- "Failed to disable pcie DPM during DPM Stop Function!",
- return -1);
- }
-
- if (fiji_disable_sclk_mclk_dpm(hwmgr)) {
- printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!\n");
- return -1;
- }
-
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_Voltage_Cntl_Disable) == 0),
- "Failed to disable voltage DPM during DPM Stop Function!",
- return -1);
-
- return 0;
-}
-
-static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
- uint32_t sources)
-{
- bool protection;
- enum DPM_EVENT_SRC src;
-
- switch (sources) {
- default:
- printk(KERN_ERR "Unknown throttling event sources.\n");
- /* fall through */
- case 0:
- protection = false;
- /* src is unused */
- break;
- case (1 << PHM_AutoThrottleSource_Thermal):
- protection = true;
- src = DPM_EVENT_SRC_DIGITAL;
- break;
- case (1 << PHM_AutoThrottleSource_External):
- protection = true;
- src = DPM_EVENT_SRC_EXTERNAL;
- break;
- case (1 << PHM_AutoThrottleSource_External) |
- (1 << PHM_AutoThrottleSource_Thermal):
- protection = true;
- src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
- break;
- }
- /* Order matters - don't enable thermal protection for the wrong source. */
- if (protection) {
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
- DPM_EVENT_SRC, src);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- THERMAL_PROTECTION_DIS,
- !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController));
- } else {
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- THERMAL_PROTECTION_DIS, 1);
- }
-}
-
-static int fiji_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
- PHM_AutoThrottleSource source)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (!(data->active_auto_throttle_sources & (1 << source))) {
- data->active_auto_throttle_sources |= 1 << source;
- fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
- }
- return 0;
-}
-
-static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
- return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
- PHM_AutoThrottleSource source)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->active_auto_throttle_sources & (1 << source)) {
- data->active_auto_throttle_sources &= ~(1 << source);
- fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
- }
- return 0;
-}
-
-static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
- return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = (!fiji_is_dpm_running(hwmgr)) ? 0 : -1;
- PP_ASSERT_WITH_CODE(tmp_result == 0,
- "DPM is already running right now, no need to enable DPM!",
- return 0);
-
- if (fiji_voltage_control(hwmgr)) {
- tmp_result = fiji_enable_voltage_control(hwmgr);
- PP_ASSERT_WITH_CODE(tmp_result == 0,
- "Failed to enable voltage control!",
- result = tmp_result);
- }
-
- if (fiji_voltage_control(hwmgr)) {
- tmp_result = fiji_construct_voltage_tables(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to construct voltage tables!",
- result = tmp_result);
- }
-
- tmp_result = fiji_initialize_mc_reg_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize MC reg table!", result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport))
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController))
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
-
- tmp_result = fiji_program_static_screen_threshold_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program static screen threshold parameters!",
- result = tmp_result);
-
- tmp_result = fiji_enable_display_gap(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable display gap!", result = tmp_result);
-
- tmp_result = fiji_program_voting_clients(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program voting clients!", result = tmp_result);
-
- tmp_result = fiji_process_firmware_header(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to process firmware header!", result = tmp_result);
-
- tmp_result = fiji_initial_switch_from_arbf0_to_f1(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize switch from ArbF0 to F1!",
- result = tmp_result);
-
- tmp_result = fiji_init_smc_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize SMC table!", result = tmp_result);
-
- tmp_result = fiji_init_arb_table_index(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize ARB table index!", result = tmp_result);
-
- tmp_result = fiji_populate_pm_fuses(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to populate PM fuses!", result = tmp_result);
-
- tmp_result = fiji_enable_vrhot_gpio_interrupt(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
-
- tmp_result = tonga_notify_smc_display_change(hwmgr, false);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to notify no display!", result = tmp_result);
-
- tmp_result = fiji_enable_sclk_control(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable SCLK control!", result = tmp_result);
-
- tmp_result = fiji_enable_ulv(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable ULV!", result = tmp_result);
-
- tmp_result = fiji_enable_deep_sleep_master_switch(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable deep sleep master switch!", result = tmp_result);
-
- tmp_result = fiji_start_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to start DPM!", result = tmp_result);
-
- tmp_result = fiji_enable_smc_cac(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable SMC CAC!", result = tmp_result);
-
- tmp_result = fiji_enable_power_containment(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable power containment!", result = tmp_result);
-
- tmp_result = fiji_power_control_set_level(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to set the power control level!", result = tmp_result);
-
- tmp_result = fiji_enable_thermal_auto_throttle(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable thermal auto throttle!", result = tmp_result);
-
- return result;
-}
-
-static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = (fiji_is_dpm_running(hwmgr)) ? 0 : -1;
- PP_ASSERT_WITH_CODE(tmp_result == 0,
- "DPM is not running right now, no need to disable DPM!",
- return 0);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController))
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
-
- tmp_result = fiji_disable_power_containment(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable power containment!", result = tmp_result);
-
- tmp_result = fiji_disable_smc_cac(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable SMC CAC!", result = tmp_result);
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
-
- tmp_result = fiji_disable_thermal_auto_throttle(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable thermal auto throttle!", result = tmp_result);
-
- tmp_result = fiji_stop_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to stop DPM!", result = tmp_result);
-
- tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable deep sleep master switch!", result = tmp_result);
-
- tmp_result = fiji_disable_ulv(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable ULV!", result = tmp_result);
-
- tmp_result = fiji_clear_voting_clients(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to clear voting clients!", result = tmp_result);
-
- tmp_result = fiji_reset_to_default(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to reset to default!", result = tmp_result);
-
- tmp_result = fiji_force_switch_to_arbf0(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to force the switch to ArbF0!", result = tmp_result);
-
- return result;
-}
-
-static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t level, tmp;
-
- if (!data->sclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
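- /* Walk to the index of the highest set bit in the enable mask
- * (i.e. fls(mask) - 1); e.g. a mask of 0b0110 gives level 2.
- */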
- level = 0;
- tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
- if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- if (!data->mclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- level = 0;
- tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
- if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- if (!data->pcie_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
- level = 0;
- tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
- if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- (1 << level));
- }
- }
- return 0;
-}
-
-static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- phm_apply_dal_min_voltage_request(hwmgr);
-
- if (!data->sclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- }
- return 0;
-}
-
-static int fiji_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (!fiji_is_dpm_running(hwmgr))
- return -EINVAL;
-
- if (!data->pcie_dpm_key_disabled) {
- smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_UnForceLevel);
- }
-
- return fiji_upload_dpmlevel_enable_mask(hwmgr);
-}
-
-static uint32_t fiji_get_lowest_enabled_level(
- struct pp_hwmgr *hwmgr, uint32_t mask)
-{
- uint32_t level = 0;
-
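- /* Index of the lowest set bit, equivalent to ffs(mask) - 1 for a
- * non-zero mask; e.g. a mask of 0b0110 yields level 1. Callers must
- * pass a non-zero mask or this loop never terminates.
- */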
- while (0 == (mask & (1 << level)))
- level++;
-
- return level;
-}
-
-static int fiji_force_dpm_lowest(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data =
- (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t level;
-
- if (!data->sclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- level = fiji_get_lowest_enabled_level(hwmgr,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- if (!data->mclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- level = fiji_get_lowest_enabled_level(hwmgr,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- if (!data->pcie_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
- level = fiji_get_lowest_enabled_level(hwmgr,
- data->dpm_level_enable_mask.pcie_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- (1 << level));
- }
- }
-
- return 0;
-}
-
-static int fiji_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
- enum amd_dpm_forced_level level)
-{
- int ret = 0;
-
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = fiji_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = fiji_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_AUTO:
- ret = fiji_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- break;
- default:
- break;
- }
-
- hwmgr->dpm_level = level;
-
- return ret;
-}
-
-static int fiji_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
- return sizeof(struct fiji_power_state);
-}
-
-static int fiji_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
- void *state, struct pp_power_state *power_state,
- void *pp_table, uint32_t classification_flag)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_power_state *fiji_power_state =
- (struct fiji_power_state *)(&(power_state->hardware));
- struct fiji_performance_level *performance_level;
- ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
- ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
- (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
- ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
- (ATOM_Tonga_SCLK_Dependency_Table *)
- (((unsigned long)powerplay_table) +
- le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
- ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
- (ATOM_Tonga_MCLK_Dependency_Table *)
- (((unsigned long)powerplay_table) +
- le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-
- /* The following fields are not initialized here: id, orderedList, allStatesList */
- power_state->classification.ui_label =
- (le16_to_cpu(state_entry->usClassification) &
- ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
- ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
- power_state->classification.flags = classification_flag;
- /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
-
- power_state->classification.temporary_state = false;
- power_state->classification.to_be_deleted = false;
-
- power_state->validation.disallowOnDC =
- (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
- ATOM_Tonga_DISALLOW_ON_DC));
-
- power_state->pcie.lanes = 0;
-
- power_state->display.disableFrameModulation = false;
- power_state->display.limitRefreshrate = false;
- power_state->display.enableVariBright =
- (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
- ATOM_Tonga_ENABLE_VARIBRIGHT));
-
- power_state->validation.supportedPowerLevels = 0;
- power_state->uvd_clocks.VCLK = 0;
- power_state->uvd_clocks.DCLK = 0;
- power_state->temperatures.min = 0;
- power_state->temperatures.max = 0;
-
- performance_level = &(fiji_power_state->performance_levels
- [fiji_power_state->performance_level_count++]);
-
- PP_ASSERT_WITH_CODE(
- (fiji_power_state->performance_level_count < SMU73_MAX_LEVELS_GRAPHICS),
- "Performance level count exceeds the SMC limit!",
- return -1);
-
- PP_ASSERT_WITH_CODE(
- (fiji_power_state->performance_level_count <=
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
- "Performance level count exceeds the driver limit!",
- return -1);
-
- /* Performance levels are arranged from low to high. */
- performance_level->memory_clock = mclk_dep_table->entries
- [state_entry->ucMemoryClockIndexLow].ulMclk;
- performance_level->engine_clock = sclk_dep_table->entries
- [state_entry->ucEngineClockIndexLow].ulSclk;
- performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
- state_entry->ucPCIEGenLow);
- performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
- state_entry->ucPCIELaneLow);
-
- performance_level = &(fiji_power_state->performance_levels
- [fiji_power_state->performance_level_count++]);
- performance_level->memory_clock = mclk_dep_table->entries
- [state_entry->ucMemoryClockIndexHigh].ulMclk;
- performance_level->engine_clock = sclk_dep_table->entries
- [state_entry->ucEngineClockIndexHigh].ulSclk;
- performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
- state_entry->ucPCIEGenHigh);
- performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
- state_entry->ucPCIELaneHigh);
-
- return 0;
-}
-
-static int fiji_get_pp_table_entry(struct pp_hwmgr *hwmgr,
- unsigned long entry_index, struct pp_power_state *state)
-{
- int result;
- struct fiji_power_state *ps;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
- table_info->vdd_dep_on_mclk;
-
- state->hardware.magic = PHM_VIslands_Magic;
-
- ps = (struct fiji_power_state *)(&state->hardware);
-
- result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
- fiji_get_pp_table_entry_callback_func);
-
- /* This is the earliest point at which we have both the dependency
- * tables and the VBIOS boot state, since
- * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state.
- * If there is only one VDDCI/MCLK level, check whether it matches the
- * VBIOS boot state.
- */
- if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
- if (dep_mclk_table->entries[0].clk !=
- data->vbios_boot_state.mclk_bootup_value)
-			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
-					"does not match VBIOS boot MCLK level\n");
-		if (dep_mclk_table->entries[0].vddci !=
-				data->vbios_boot_state.vddci_bootup_value)
-			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
-					"does not match VBIOS boot VDDCI level\n");
- }
-
- /* set DC compatible flag if this state supports DC */
- if (!state->validation.disallowOnDC)
- ps->dc_compatible = true;
-
- if (state->classification.flags & PP_StateClassificationFlag_ACPI)
- data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
-
- ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
- ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
-
- if (!result) {
- uint32_t i;
-
- switch (state->classification.ui_label) {
- case PP_StateUILabel_Performance:
- data->use_pcie_performance_levels = true;
-
- for (i = 0; i < ps->performance_level_count; i++) {
- if (data->pcie_gen_performance.max <
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_performance.max =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_gen_performance.min >
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_performance.min =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_lane_performance.max <
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_performance.max =
- ps->performance_levels[i].pcie_lane;
-
- if (data->pcie_lane_performance.min >
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_performance.min =
- ps->performance_levels[i].pcie_lane;
- }
- break;
- case PP_StateUILabel_Battery:
- data->use_pcie_power_saving_levels = true;
-
- for (i = 0; i < ps->performance_level_count; i++) {
- if (data->pcie_gen_power_saving.max <
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_power_saving.max =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_gen_power_saving.min >
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_power_saving.min =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_lane_power_saving.max <
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_power_saving.max =
- ps->performance_levels[i].pcie_lane;
-
- if (data->pcie_lane_power_saving.min >
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_power_saving.min =
- ps->performance_levels[i].pcie_lane;
- }
- break;
- default:
- break;
- }
- }
- return 0;
-}
-
-static int fiji_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
- struct pp_power_state *request_ps,
- const struct pp_power_state *current_ps)
-{
- struct fiji_power_state *fiji_ps =
- cast_phw_fiji_power_state(&request_ps->hardware);
- uint32_t sclk;
- uint32_t mclk;
- struct PP_Clocks minimum_clocks = {0};
- bool disable_mclk_switching;
- bool disable_mclk_switching_for_frame_lock;
- struct cgs_display_info info = {0};
- const struct phm_clock_and_voltage_limits *max_limits;
- uint32_t i;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int32_t count;
- int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
-
- data->battery_state = (PP_StateUILabel_Battery ==
- request_ps->classification.ui_label);
-
- PP_ASSERT_WITH_CODE(fiji_ps->performance_level_count == 2,
- "VI should always have 2 performance levels",);
-
- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
- &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
- &(hwmgr->dyn_state.max_clock_voltage_on_dc);
-
- /* Cap clock DPM tables at DC MAX if it is in DC. */
- if (PP_PowerSource_DC == hwmgr->power_source) {
- for (i = 0; i < fiji_ps->performance_level_count; i++) {
- if (fiji_ps->performance_levels[i].memory_clock > max_limits->mclk)
- fiji_ps->performance_levels[i].memory_clock = max_limits->mclk;
- if (fiji_ps->performance_levels[i].engine_clock > max_limits->sclk)
- fiji_ps->performance_levels[i].engine_clock = max_limits->sclk;
- }
- }
-
- fiji_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
- fiji_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
- fiji_ps->acp_clk = hwmgr->acp_arbiter.acpclk;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
-	/* TODO: result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort); */
-
-	/* TODO: GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
- max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
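-		/* The stable P-state targets 75% of the maximum AC engine clock. */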
- stable_pstate_sclk = (max_limits->sclk * 75) / 100;
-
- for (count = table_info->vdd_dep_on_sclk->count - 1;
- count >= 0; count--) {
- if (stable_pstate_sclk >=
- table_info->vdd_dep_on_sclk->entries[count].clk) {
- stable_pstate_sclk =
- table_info->vdd_dep_on_sclk->entries[count].clk;
- break;
- }
- }
-
- if (count < 0)
- stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
-
- stable_pstate_mclk = max_limits->mclk;
-
- minimum_clocks.engineClock = stable_pstate_sclk;
- minimum_clocks.memoryClock = stable_pstate_mclk;
- }
-
- if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
- minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
- if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
- fiji_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
- if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.engineClock),
- "Overdrive sclk exceeds limit",
- hwmgr->gfx_arbiter.sclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
- if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
- fiji_ps->performance_levels[1].engine_clock =
- hwmgr->gfx_arbiter.sclk_over_drive;
- }
-
- if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.memoryClock),
- "Overdrive mclk exceeds limit",
- hwmgr->gfx_arbiter.mclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
- if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
- fiji_ps->performance_levels[1].memory_clock =
- hwmgr->gfx_arbiter.mclk_over_drive;
- }
-
- disable_mclk_switching_for_frame_lock = phm_cap_enabled(
- hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
-
- disable_mclk_switching = (1 < info.display_count) ||
- disable_mclk_switching_for_frame_lock;
-
- sclk = fiji_ps->performance_levels[0].engine_clock;
- mclk = fiji_ps->performance_levels[0].memory_clock;
-
- if (disable_mclk_switching)
- mclk = fiji_ps->performance_levels
- [fiji_ps->performance_level_count - 1].memory_clock;
-
- if (sclk < minimum_clocks.engineClock)
- sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
- max_limits->sclk : minimum_clocks.engineClock;
-
- if (mclk < minimum_clocks.memoryClock)
- mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
- max_limits->mclk : minimum_clocks.memoryClock;
-
- fiji_ps->performance_levels[0].engine_clock = sclk;
- fiji_ps->performance_levels[0].memory_clock = mclk;
-
- fiji_ps->performance_levels[1].engine_clock =
- (fiji_ps->performance_levels[1].engine_clock >=
- fiji_ps->performance_levels[0].engine_clock) ?
- fiji_ps->performance_levels[1].engine_clock :
- fiji_ps->performance_levels[0].engine_clock;
-
- if (disable_mclk_switching) {
- if (mclk < fiji_ps->performance_levels[1].memory_clock)
- mclk = fiji_ps->performance_levels[1].memory_clock;
-
- fiji_ps->performance_levels[0].memory_clock = mclk;
- fiji_ps->performance_levels[1].memory_clock = mclk;
- } else {
- if (fiji_ps->performance_levels[1].memory_clock <
- fiji_ps->performance_levels[0].memory_clock)
- fiji_ps->performance_levels[1].memory_clock =
- fiji_ps->performance_levels[0].memory_clock;
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
- for (i = 0; i < fiji_ps->performance_level_count; i++) {
- fiji_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
- fiji_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
- fiji_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
-			fiji_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
- }
- }
-
- return 0;
-}
-
-static int fiji_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- const struct fiji_power_state *fiji_ps =
- cast_const_phw_fiji_power_state(states->pnew_state);
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- uint32_t sclk = fiji_ps->performance_levels
- [fiji_ps->performance_level_count - 1].engine_clock;
- struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- uint32_t mclk = fiji_ps->performance_levels
- [fiji_ps->performance_level_count - 1].memory_clock;
- uint32_t i;
- struct cgs_display_info info = {0};
-
- data->need_update_smu7_dpm_table = 0;
-
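-	/* If the requested top-level SCLK is not an existing DPM level, flag an
-	 * overdrive (OD) table rewrite; otherwise only re-upload when the
-	 * deep-sleep minimum clock has changed.
-	 */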
- for (i = 0; i < sclk_table->count; i++) {
- if (sclk == sclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= sclk_table->count)
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- else {
-		if (data->display_timing.min_clock_in_sr !=
- hwmgr->display_config.min_core_set_clock_in_sr)
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
- }
-
- for (i = 0; i < mclk_table->count; i++) {
- if (mclk == mclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= mclk_table->count)
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-
- return 0;
-}
-
-static uint16_t fiji_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
- const struct fiji_power_state *fiji_ps)
-{
- uint32_t i;
- uint32_t sclk, max_sclk = 0;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
-
- for (i = 0; i < fiji_ps->performance_level_count; i++) {
- sclk = fiji_ps->performance_levels[i].engine_clock;
- if (max_sclk < sclk)
- max_sclk = sclk;
- }
-
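-	/* Map the highest engine clock to its PCIe speed level, clamping to
-	 * the last entry of the PCIe speed table.
-	 */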
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
- return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
- dpm_table->pcie_speed_table.dpm_levels
- [dpm_table->pcie_speed_table.count - 1].value :
- dpm_table->pcie_speed_table.dpm_levels[i].value);
- }
-
- return 0;
-}
-
-static int fiji_request_link_speed_change_before_state_change(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_power_state *fiji_nps =
- cast_const_phw_fiji_power_state(states->pnew_state);
- const struct fiji_power_state *fiji_cps =
- cast_const_phw_fiji_power_state(states->pcurrent_state);
-
- uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_nps);
- uint16_t current_link_speed;
-
- if (data->force_pcie_gen == PP_PCIEGenInvalid)
- current_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_cps);
- else
- current_link_speed = data->force_pcie_gen;
-
- data->force_pcie_gen = PP_PCIEGenInvalid;
- data->pspp_notify_required = false;
- if (target_link_speed > current_link_speed) {
-		switch (target_link_speed) {
- case PP_PCIEGen3:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
- break;
- data->force_pcie_gen = PP_PCIEGen2;
- if (current_link_speed == PP_PCIEGen2)
- break;
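-			/* fall through */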
- case PP_PCIEGen2:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
- break;
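-			/* fall through */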
- default:
- data->force_pcie_gen = fiji_get_current_pcie_speed(hwmgr);
- break;
- }
- } else {
- if (target_link_speed < current_link_speed)
- data->pspp_notify_required = true;
- }
-
- return 0;
-}
-
-static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if ((0 == data->sclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
- "Trying to freeze SCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_FreezeLevel),
- "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- if ((0 == data->mclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
- "Trying to freeze MCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_FreezeLevel),
- "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- return 0;
-}
-
-static int fiji_populate_and_upload_sclk_mclk_dpm_levels(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- int result = 0;
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- const struct fiji_power_state *fiji_ps =
- cast_const_phw_fiji_power_state(states->pnew_state);
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t sclk = fiji_ps->performance_levels
- [fiji_ps->performance_level_count - 1].engine_clock;
- uint32_t mclk = fiji_ps->performance_levels
- [fiji_ps->performance_level_count - 1].memory_clock;
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
-
- struct fiji_dpm_table *golden_dpm_table = &data->golden_dpm_table;
- uint32_t dpm_count, clock_percent;
- uint32_t i;
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
- dpm_table->sclk_table.dpm_levels
- [dpm_table->sclk_table.count - 1].value = sclk;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinDCSupport)) {
- /* Need to do calculation based on the golden DPM table
- * as the Heatmap GPU Clock axis is also based on the default values
- */
- PP_ASSERT_WITH_CODE(
- (golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count - 1].value != 0),
- "Divide by 0!",
- return -1);
- dpm_count = dpm_table->sclk_table.count < 2 ?
- 0 : dpm_table->sclk_table.count - 2;
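-			/* Scale each intermediate level by the same percentage
-			 * the top level moved away from its golden value.
-			 */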
- for (i = dpm_count; i > 1; i--) {
- if (sclk > golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count-1].value) {
- clock_percent =
- ((sclk - golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count-1].value) * 100) /
- golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count-1].value;
-
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value +
- (golden_dpm_table->sclk_table.dpm_levels[i].value *
- clock_percent)/100;
-
- } else if (golden_dpm_table->sclk_table.dpm_levels
- [dpm_table->sclk_table.count-1].value > sclk) {
- clock_percent =
- ((golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count - 1].value - sclk) *
- 100) /
- golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count-1].value;
-
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value -
- (golden_dpm_table->sclk_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value;
- }
- }
- }
-
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
- dpm_table->mclk_table.dpm_levels
- [dpm_table->mclk_table.count - 1].value = mclk;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
- PP_ASSERT_WITH_CODE(
- (golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value != 0),
- "Divide by 0!",
- return -1);
- dpm_count = dpm_table->mclk_table.count < 2 ?
- 0 : dpm_table->mclk_table.count - 2;
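-			/* As with SCLK above: scale the intermediate MCLK
-			 * levels by the top level's percentage shift.
-			 */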
- for (i = dpm_count; i > 1; i--) {
- if (mclk > golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value) {
- clock_percent = ((mclk -
- golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value) * 100) /
- golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value;
-
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value +
- (golden_dpm_table->mclk_table.dpm_levels[i].value *
- clock_percent) / 100;
-
- } else if (golden_dpm_table->mclk_table.dpm_levels
- [dpm_table->mclk_table.count-1].value > mclk) {
- clock_percent = ((golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value - mclk) * 100) /
- golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value;
-
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value -
- (golden_dpm_table->mclk_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value;
- }
- }
- }
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
- result = fiji_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
- /*populate MCLK dpm table to SMU7 */
- result = fiji_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- return result;
-}
-
-static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
-		struct fiji_single_dpm_table *dpm_table,
- uint32_t low_limit, uint32_t high_limit)
-{
- uint32_t i;
-
- for (i = 0; i < dpm_table->count; i++) {
- if ((dpm_table->dpm_levels[i].value < low_limit) ||
- (dpm_table->dpm_levels[i].value > high_limit))
- dpm_table->dpm_levels[i].enabled = false;
- else
- dpm_table->dpm_levels[i].enabled = true;
- }
- return 0;
-}
-
-static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
- const struct fiji_power_state *fiji_ps)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t high_limit_count;
-
- PP_ASSERT_WITH_CODE((fiji_ps->performance_level_count >= 1),
- "power state did not have any performance level",
- return -1);
-
- high_limit_count = (1 == fiji_ps->performance_level_count) ? 0 : 1;
-
- fiji_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.sclk_table),
- fiji_ps->performance_levels[0].engine_clock,
- fiji_ps->performance_levels[high_limit_count].engine_clock);
-
- fiji_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.mclk_table),
- fiji_ps->performance_levels[0].memory_clock,
- fiji_ps->performance_levels[high_limit_count].memory_clock);
-
- return 0;
-}
-
-static int fiji_generate_dpm_level_enable_mask(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- int result;
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_power_state *fiji_ps =
- cast_const_phw_fiji_power_state(states->pnew_state);
-
- result = fiji_trim_dpm_states(hwmgr, fiji_ps);
- if (result)
- return result;
-
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
- data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
- data->last_mclk_dpm_enable_mask =
- data->dpm_level_enable_mask.mclk_dpm_enable_mask;
-
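-	/* With UVD active, mask off MCLK DPM level 0, presumably so the
-	 * memory clock cannot drop too low during decode.
-	 */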
- if (data->uvd_enabled) {
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
- data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
- }
-
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
-
- return 0;
-}
-
-int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
- (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable);
-}
-
-int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- PPSMC_MSG_VCEDPM_Enable :
- PPSMC_MSG_VCEDPM_Disable);
-}
-
-int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- PPSMC_MSG_SAMUDPM_Enable :
- PPSMC_MSG_SAMUDPM_Disable);
-}
-
-int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- PPSMC_MSG_ACPDPM_Enable :
- PPSMC_MSG_ACPDPM_Disable);
-}
-
-int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (!bgate) {
- data->smc_state_table.UvdBootLevel = 0;
- if (table_info->mm_dep_table->count > 0)
- data->smc_state_table.UvdBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
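-		/* Align the offset down to a dword boundary; the SMC indirect
-		 * interface is 32 bits wide, so the boot-level byte is updated
-		 * with a read-modify-write below.
-		 */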
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.UvdBootLevel));
- }
-
- return fiji_enable_disable_uvd_dpm(hwmgr, !bgate);
-}
-
-int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_power_state *fiji_nps =
- cast_const_phw_fiji_power_state(states->pnew_state);
- const struct fiji_power_state *fiji_cps =
- cast_const_phw_fiji_power_state(states->pcurrent_state);
-
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	if (fiji_nps->vce_clks.evclk > 0 &&
- (fiji_cps == NULL || fiji_cps->vce_clks.evclk == 0)) {
- data->smc_state_table.VceBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
-
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_StablePState))
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_VCEDPM_SetEnabledMask,
-					(uint32_t)1 << data->smc_state_table.VceBootLevel);
-
-		fiji_enable_disable_vce_dpm(hwmgr, true);
-	} else if (fiji_nps->vce_clks.evclk == 0 &&
-			fiji_cps != NULL &&
-			fiji_cps->vce_clks.evclk > 0)
-		fiji_enable_disable_vce_dpm(hwmgr, false);
-
- return 0;
-}
-
-int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (!bgate) {
- data->smc_state_table.SamuBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.SamuBootLevel));
- }
-
- return fiji_enable_disable_samu_dpm(hwmgr, !bgate);
-}
-
-int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (!bgate) {
- data->smc_state_table.AcpBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, AcpBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFF00FF;
- mm_boot_level_value |= data->smc_state_table.AcpBootLevel << 8;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_ACPDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.AcpBootLevel));
- }
-
- return fiji_enable_disable_acp_dpm(hwmgr, !bgate);
-}
-
-static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold =
- data->low_sclk_interrupt_threshold;
-
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = fiji_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- data->sram_end);
- }
-
- return result;
-}
-
-static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return fiji_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if ((0 == data->sclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
-
- PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
- "Trying to Unfreeze SCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
- "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
- return -1);
- }
-
- if ((0 == data->mclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
-
- PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
- "Trying to Unfreeze MCLK DPM when DPM is disabled",
- );
-		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_MCLKDPM_UnfreezeLevel),
- "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
- return -1);
- }
-
- data->need_update_smu7_dpm_table = 0;
-
- return 0;
-}
-
-/* Look up the voltage for DAL's requested level and send the requested VDDC
- * voltage to the SMC. Currently a no-op on Fiji.
- */
-static void fiji_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
-{
- return;
-}
-
-int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* Apply minimum voltage based on DAL's request level */
- fiji_apply_dal_minimum_voltage_request(hwmgr);
-
- if (0 == data->sclk_dpm_key_disabled) {
-		/* Check whether DPM is running; if sending this message is ever
-		 * found to cause a hang, it should be skipped.
-		 */
-		if (!fiji_is_dpm_running(hwmgr))
-			printk(KERN_ERR "[ powerplay ] Trying to set SCLK enable mask when DPM is disabled\n");
-
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- PP_ASSERT_WITH_CODE((0 == result),
- "Set Sclk Dpm enable Mask failed", return -1);
- }
- }
-
- if (0 == data->mclk_dpm_key_disabled) {
-		/* Check whether DPM is running; if sending this message is ever
-		 * found to cause a hang, it should be skipped.
-		 */
-		if (!fiji_is_dpm_running(hwmgr))
-			printk(KERN_ERR "[ powerplay ] Trying to set MCLK enable mask when DPM is disabled\n");
-
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- PP_ASSERT_WITH_CODE((0 == result),
- "Set Mclk Dpm enable Mask failed", return -1);
- }
- }
-
- return 0;
-}
-
-static int fiji_notify_link_speed_change_after_state_change(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_power_state *fiji_ps =
- cast_const_phw_fiji_power_state(states->pnew_state);
- uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_ps);
- uint8_t request;
-
- if (data->pspp_notify_required) {
- if (target_link_speed == PP_PCIEGen3)
- request = PCIE_PERF_REQ_GEN3;
- else if (target_link_speed == PP_PCIEGen2)
- request = PCIE_PERF_REQ_GEN2;
- else
- request = PCIE_PERF_REQ_GEN1;
-
-		if (request == PCIE_PERF_REQ_GEN1 &&
- fiji_get_current_pcie_speed(hwmgr) > 0)
- return 0;
-
- if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
-			if (PP_PCIEGen2 == target_link_speed)
-				printk(KERN_ERR "PSPP request to switch to Gen2 from Gen3 failed!\n");
-			else
-				printk(KERN_ERR "PSPP request to switch to Gen1 from Gen2 failed!\n");
- }
- }
-
- return 0;
-}
-
-static int fiji_set_power_state_tasks(struct pp_hwmgr *hwmgr,
- const void *input)
-{
- int tmp_result, result = 0;
-
- tmp_result = fiji_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to find DPM states clocks in DPM table!",
- result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PCIEPerformanceRequest)) {
- tmp_result =
- fiji_request_link_speed_change_before_state_change(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to request link speed change before state change!",
- result = tmp_result);
- }
-
- tmp_result = fiji_freeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
-
- tmp_result = fiji_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to populate and upload SCLK MCLK DPM levels!",
- result = tmp_result);
-
- tmp_result = fiji_generate_dpm_level_enable_mask(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to generate DPM level enabled mask!",
- result = tmp_result);
-
- tmp_result = fiji_update_vce_dpm(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to update VCE DPM!",
- result = tmp_result);
-
- tmp_result = fiji_update_sclk_threshold(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to update SCLK threshold!",
- result = tmp_result);
-
- tmp_result = fiji_program_mem_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program memory timing parameters!",
- result = tmp_result);
-
- tmp_result = fiji_unfreeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to unfreeze SCLK MCLK DPM!",
- result = tmp_result);
-
- tmp_result = fiji_upload_dpm_level_enable_mask(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to upload DPM level enabled mask!",
- result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PCIEPerformanceRequest)) {
- tmp_result =
- fiji_notify_link_speed_change_after_state_change(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to notify link speed change after state change!",
- result = tmp_result);
- }
-
- return result;
-}
-
-static int fiji_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
- struct pp_power_state *ps;
- struct fiji_power_state *fiji_ps;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
- if (low)
- return fiji_ps->performance_levels[0].engine_clock;
- else
- return fiji_ps->performance_levels
- [fiji_ps->performance_level_count-1].engine_clock;
-}
-
-static int fiji_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
- struct pp_power_state *ps;
- struct fiji_power_state *fiji_ps;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
- if (low)
- return fiji_ps->performance_levels[0].memory_clock;
- else
- return fiji_ps->performance_levels
- [fiji_ps->performance_level_count-1].memory_clock;
-}
-
-static void fiji_print_current_performance_level(
- struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
- uint32_t sclk, mclk, activity_percent = 0;
- uint32_t offset;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
-
- sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
-
- mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
- seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n",
- mclk / 100, sclk / 100);
-
- offset = data->soft_regs_start + offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
- activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
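-	/* The activity counter appears to be 8.8 fixed point; adding 0x80
-	 * rounds to the nearest integer percent.
-	 */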
- activity_percent += 0x80;
- activity_percent >>= 8;
-
- seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
-
- seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
-
- seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
-}
-
-static int fiji_program_display_gap(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t num_active_displays = 0;
- uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
- uint32_t display_gap2;
- uint32_t pre_vbi_time_in_us;
- uint32_t frame_time_in_us;
- uint32_t ref_clock;
- uint32_t refresh_rate = 0;
- struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
-
- info.mode_info = &mode_info;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- num_active_displays = info.display_count;
-
- display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
- DISP_GAP, (num_active_displays > 0)?
- DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL, display_gap);
-
- ref_clock = mode_info.ref_clock;
- refresh_rate = mode_info.refresh_rate;
-
- if (refresh_rate == 0)
- refresh_rate = 60;
-
- frame_time_in_us = 1000000 / refresh_rate;
-
- pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
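-	/* ref_clock is in 10 kHz units, so ref_clock / 100 is the number of
-	 * reference ticks per microsecond.
-	 */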
- display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL2, display_gap2);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start +
- offsetof(SMU73_SoftRegisters, PreVBlankGap), 0x64);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start +
- offsetof(SMU73_SoftRegisters, VBlankTimeout),
- (frame_time_in_us - pre_vbi_time_in_us));
-
- if (num_active_displays == 1)
- tonga_notify_smc_display_change(hwmgr, true);
-
- return 0;
-}
-
-int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
-{
- return fiji_program_display_gap(hwmgr);
-}
-
-static int fiji_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr,
- uint16_t us_max_fan_pwm)
-{
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
-
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
-}
-
-static int fiji_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr,
- uint16_t us_max_fan_rpm)
-{
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
-
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
-}
-
-int fiji_dpm_set_interrupt_state(void *private_data,
- unsigned src_id, unsigned type,
- int enabled)
-{
- uint32_t cg_thermal_int;
- struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- switch (type) {
- case AMD_THERMAL_IRQ_LOW_TO_HIGH:
- if (enabled) {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- } else {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- }
- break;
-
- case AMD_THERMAL_IRQ_HIGH_TO_LOW:
- if (enabled) {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- } else {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- }
- break;
- default:
- break;
- }
- return 0;
-}
-
-int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
- const void *thermal_interrupt_info)
-{
- int result;
- const struct pp_interrupt_registration_info *info =
- (const struct pp_interrupt_registration_info *)
- thermal_interrupt_info;
-
- if (info == NULL)
- return -EINVAL;
-
- result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
- fiji_dpm_set_interrupt_state,
- info->call_back, info->context);
-
- if (result)
- return -EINVAL;
-
- result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
- fiji_dpm_set_interrupt_state,
- info->call_back, info->context);
-
- if (result)
- return -EINVAL;
-
- return 0;
-}
-
-static int fiji_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
- if (mode) {
- /* stop auto-manage */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- fiji_fan_ctrl_stop_smc_fan_control(hwmgr);
- fiji_fan_ctrl_set_static_mode(hwmgr, mode);
-	} else {
-		/* restart auto-manage */
-		fiji_fan_ctrl_reset_fan_speed_to_default(hwmgr);
-	}
-
- return 0;
-}
-
-static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr->fan_ctrl_is_in_default_mode)
- return hwmgr->fan_ctrl_default_mode;
- else
- return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, FDO_PWM_MODE);
-}
-
-static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, uint32_t mask)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
- return -EINVAL;
-
- switch (type) {
- case PP_SCLK:
- if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
- break;
-
- case PP_MCLK:
- if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
- break;
-
- case PP_PCIE:
- {
- uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- uint32_t level = 0;
-
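-		/* Find the index of the highest set bit, i.e. force the
-		 * topmost PCIe DPM level permitted by the mask.
-		 */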
- while (tmp >>= 1)
- level++;
-
- if (!data->pcie_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- level);
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
-
-static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, char *buf)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- struct fiji_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
- int i, now, size = 0;
- uint32_t clock, pcie_speed;
-
- switch (type) {
- case PP_SCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- for (i = 0; i < sclk_table->count; i++) {
- if (clock > sclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < sclk_table->count; i++)
-			size += sprintf(buf + size, "%d: %uMHz %s\n",
- i, sclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_MCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- for (i = 0; i < mclk_table->count; i++) {
- if (clock > mclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < mclk_table->count; i++)
-			size += sprintf(buf + size, "%d: %uMHz %s\n",
- i, mclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_PCIE:
- pcie_speed = fiji_get_current_pcie_speed(hwmgr);
- for (i = 0; i < pcie_table->count; i++) {
- if (pcie_speed != pcie_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < pcie_table->count; i++)
-			size += sprintf(buf + size, "%d: %s %s\n", i,
-				(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x1" :
-				(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
-				(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
- (i == now) ? "*" : "");
- break;
- default:
- break;
- }
- return size;
-}
-
-static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
- const struct fiji_performance_level *pl2)
-{
- return ((pl1->memory_clock == pl2->memory_clock) &&
- (pl1->engine_clock == pl2->engine_clock) &&
- (pl1->pcie_gen == pl2->pcie_gen) &&
- (pl1->pcie_lane == pl2->pcie_lane));
-}
-
-int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
-{
- const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
- const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
- int i;
-
- if (equal == NULL || psa == NULL || psb == NULL)
- return -EINVAL;
-
- /* If the two states don't even have the same number of performance levels they cannot be the same state. */
- if (psa->performance_level_count != psb->performance_level_count) {
- *equal = false;
- return 0;
- }
-
- for (i = 0; i < psa->performance_level_count; i++) {
- if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
- /* If we have found even one performance level pair that is different the states are different. */
- *equal = false;
- return 0;
- }
- }
-
-	/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
- *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
- *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
- *equal &= (psa->sclk_threshold == psb->sclk_threshold);
- *equal &= (psa->acp_clk == psb->acp_clk);
-
- return 0;
-}
-
-bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- bool is_update_required = false;
-	struct cgs_display_info info = {0, 0, NULL};
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
- is_update_required = true;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
-		if (hwmgr->display_config.min_core_set_clock_in_sr != data->display_timing.min_clock_in_sr)
- is_update_required = true;
- }
-
- return is_update_required;
-}
-
-static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- struct fiji_single_dpm_table *golden_sclk_table =
- &(data->golden_dpm_table.sclk_table);
- int value;
-
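-	/* Overdrive expressed as a percentage above the default (golden)
-	 * top SCLK level.
-	 */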
- value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
- 100 /
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return value;
-}
-
-static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *golden_sclk_table =
- &(data->golden_dpm_table.sclk_table);
- struct pp_power_state *ps;
- struct fiji_power_state *fiji_ps;
-
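-	/* Clamp the requested overdrive to +20% over the default top level. */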
- if (value > 20)
- value = 20;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
- fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock =
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
- value / 100 +
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return 0;
-}
-
-static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- struct fiji_single_dpm_table *golden_mclk_table =
- &(data->golden_dpm_table.mclk_table);
- int value;
-
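-	/* Same calculation as the SCLK overdrive, applied to the memory clock. */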
- value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
- 100 /
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return value;
-}
-
-static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *golden_mclk_table =
- &(data->golden_dpm_table.mclk_table);
- struct pp_power_state *ps;
- struct fiji_power_state *fiji_ps;
-
- if (value > 20)
- value = 20;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
- fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock =
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
- value / 100 +
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return 0;
-}
-
-static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
- .backend_init = &fiji_hwmgr_backend_init,
- .backend_fini = &fiji_hwmgr_backend_fini,
- .asic_setup = &fiji_setup_asic_task,
- .dynamic_state_management_enable = &fiji_enable_dpm_tasks,
- .dynamic_state_management_disable = &fiji_disable_dpm_tasks,
- .force_dpm_level = &fiji_dpm_force_dpm_level,
- .get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries,
- .get_power_state_size = &fiji_get_power_state_size,
- .get_pp_table_entry = &fiji_get_pp_table_entry,
- .patch_boot_state = &fiji_patch_boot_state,
- .apply_state_adjust_rules = &fiji_apply_state_adjust_rules,
- .power_state_set = &fiji_set_power_state_tasks,
- .get_sclk = &fiji_dpm_get_sclk,
- .get_mclk = &fiji_dpm_get_mclk,
-	.print_current_perforce_level = &fiji_print_current_performance_level,
- .powergate_uvd = &fiji_phm_powergate_uvd,
- .powergate_vce = &fiji_phm_powergate_vce,
- .disable_clock_power_gating = &fiji_phm_disable_clock_power_gating,
- .notify_smc_display_config_after_ps_adjustment =
- &tonga_notify_smc_display_config_after_ps_adjustment,
- .display_config_changed = &fiji_display_configuration_changed_task,
- .set_max_fan_pwm_output = fiji_set_max_fan_pwm_output,
- .set_max_fan_rpm_output = fiji_set_max_fan_rpm_output,
- .get_temperature = fiji_thermal_get_temperature,
- .stop_thermal_controller = fiji_thermal_stop_thermal_controller,
- .get_fan_speed_info = fiji_fan_ctrl_get_fan_speed_info,
- .get_fan_speed_percent = fiji_fan_ctrl_get_fan_speed_percent,
- .set_fan_speed_percent = fiji_fan_ctrl_set_fan_speed_percent,
- .reset_fan_speed_to_default = fiji_fan_ctrl_reset_fan_speed_to_default,
- .get_fan_speed_rpm = fiji_fan_ctrl_get_fan_speed_rpm,
- .set_fan_speed_rpm = fiji_fan_ctrl_set_fan_speed_rpm,
- .uninitialize_thermal_controller = fiji_thermal_ctrl_uninitialize_thermal_controller,
- .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
- .set_fan_control_mode = fiji_set_fan_control_mode,
- .get_fan_control_mode = fiji_get_fan_control_mode,
- .check_states_equal = fiji_check_states_equal,
- .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
- .force_clock_level = fiji_force_clock_level,
- .print_clock_levels = fiji_print_clock_levels,
- .get_sclk_od = fiji_get_sclk_od,
- .set_sclk_od = fiji_set_sclk_od,
- .get_mclk_od = fiji_get_mclk_od,
- .set_mclk_od = fiji_set_mclk_od,
-};
-
-int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
-{
- hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
- hwmgr->pptable_func = &tonga_pptable_funcs;
- pp_fiji_thermal_initialize(hwmgr);
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
deleted file mode 100644
index bf67c2a92c68..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _FIJI_HWMGR_H_
-#define _FIJI_HWMGR_H_
-
-#include "hwmgr.h"
-#include "smu73.h"
-#include "smu73_discrete.h"
-#include "ppatomctrl.h"
-#include "fiji_ppsmc.h"
-#include "pp_endian.h"
-
-#define FIJI_MAX_HARDWARE_POWERLEVELS 2
-#define FIJI_AT_DFLT 30
-
-#define FIJI_VOLTAGE_CONTROL_NONE 0x0
-#define FIJI_VOLTAGE_CONTROL_BY_GPIO 0x1
-#define FIJI_VOLTAGE_CONTROL_BY_SVID2 0x2
-#define FIJI_VOLTAGE_CONTROL_MERGED 0x3
-
-#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
-#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
-#define DPMTABLE_UPDATE_SCLK 0x00000004
-#define DPMTABLE_UPDATE_MCLK 0x00000008
-
-struct fiji_performance_level {
- uint32_t memory_clock;
- uint32_t engine_clock;
- uint16_t pcie_gen;
- uint16_t pcie_lane;
-};
-
-struct fiji_uvd_clocks {
- uint32_t vclk;
- uint32_t dclk;
-};
-
-struct fiji_vce_clocks {
- uint32_t evclk;
- uint32_t ecclk;
-};
-
-struct fiji_power_state {
- uint32_t magic;
- struct fiji_uvd_clocks uvd_clks;
- struct fiji_vce_clocks vce_clks;
- uint32_t sam_clk;
- uint32_t acp_clk;
- uint16_t performance_level_count;
- bool dc_compatible;
- uint32_t sclk_threshold;
- struct fiji_performance_level performance_levels[FIJI_MAX_HARDWARE_POWERLEVELS];
-};
-
-struct fiji_dpm_level {
- bool enabled;
- uint32_t value;
- uint32_t param1;
-};
-
-#define FIJI_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define MAX_REGULAR_DPM_NUMBER 8
-#define FIJI_MINIMUM_ENGINE_CLOCK 2500
-
-struct fiji_single_dpm_table {
- uint32_t count;
- struct fiji_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
-};
-
-struct fiji_dpm_table {
- struct fiji_single_dpm_table sclk_table;
- struct fiji_single_dpm_table mclk_table;
- struct fiji_single_dpm_table pcie_speed_table;
- struct fiji_single_dpm_table vddc_table;
- struct fiji_single_dpm_table vddci_table;
- struct fiji_single_dpm_table mvdd_table;
-};
-
-struct fiji_clock_registers {
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_FUNC_CNTL_4;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t vDLL_CNTL;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL_1;
- uint32_t vMPLL_FUNC_CNTL_2;
- uint32_t vMPLL_SS1;
- uint32_t vMPLL_SS2;
-};
-
-struct fiji_voltage_smio_registers {
- uint32_t vS0_VID_LOWER_SMIO_CNTL;
-};
-
-#define FIJI_MAX_LEAKAGE_COUNT 8
-struct fiji_leakage_voltage {
- uint16_t count;
- uint16_t leakage_id[FIJI_MAX_LEAKAGE_COUNT];
- uint16_t actual_voltage[FIJI_MAX_LEAKAGE_COUNT];
-};
-
-struct fiji_vbios_boot_state {
- uint16_t mvdd_bootup_value;
- uint16_t vddc_bootup_value;
- uint16_t vddci_bootup_value;
- uint32_t sclk_bootup_value;
- uint32_t mclk_bootup_value;
- uint16_t pcie_gen_bootup_value;
- uint16_t pcie_lane_bootup_value;
-};
-
-struct fiji_bacos {
- uint32_t best_match;
- uint32_t baco_flags;
- struct fiji_performance_level performance_level;
-};
-
-/* Ultra Low Voltage parameter structure */
-struct fiji_ulv_parm {
- bool ulv_supported;
- uint32_t cg_ulv_parameter;
- uint32_t ulv_volt_change_delay;
- struct fiji_performance_level ulv_power_level;
-};
-
-struct fiji_display_timing {
- uint32_t min_clock_in_sr;
- uint32_t num_existing_displays;
-};
-
-struct fiji_dpmlevel_enable_mask {
- uint32_t uvd_dpm_enable_mask;
- uint32_t vce_dpm_enable_mask;
- uint32_t acp_dpm_enable_mask;
- uint32_t samu_dpm_enable_mask;
- uint32_t sclk_dpm_enable_mask;
- uint32_t mclk_dpm_enable_mask;
- uint32_t pcie_dpm_enable_mask;
-};
-
-struct fiji_pcie_perf_range {
- uint16_t max;
- uint16_t min;
-};
-
-struct fiji_hwmgr {
- struct fiji_dpm_table dpm_table;
- struct fiji_dpm_table golden_dpm_table;
-
- uint32_t voting_rights_clients0;
- uint32_t voting_rights_clients1;
- uint32_t voting_rights_clients2;
- uint32_t voting_rights_clients3;
- uint32_t voting_rights_clients4;
- uint32_t voting_rights_clients5;
- uint32_t voting_rights_clients6;
- uint32_t voting_rights_clients7;
- uint32_t static_screen_threshold_unit;
- uint32_t static_screen_threshold;
- uint32_t voltage_control;
- uint32_t vddc_vddci_delta;
-
- uint32_t active_auto_throttle_sources;
-
- struct fiji_clock_registers clock_registers;
- struct fiji_voltage_smio_registers voltage_smio_registers;
-
- bool is_memory_gddr5;
- uint16_t acpi_vddc;
- bool pspp_notify_required;
- uint16_t force_pcie_gen;
- uint16_t acpi_pcie_gen;
- uint32_t pcie_gen_cap;
- uint32_t pcie_lane_cap;
- uint32_t pcie_spc_cap;
- struct fiji_leakage_voltage vddc_leakage;
- struct fiji_leakage_voltage Vddci_leakage;
-
- uint32_t mvdd_control;
- uint32_t vddc_mask_low;
- uint32_t mvdd_mask_low;
- uint16_t max_vddc_in_pptable;
- uint16_t min_vddc_in_pptable;
- uint16_t max_vddci_in_pptable;
- uint16_t min_vddci_in_pptable;
- uint32_t mclk_strobe_mode_threshold;
- uint32_t mclk_stutter_mode_threshold;
- uint32_t mclk_edc_enable_threshold;
- uint32_t mclk_edcwr_enable_threshold;
- bool is_uvd_enabled;
- struct fiji_vbios_boot_state vbios_boot_state;
-
- bool battery_state;
- bool is_tlu_enabled;
-
- /* ---- SMC SRAM Address of firmware header tables ---- */
- uint32_t sram_end;
- uint32_t dpm_table_start;
- uint32_t soft_regs_start;
- uint32_t mc_reg_table_start;
- uint32_t fan_table_start;
- uint32_t arb_table_start;
- struct SMU73_Discrete_DpmTable smc_state_table;
- struct SMU73_Discrete_Ulv ulv_setting;
-
- /* ---- Stuff originally coming from Evergreen ---- */
- uint32_t vddci_control;
- struct pp_atomctrl_voltage_table vddc_voltage_table;
- struct pp_atomctrl_voltage_table vddci_voltage_table;
- struct pp_atomctrl_voltage_table mvdd_voltage_table;
-
- uint32_t mgcg_cgtt_local2;
- uint32_t mgcg_cgtt_local3;
- uint32_t gpio_debug;
- uint32_t mc_micro_code_feature;
- uint32_t highest_mclk;
- uint16_t acpi_vddci;
- uint8_t mvdd_high_index;
- uint8_t mvdd_low_index;
- bool dll_default_on;
- bool performance_request_registered;
-
- /* ---- Low Power Features ---- */
- struct fiji_bacos bacos;
- struct fiji_ulv_parm ulv;
-
- /* ---- CAC Stuff ---- */
- uint32_t cac_table_start;
- bool cac_configuration_required;
- bool driver_calculate_cac_leakage;
- bool cac_enabled;
-
- /* ---- DPM2 Parameters ---- */
- uint32_t power_containment_features;
- bool enable_dte_feature;
- bool enable_tdc_limit_feature;
- bool enable_pkg_pwr_tracking_feature;
- bool disable_uvd_power_tune_feature;
- const struct fiji_pt_defaults *power_tune_defaults;
- struct SMU73_Discrete_PmFuses power_tune_table;
- uint32_t dte_tj_offset;
- uint32_t fast_watermark_threshold;
-
- /* ---- Phase Shedding ---- */
- bool vddc_phase_shed_control;
-
- /* ---- DI/DT ---- */
- struct fiji_display_timing display_timing;
-
- /* ---- Thermal Temperature Setting ---- */
- struct fiji_dpmlevel_enable_mask dpm_level_enable_mask;
- uint32_t need_update_smu7_dpm_table;
- uint32_t sclk_dpm_key_disabled;
- uint32_t mclk_dpm_key_disabled;
- uint32_t pcie_dpm_key_disabled;
- uint32_t min_engine_clocks;
- struct fiji_pcie_perf_range pcie_gen_performance;
- struct fiji_pcie_perf_range pcie_lane_performance;
- struct fiji_pcie_perf_range pcie_gen_power_saving;
- struct fiji_pcie_perf_range pcie_lane_power_saving;
- bool use_pcie_performance_levels;
- bool use_pcie_power_saving_levels;
- uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS];
- uint32_t mclk_activity_target;
- uint32_t mclk_dpm0_activity_target;
- uint32_t low_sclk_interrupt_threshold;
- uint32_t last_mclk_dpm_enable_mask;
- bool uvd_enabled;
-
- /* ---- Power Gating States ---- */
- bool uvd_power_gated;
- bool vce_power_gated;
- bool samu_power_gated;
- bool acp_power_gated;
- bool pg_acp_init;
- bool frtc_enabled;
- bool frtc_status_changed;
-};
-
-/* To convert to Q8.8 format for firmware */
-#define FIJI_Q88_FORMAT_CONVERSION_UNIT 256
-
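For reference, Q8.8 is unsigned fixed point with 8 integer and 8 fractional bits, so the conversion is a straight multiply by 256. A minimal illustration of the format, using a hypothetical helper that is not part of the driver:

#include <stdint.h>

/* Illustration only: pack whole + frac_num/frac_den into Q8.8.
 * frac_den must be nonzero and the result must fit in 8.8. */
static uint16_t to_q88(uint32_t whole, uint32_t frac_num, uint32_t frac_den)
{
	/* e.g. to_q88(1, 1, 2) == 0x0180, i.e. 1 + 128/256 == 1.5 */
	return (uint16_t)(whole * 256 + (frac_num * 256) / frac_den);
}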
-enum Fiji_I2CLineID {
- Fiji_I2CLineID_DDC1 = 0x90,
- Fiji_I2CLineID_DDC2 = 0x91,
- Fiji_I2CLineID_DDC3 = 0x92,
- Fiji_I2CLineID_DDC4 = 0x93,
- Fiji_I2CLineID_DDC5 = 0x94,
- Fiji_I2CLineID_DDC6 = 0x95,
- Fiji_I2CLineID_SCLSDA = 0x96,
- Fiji_I2CLineID_DDCVGA = 0x97
-};
-
-#define Fiji_I2C_DDC1DATA 0
-#define Fiji_I2C_DDC1CLK 1
-#define Fiji_I2C_DDC2DATA 2
-#define Fiji_I2C_DDC2CLK 3
-#define Fiji_I2C_DDC3DATA 4
-#define Fiji_I2C_DDC3CLK 5
-#define Fiji_I2C_SDA 40
-#define Fiji_I2C_SCL 41
-#define Fiji_I2C_DDC4DATA 65
-#define Fiji_I2C_DDC4CLK 66
-#define Fiji_I2C_DDC5DATA 0x48
-#define Fiji_I2C_DDC5CLK 0x49
-#define Fiji_I2C_DDC6DATA 0x4a
-#define Fiji_I2C_DDC6CLK 0x4b
-#define Fiji_I2C_DDCVGADATA 0x4c
-#define Fiji_I2C_DDCVGACLK 0x4d
-
-#define FIJI_UNUSED_GPIO_PIN 0x7F
-
-extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
-extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr);
-extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
-extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display);
-int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
-int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-
-#endif /* _FIJI_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
deleted file mode 100644
index 44658451a8d2..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "smumgr.h"
-#include "fiji_hwmgr.h"
-#include "fiji_powertune.h"
-#include "fiji_smumgr.h"
-#include "smu73_discrete.h"
-#include "pp_debug.h"
-
-#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX 1
-
-const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
- /* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
- {1, 0xF, 0xFD,
- /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
- 0x19, 5, 45}
-};
-
-void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *fiji_hwmgr = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t tmp = 0;
-
- if(table_info &&
- table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
- table_info->cac_dtp_table->usPowerTuneDataSetID)
- fiji_hwmgr->power_tune_defaults =
- &fiji_power_tune_data_set_array
- [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
- else
- fiji_hwmgr->power_tune_defaults = &fiji_power_tune_data_set_array[0];
-
- /* Assume disabled */
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SQRamping);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DBRamping);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TDRamping);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TCPRamping);
-
- fiji_hwmgr->dte_tj_offset = tmp;
-
- if (!tmp) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
-
- fiji_hwmgr->fast_watermark_threshold = 100;
-
- if (hwmgr->powercontainment_enabled) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment);
- tmp = 1;
- fiji_hwmgr->enable_dte_feature = tmp ? false : true;
- fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
- fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
- }
- }
-}
-
-/* PPGen generates the gain setting in units of x * 100.
- * This function converts it to units of x * 4096 (0x1000),
- * which is the unit the SMC firmware expects.
- */
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
- uint32_t tmp;
- tmp = raw_setting * 4096 / 100;
- return (uint16_t)tmp;
-}
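A worked example of the scaling above: PPGen stores a gain of 1.00 as raw 100, and 100 * 4096 / 100 = 4096 (0x1000), the unit the SMC expects; a gain of 0.25 (raw 25) scales to 1024. Because the multiply happens before the integer divide, no precision is lost for any 16-bit raw setting.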
-
-static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda)
-{
- switch (line) {
- case Fiji_I2CLineID_DDC1 :
- *scl = Fiji_I2C_DDC1CLK;
- *sda = Fiji_I2C_DDC1DATA;
- break;
- case Fiji_I2CLineID_DDC2 :
- *scl = Fiji_I2C_DDC2CLK;
- *sda = Fiji_I2C_DDC2DATA;
- break;
- case Fiji_I2CLineID_DDC3 :
- *scl = Fiji_I2C_DDC3CLK;
- *sda = Fiji_I2C_DDC3DATA;
- break;
- case Fiji_I2CLineID_DDC4 :
- *scl = Fiji_I2C_DDC4CLK;
- *sda = Fiji_I2C_DDC4DATA;
- break;
- case Fiji_I2CLineID_DDC5 :
- *scl = Fiji_I2C_DDC5CLK;
- *sda = Fiji_I2C_DDC5DATA;
- break;
- case Fiji_I2CLineID_DDC6 :
- *scl = Fiji_I2C_DDC6CLK;
- *sda = Fiji_I2C_DDC6DATA;
- break;
- case Fiji_I2CLineID_SCLSDA :
- *scl = Fiji_I2C_SCL;
- *sda = Fiji_I2C_SDA;
- break;
- case Fiji_I2CLineID_DDCVGA :
- *scl = Fiji_I2C_DDCVGACLK;
- *sda = Fiji_I2C_DDCVGADATA;
- break;
- default:
- *scl = 0;
- *sda = 0;
- break;
- }
-}
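The switch above simply pairs each AtomBIOS I2C line ID with its GPIO pin numbers from the Fiji_I2C_* table; for example, Fiji_I2CLineID_DDC1 (0x90) resolves to *scl = Fiji_I2C_DDC1CLK (1) and *sda = Fiji_I2C_DDC1DATA (0), while an unrecognized line ID falls back to pin 0 for both outputs.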
-
-int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
- SMU73_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
- struct pp_advance_fan_control_parameters *fan_table=
- &hwmgr->thermal_controller.advanceFanControlParameters;
- uint8_t uc_scl, uc_sda;
-
- /* The number of TDP fraction bits was changed from 8 to 7 for Fiji,
- * as requested by the SMC team.
- */
- dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usTDP * 128));
- dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usTDP * 128));
-
- PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
- "Target Operating Temp is out of Range!",);
-
- dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
- dpm_table->GpuTjHyst = 8;
-
- dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
-
- /* The following are for new Fiji Multi-input fan/thermal control */
- dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTargetOperatingTemp * 256);
- dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitHotspot * 256);
- dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitLiquid1 * 256);
- dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitLiquid2 * 256);
- dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitVrVddc * 256);
- dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitVrMvdd * 256);
- dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitPlx * 256);
-
- dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainEdge));
- dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHotspot));
- dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainLiquid));
- dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainVrVddc));
- dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
- dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainPlx));
- dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHbm));
-
- dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
- dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
- dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
- dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
-
- get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Liquid_I2C_LineSCL = uc_scl;
- dpm_table->Liquid_I2C_LineSDA = uc_sda;
-
- get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Vr_I2C_LineSCL = uc_scl;
- dpm_table->Vr_I2C_LineSDA = uc_sda;
-
- get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Plx_I2C_LineSCL = uc_scl;
- dpm_table->Plx_I2C_LineSDA = uc_sda;
-
- return 0;
-}
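Two fixed-point formats meet in this function: the TDP fields carry 7 fractional bits (hence the * 128), while the temperature limits carry 8 (hence the * 256), and PP_HOST_TO_SMC_US converts the host value to the SMC's big-endian byte order. A sketch of the two scale factors, with purely illustrative figures:

#include <stdint.h>

/* Illustration only: the two scalings used when filling the table above. */
static uint16_t tdp_to_u9_7(uint16_t watts)  { return watts * 128; } /* 7 fraction bits */
static uint16_t temp_to_u8_8(uint16_t deg_c) { return deg_c * 256; } /* 8 fraction bits */

/* tdp_to_u9_7(180) == 23040 and temp_to_u8_8(90) == 23040: the same raw
 * value in different formats, which is why mixing up the scale factors
 * would corrupt the table silently. */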
-
-static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
-
- data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
- data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
- data->power_tune_table.SviLoadLineTrimVddC = 3;
- data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-
-static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
- uint16_t tdc_limit;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
-
- /* The number of TDC fraction bits was changed from 8 to 7
- * for Fiji, as requested by the SMC team.
- */
- tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
- data->power_tune_table.TDC_VDDC_PkgLimit =
- CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
- data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
- data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
- return 0;
-}
-
-static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
- uint32_t temp;
-
- if (fiji_read_smc_sram_dword(hwmgr->smumgr,
- fuse_table_offset +
- offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, data->sram_end))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
- return -EINVAL);
- else {
- data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
- data->power_tune_table.LPMLTemperatureMin =
- (uint8_t)((temp >> 16) & 0xff);
- data->power_tune_table.LPMLTemperatureMax =
- (uint8_t)((temp >> 8) & 0xff);
- data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
- }
- return 0;
-}
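The dword read back from SMU SRAM packs three byte-wide fields, unpacked above with shifts and masks. A self-contained sketch of the same unpacking, with a made-up value:

#include <stdint.h>

/* Illustration only: byte layout of the PmFuses word read above. */
static void unpack_dw8(uint32_t temp, uint8_t *t_min, uint8_t *t_max, uint8_t *rsvd)
{
	*t_min = (uint8_t)((temp >> 16) & 0xff); /* LPMLTemperatureMin */
	*t_max = (uint8_t)((temp >> 8) & 0xff);  /* LPMLTemperatureMax */
	*rsvd  = (uint8_t)(temp & 0xff);         /* Reserved           */
}

/* For temp == 0x00413219: *t_min == 0x41, *t_max == 0x32, *rsvd == 0x19. */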
-
-static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
- return 0;
-}
-
-static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if ((hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity & (1 << 15)) ||
- (hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity == 0))
- hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity = hwmgr->thermal_controller.
- advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
- data->power_tune_table.FuzzyFan_PwmSetDelta =
- PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
- advanceFanControlParameters.usFanOutputSensitivity);
- return 0;
-}
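The guard above treats usFanOutputSensitivity as unusable when it is zero or has bit 15 set (the 1 << 15 test), substituting the platform default before the value is endian-converted into FuzzyFan_PwmSetDelta.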
-
-static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- data->power_tune_table.GnbLPML[i] = 0;
-
- return 0;
-}
-
-static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- /* int i, min, max;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint8_t * pHiVID = data->power_tune_table.BapmVddCVidHiSidd;
- uint8_t * pLoVID = data->power_tune_table.BapmVddCVidLoSidd;
-
- min = max = pHiVID[0];
- for (i = 0; i < 8; i++) {
- if (0 != pHiVID[i]) {
- if (min > pHiVID[i])
- min = pHiVID[i];
- if (max < pHiVID[i])
- max = pHiVID[i];
- }
-
- if (0 != pLoVID[i]) {
- if (min > pLoVID[i])
- min = pLoVID[i];
- if (max < pLoVID[i])
- max = pLoVID[i];
- }
- }
-
- PP_ASSERT_WITH_CODE((0 != min) && (0 != max), "BapmVddcVidSidd table does not exist!", return int_Failed);
- data->power_tune_table.GnbLPMLMaxVid = (uint8_t)max;
- data->power_tune_table.GnbLPMLMinVid = (uint8_t)min;
-*/
- return 0;
-}
-
-static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
- HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
- data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
-
- return 0;
-}
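Note the evaluation order in the leakage scaling above: the integer division by 100 runs before the multiplication by 256, so sub-percent precision truncates; for usHighCACLeakage = 150 the code computes 150 / 100 * 256 = 256, whereas 150 * 256 / 100 would give 384. Also, the initial reads of HiSidd and LoSidd are overwritten immediately, so they act only as declarations.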
-
-int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t pm_fuse_table_offset;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, data->sram_end))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed!",
- return -EINVAL);
-
- /* DW6 */
- if (fiji_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed!",
- return -EINVAL);
- /* DW7 */
- if (fiji_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed!", return -EINVAL);
- /* DW8 */
- if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl, "
- "LPMLTemperature Min and Max Failed!",
- return -EINVAL);
-
- /* DW9-DW12 */
- if (0 != fiji_populate_temperature_scaler(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed!",
- return -EINVAL);
-
- /* DW13-DW14 */
- if(fiji_populate_fuzzy_fan(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate Fuzzy Fan Control parameters Failed!",
- return -EINVAL);
-
- /* DW15-DW18 */
- if (fiji_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed!",
- return -EINVAL);
-
- /* DW19 */
- if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
- /* DW20 */
- if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
- "Sidd Failed!", return -EINVAL);
-
- if (fiji_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
- (uint8_t *)&data->power_tune_table,
- sizeof(struct SMU73_Discrete_PmFuses), data->sram_end))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed!",
- return -EINVAL);
- }
- return 0;
-}
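Each populate step above funnels failure through PP_ASSERT_WITH_CODE, which logs the message and then executes its third argument when the condition is false. Its approximate shape, paraphrased from pp_debug.h rather than copied verbatim:

/* Approximate: log and run code_part when cond does not hold. */
#define PP_ASSERT_WITH_CODE(cond, message, code_part)	\
	do {						\
		if (!(cond)) {				\
			printk("%s\n", message);	\
			code_part;			\
		}					\
	} while (0)

This is why callers can write PP_ASSERT_WITH_CODE(false, "...", return -EINVAL) as a log-and-bail idiom.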
-
-int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- int result = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC)) {
- int smc_result;
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_EnableCac));
- PP_ASSERT_WITH_CODE((0 == smc_result),
- "Failed to enable CAC in SMC.", result = -1);
-
- data->cac_enabled = (0 == smc_result) ? true : false;
- }
- return result;
-}
-
-int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- int result = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC) && data->cac_enabled) {
- int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_DisableCac));
- PP_ASSERT_WITH_CODE((smc_result == 0),
- "Failed to disable CAC in SMC.", result = -1);
-
- data->cac_enabled = false;
- }
- return result;
-}
-
-int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if(data->power_containment_features &
- POWERCONTAINMENT_FEATURE_PkgPwrLimit)
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PkgPwrSetLimit, n);
- return 0;
-}
-
-static int fiji_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
-{
- return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
- PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
-}
-
-int fiji_enable_power_containment(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int smc_result;
- int result = 0;
-
- data->power_containment_features = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (data->enable_dte_feature) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_EnableDTE));
- PP_ASSERT_WITH_CODE((0 == smc_result),
- "Failed to enable DTE in SMC.", result = -1;);
- if (0 == smc_result)
- data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE;
- }
-
- if (data->enable_tdc_limit_feature) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_TDCLimitEnable));
- PP_ASSERT_WITH_CODE((0 == smc_result),
- "Failed to enable TDCLimit in SMC.", result = -1;);
- if (0 == smc_result)
- data->power_containment_features |=
- POWERCONTAINMENT_FEATURE_TDCLimit;
- }
-
- if (data->enable_pkg_pwr_tracking_feature) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
- PP_ASSERT_WITH_CODE((0 == smc_result),
- "Failed to enable PkgPwrTracking in SMC.", result = -1;);
- if (0 == smc_result) {
- struct phm_cac_tdp_table *cac_table =
- table_info->cac_dtp_table;
- uint32_t default_limit =
- (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
-
- data->power_containment_features |=
- POWERCONTAINMENT_FEATURE_PkgPwrLimit;
-
- if (fiji_set_power_limit(hwmgr, default_limit))
- printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
- }
- }
- }
- return result;
-}
-
-int fiji_disable_power_containment(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- int result = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment) &&
- data->power_containment_features) {
- int smc_result;
-
- if (data->power_containment_features &
- POWERCONTAINMENT_FEATURE_TDCLimit) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_TDCLimitDisable));
- PP_ASSERT_WITH_CODE((smc_result == 0),
- "Failed to disable TDCLimit in SMC.",
- result = smc_result);
- }
-
- if (data->power_containment_features &
- POWERCONTAINMENT_FEATURE_DTE) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_DisableDTE));
- PP_ASSERT_WITH_CODE((smc_result == 0),
- "Failed to disable DTE in SMC.",
- result = smc_result);
- }
-
- if (data->power_containment_features &
- POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
- PP_ASSERT_WITH_CODE((smc_result == 0),
- "Failed to disable PkgPwrTracking in SMC.",
- result = smc_result);
- }
- data->power_containment_features = 0;
- }
-
- return result;
-}
-
-int fiji_power_control_set_level(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
- int adjust_percent, target_tdp;
- int result = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- /* adjustment percentage has already been validated */
- adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
- hwmgr->platform_descriptor.TDPAdjustment :
- (-1 * hwmgr->platform_descriptor.TDPAdjustment);
- /* The SMC requires target_tdp as a 7-bit fraction in the DPM table,
- * but as an 8-bit fraction in messages.
- */
- target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
- result = fiji_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
- }
-
- return result;
-}
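A worked pass through the adjustment above, with illustrative numbers: usTDP = 180 W and a +10% TDPAdjustment give target_tdp = (100 + 10) * (180 * 256) / 100 = 50688, i.e. 198 W in the 8-bit-fraction format that fiji_set_overdriver_target_tdp() sends to the SMC.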
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
deleted file mode 100644
index fec772421733..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef FIJI_POWERTUNE_H
-#define FIJI_POWERTUNE_H
-
-enum fiji_pt_config_reg_type {
- FIJI_CONFIGREG_MMR = 0,
- FIJI_CONFIGREG_SMC_IND,
- FIJI_CONFIGREG_DIDT_IND,
- FIJI_CONFIGREG_CACHE,
- FIJI_CONFIGREG_MAX
-};
-
-/* PowerContainment Features */
-#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
-#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
-#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
-
-#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
-#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
-#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
-#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
-#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
-#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
-#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
-#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
-#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
-
-struct fiji_pt_config_reg {
- uint32_t offset;
- uint32_t mask;
- uint32_t shift;
- uint32_t value;
- enum fiji_pt_config_reg_type type;
-};
-
-struct fiji_pt_defaults
-{
- uint8_t SviLoadLineEn;
- uint8_t SviLoadLineVddC;
- uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
- uint8_t TDC_MAWt;
- uint8_t TdcWaterfallCtl;
- uint8_t DTEAmbientTempBase;
-};
-
-void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
-int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
-int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr);
-int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr);
-int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr);
-int fiji_enable_power_containment(struct pp_hwmgr *hwmgr);
-int fiji_disable_power_containment(struct pp_hwmgr *hwmgr);
-int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
-int fiji_power_control_set_level(struct pp_hwmgr *hwmgr);
-
-#endif /* FIJI_POWERTUNE_H */
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h
deleted file mode 100644
index 8621493b8574..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef FIJI_THERMAL_H
-#define FIJI_THERMAL_H
-
-#include "hwmgr.h"
-
-#define FIJI_THERMAL_HIGH_ALERT_MASK 0x1
-#define FIJI_THERMAL_LOW_ALERT_MASK 0x2
-
-#define FIJI_THERMAL_MINIMUM_TEMP_READING -256
-#define FIJI_THERMAL_MAXIMUM_TEMP_READING 255
-
-#define FIJI_THERMAL_MINIMUM_ALERT_TEMP 0
-#define FIJI_THERMAL_MAXIMUM_ALERT_TEMP 255
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-
-extern int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-
-extern int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr);
-extern int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
-extern int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
-extern int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
-extern int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 789f98ad2615..14f8c1f4da3d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -24,8 +24,6 @@
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "power_state.h"
-#include "pp_acpi.h"
-#include "amd_acpi.h"
#include "pp_debug.h"
#define PHM_FUNC_CHECK(hw) \
@@ -34,38 +32,6 @@
return -EINVAL; \
} while (0)
-void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
-{
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
-
- if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
- acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
- phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
-}
-
bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr)
{
return hwmgr->block_hw_access;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 27e07624ac28..1167205057b3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -32,13 +32,22 @@
#include "pp_debug.h"
#include "ppatomctrl.h"
#include "ppsmc.h"
-
-#define VOLTAGE_SCALE 4
+#include "pp_acpi.h"
+#include "amd_acpi.h"
extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
+
+static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
+static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
+static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+
+uint8_t convert_to_vid(uint16_t vddc)
+{
+ return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
+}
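With vddc in millivolts, this is the SVI2-style VID encoding (VID 0 = 1.55 V, one step = 6.25 mV) carried out in quarter-millivolt units to stay in integer math: 6200 = 4 * 1550 and 25 = 4 * 6.25. For example, convert_to_vid(1150) = (6200 - 4600) / 25 = 64.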
int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
{
@@ -56,10 +65,12 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
hwmgr->device = pp_init->device;
hwmgr->chip_family = pp_init->chip_family;
hwmgr->chip_id = pp_init->chip_id;
- hwmgr->hw_revision = pp_init->rev_id;
hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
hwmgr->power_source = PP_PowerSource_AC;
- hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled;
+ hwmgr->pp_table_version = PP_TABLE_V1;
+
+ hwmgr_init_default_caps(hwmgr);
+ hwmgr_set_user_specify_caps(hwmgr);
switch (hwmgr->chip_family) {
case AMDGPU_FAMILY_CZ:
@@ -67,26 +78,38 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
break;
case AMDGPU_FAMILY_VI:
switch (hwmgr->chip_id) {
+ case CHIP_TOPAZ:
+ topaz_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
+ PP_VBI_TIME_SUPPORT_MASK |
+ PP_ENABLE_GFX_CG_THRU_SMU);
+ hwmgr->pp_table_version = PP_TABLE_V0;
+ break;
case CHIP_TONGA:
- tonga_hwmgr_init(hwmgr);
+ tonga_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
+ PP_VBI_TIME_SUPPORT_MASK);
break;
case CHIP_FIJI:
- fiji_hwmgr_init(hwmgr);
+ fiji_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
+ PP_VBI_TIME_SUPPORT_MASK |
+ PP_ENABLE_GFX_CG_THRU_SMU);
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
- polaris10_hwmgr_init(hwmgr);
+ polaris_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
break;
default:
return -EINVAL;
}
+ smu7_hwmgr_init(hwmgr);
break;
default:
return -EINVAL;
}
- phm_init_dynamic_caps(hwmgr);
-
return 0;
}
@@ -105,6 +128,8 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
kfree(hwmgr->set_temperature_range.function_list);
kfree(hwmgr->ps);
+ kfree(hwmgr->current_ps);
+ kfree(hwmgr->request_ps);
kfree(hwmgr);
return 0;
}
@@ -129,10 +154,17 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
sizeof(struct pp_power_state);
hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
-
if (hwmgr->ps == NULL)
return -ENOMEM;
+ hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
+ if (hwmgr->request_ps == NULL)
+ return -ENOMEM;
+
+ hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
+ if (hwmgr->current_ps == NULL)
+ return -ENOMEM;
+
state = hwmgr->ps;
for (i = 0; i < table_entries; i++) {
@@ -140,7 +172,8 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
if (state->classification.flags & PP_StateClassificationFlag_Boot) {
hwmgr->boot_ps = state;
- hwmgr->current_ps = hwmgr->request_ps = state;
+ memcpy(hwmgr->current_ps, state, size);
+ memcpy(hwmgr->request_ps, state, size);
}
state->id = i + 1; /* assigned unique num for every power state id */
@@ -150,6 +183,7 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
state = (struct pp_power_state *)((unsigned long)state + size);
}
+
return 0;
}
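Two details in this hunk are easy to miss: hwmgr->ps is walked with byte arithmetic because each entry is sizeof(struct pp_power_state) plus ASIC-specific state (the size computed just above the allocation), and the boot entry is now deep-copied into current_ps and request_ps rather than aliased, so later adjustments to the requested state no longer mutate the boot entry stored in hwmgr->ps.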
@@ -182,30 +216,6 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
return 0;
}
-int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
- uint32_t index, uint32_t value, uint32_t mask)
-{
- uint32_t i;
- uint32_t cur_value;
-
- if (hwmgr == NULL || hwmgr->device == NULL) {
- printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
- return -EINVAL;
- }
-
- for (i = 0; i < hwmgr->usec_timeout; i++) {
- cur_value = cgs_read_register(hwmgr->device, index);
- if ((cur_value & mask) != (value & mask))
- break;
- udelay(1);
- }
-
- /* timeout means wrong logic*/
- if (i == hwmgr->usec_timeout)
- return -1;
- return 0;
-}
-
/**
* Returns once the part of the register indicated by the mask has
@@ -227,21 +237,7 @@ void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
}
-void phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask)
-{
- if (hwmgr == NULL || hwmgr->device == NULL) {
- printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
- return;
- }
- cgs_write_register(hwmgr->device, indirect_port, index);
- phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
- value, mask);
-}
bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
{
@@ -403,12 +399,9 @@ int phm_reset_single_dpm_table(void *table,
struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
- PP_ASSERT_WITH_CODE(count <= max,
- "Fatal error, can not set up single DPM table entries to exceed max number!",
- );
+ dpm_table->count = count > max ? max : count;
- dpm_table->count = count;
- for (i = 0; i < max; i++)
+ for (i = 0; i < dpm_table->count; i++)
dpm_table->dpm_level[i].enabled = false;
return 0;
@@ -462,6 +455,27 @@ uint8_t phm_get_voltage_index(
return i - 1;
}
+uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
+ uint32_t voltage)
+{
+ uint8_t count = (uint8_t) (voltage_table->count);
+ uint8_t i = 0;
+
+ PP_ASSERT_WITH_CODE((NULL != voltage_table),
+ "Voltage Table empty.", return 0;);
+ PP_ASSERT_WITH_CODE((0 != count),
+ "Voltage Table empty.", return 0;);
+
+ for (i = 0; i < count; i++) {
+ /* find the first voltage greater than or equal to the request */
+ if (voltage_table->entries[i].value >= voltage)
+ return i;
+ }
+
+ /* voltage is bigger than max voltage in the table */
+ return i - 1;
+}
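Behavior of the new lookup, by example: for a table of {800, 900, 1000} mV, phm_get_voltage_id() returns 1 for a request of 850 (the first entry greater than or equal to the request); for a request of 1100 the loop falls through and i - 1 yields the last index, 2.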
+
uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
{
uint32_t i;
@@ -549,7 +563,8 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr
table_clk_vlt->entries[2].v = 810;
table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
table_clk_vlt->entries[3].v = 900;
- pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
+ if (pptable_info != NULL)
+ pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
}
@@ -615,3 +630,186 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
printk(KERN_ERR "DAL requested level can not"
" found a available voltage in VDDC DPM Table \n");
}
+
+void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
+{
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
+
+ if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
+ acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DynamicPatchPowerState);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EnableSMU7ThermalManagement);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DynamicPowerManagement);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SMC);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DynamicUVDState);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
+ return;
+}
+
+int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
+{
+ if (amdgpu_sclk_deep_sleep_en)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep);
+ else
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep);
+
+ if (amdgpu_powercontainment)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment);
+ else
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment);
+
+ hwmgr->feature_mask = amdgpu_pp_feature_mask;
+
+ return 0;
+}
+
+int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+ uint32_t sclk, uint16_t id, uint16_t *voltage)
+{
+ uint32_t vol;
+ int ret = 0;
+
+ if (hwmgr->chip_id < CHIP_POLARIS10) {
+ atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
+ if (*voltage >= 2000 || *voltage == 0)
+ *voltage = 1150;
+ } else {
+ ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
+ *voltage = (uint16_t)(vol/100);
+ }
+ return ret;
+}
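The parentheses around vol/100 matter here: a bare (uint16_t)vol/100 casts before dividing, so, assuming the *_ai query reports in units of 0.01 mV as the divide by 100 suggests, a reading of 115000 (1150.00 mV) would truncate to 49464 and then divide down to 494 instead of 1150.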
+
+int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+ /* power tune caps: assume disabled */
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface);
+
+ if (hwmgr->chip_id == CHIP_POLARIS11)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SPLLShutdownSupport);
+ return 0;
+}
+
+int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+ return 0;
+}
+
+int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDPowerGating);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEPowerGating);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+
+ return 0;
+}
+
+int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EVV);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h
deleted file mode 100644
index f78ffd935cee..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef POLARIS10_DYN_DEFAULTS_H
-#define POLARIS10_DYN_DEFAULTS_H
-
-
-enum Polaris10dpm_TrendDetection {
- Polaris10Adpm_TrendDetection_AUTO,
- Polaris10Adpm_TrendDetection_UP,
- Polaris10Adpm_TrendDetection_DOWN
-};
-typedef enum Polaris10dpm_TrendDetection Polaris10dpm_TrendDetection;
-
-/* We need to fill in the default values */
-
-
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000
-
-
-#define PPPOLARIS10_THERMALPROTECTCOUNTER_DFLT 0x200
-#define PPPOLARIS10_STATICSCREENTHRESHOLDUNIT_DFLT 0
-#define PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT 0x00C8
-#define PPPOLARIS10_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200
-#define PPPOLARIS10_REFERENCEDIVIDER_DFLT 4
-
-#define PPPOLARIS10_ULVVOLTAGECHANGEDELAY_DFLT 1687
-
-#define PPPOLARIS10_CGULVPARAMETER_DFLT 0x00040035
-#define PPPOLARIS10_CGULVCONTROL_DFLT 0x00007450
-#define PPPOLARIS10_TARGETACTIVITY_DFLT 50
-#define PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT 10
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
deleted file mode 100644
index 769636a0c5b5..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ /dev/null
@@ -1,5290 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include <asm/div64.h>
-#include "linux/delay.h"
-#include "pp_acpi.h"
-#include "hwmgr.h"
-#include "polaris10_hwmgr.h"
-#include "polaris10_powertune.h"
-#include "polaris10_dyn_defaults.h"
-#include "polaris10_smumgr.h"
-#include "pp_debug.h"
-#include "ppatomctrl.h"
-#include "atombios.h"
-#include "tonga_pptable.h"
-#include "pppcielanes.h"
-#include "amd_pcie_helpers.h"
-#include "hardwaremanager.h"
-#include "tonga_processpptables.h"
-#include "cgs_common.h"
-#include "smu74.h"
-#include "smu_ucode_xfer_vi.h"
-#include "smu74_discrete.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "oss/oss_3_0_d.h"
-#include "gca/gfx_8_0_d.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-
-#include "polaris10_thermal.h"
-#include "polaris10_clockpowergating.h"
-
-#define MC_CG_ARB_FREQ_F0 0x0a
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define MC_CG_ARB_FREQ_F2 0x0c
-#define MC_CG_ARB_FREQ_F3 0x0d
-
-#define MC_CG_SEQ_DRAMCONF_S0 0x05
-#define MC_CG_SEQ_DRAMCONF_S1 0x06
-#define MC_CG_SEQ_YCLK_SUSPEND 0x04
-#define MC_CG_SEQ_YCLK_RESUME 0x0a
-
-
-#define SMC_RAM_END 0x40000
-
-#define SMC_CG_IND_START 0xc0030000
-#define SMC_CG_IND_END 0xc0040000
-
-#define VOLTAGE_SCALE 4
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-
-#define VDDC_VDDCI_DELTA 200
-
-#define MEM_FREQ_LOW_LATENCY 25000
-#define MEM_FREQ_HIGH_LATENCY 80000
-
-#define MEM_LATENCY_HIGH 45
-#define MEM_LATENCY_LOW 35
-#define MEM_LATENCY_ERR 0xFFFF
-
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-
-
-#define PCIE_BUS_CLK 10000
-#define TCLK (PCIE_BUS_CLK / 10)
-
-
-static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] =
-{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
-
-/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */
-static const uint32_t polaris10_clock_stretcher_ddt_table[2][4][4] =
-{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
- { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
-static const uint8_t polaris10_clock_stretch_amount_conversion[2][6] =
-{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
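Reading the DDT table above: each row covers one voltage range, laid out as {floor frequency, boundary frequency, VID min, VID max}, and in every range the boundary sits at roughly twice the floor (265/529, 325/650, 430/860 for the FF-type rows).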
-
-/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
-enum DPM_EVENT_SRC {
- DPM_EVENT_SRC_ANALOG = 0,
- DPM_EVENT_SRC_EXTERNAL = 1,
- DPM_EVENT_SRC_DIGITAL = 2,
- DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
- DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
-};
-
-static const unsigned long PhwPolaris10_Magic = (unsigned long)(PHM_VIslands_Magic);
-
-struct polaris10_power_state *cast_phw_polaris10_power_state(
- struct pp_hw_power_state *hw_ps)
-{
- PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL);
-
- return (struct polaris10_power_state *)hw_ps;
-}
-
-const struct polaris10_power_state *cast_const_phw_polaris10_power_state(
- const struct pp_hw_power_state *hw_ps)
-{
- PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL);
-
- return (const struct polaris10_power_state *)hw_ps;
-}
-
-static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
-	return PHM_READ_INDIRECT_FIELD(hwmgr->device,
-			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
-}
-
-/**
- * Find the MC microcode version and store it in the HwMgr struct
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int phm_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
-{
- cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
-
- hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
-
- return 0;
-}
-
-uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
-{
- uint32_t speedCntl = 0;
-
- /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
- speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
- ixPCIE_LC_SPEED_CNTL);
- return((uint16_t)PHM_GET_FIELD(speedCntl,
- PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
-{
- uint32_t link_width;
-
- /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
- link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
- PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
-
- PP_ASSERT_WITH_CODE((7 >= link_width),
- "Invalid PCIe lane width!", return 0);
-
- return decode_pcie_lane_width(link_width);
-}
-
-/**
-* Enable voltage control
-*
-* @param pHwMgr the address of the powerplay hardware manager.
-* @return always PP_Result_OK
-*/
-int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
-{
- PP_ASSERT_WITH_CODE(
- (hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable) == 0),
- "Failed to enable voltage DPM during DPM Start Function!",
-		return 1);
-
- return 0;
-}
-
-/**
-* Checks if we want to support voltage control
-*
-* @param hwmgr the address of the powerplay hardware manager.
-*/
-static bool polaris10_voltage_control(const struct pp_hwmgr *hwmgr)
-{
- const struct polaris10_hwmgr *data =
- (const struct polaris10_hwmgr *)(hwmgr->backend);
-
- return (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control);
-}
-
-/**
-* Enable voltage control
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_enable_voltage_control(struct pp_hwmgr *hwmgr)
-{
- /* enable voltage control */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
-
- return 0;
-}
-
-/**
-* Create Voltage Tables.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- int result;
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
- &(data->mvdd_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve MVDD table.",
- return result);
- } else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
- table_info->vdd_dep_on_mclk);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 MVDD table from dependancy table.",
- return result;);
- }
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
- &(data->vddci_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve VDDCI table.",
- return result);
- } else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
- table_info->vdd_dep_on_mclk);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDCI table from dependancy table.",
- return result);
- }
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
- table_info->vddc_lookup_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDC table from lookup table.",
- return result);
- }
-
- PP_ASSERT_WITH_CODE(
- (data->vddc_voltage_table.count <= (SMU74_MAX_LEVELS_VDDC)),
- "Too many voltage values for VDDC. Trimming to fit state table.",
- phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDC,
- &(data->vddc_voltage_table)));
-
- PP_ASSERT_WITH_CODE(
- (data->vddci_voltage_table.count <= (SMU74_MAX_LEVELS_VDDCI)),
- "Too many voltage values for VDDCI. Trimming to fit state table.",
- phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDCI,
- &(data->vddci_voltage_table)));
-
- PP_ASSERT_WITH_CODE(
- (data->mvdd_voltage_table.count <= (SMU74_MAX_LEVELS_MVDD)),
- "Too many voltage values for MVDD. Trimming to fit state table.",
- phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_MVDD,
- &(data->mvdd_voltage_table)));
-
- return 0;
-}
-
-/**
-* Programs static screen detection parameters
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_program_static_screen_threshold_parameters(
- struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- /* Set static screen threshold unit */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
- data->static_screen_threshold_unit);
- /* Set static screen threshold */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
- data->static_screen_threshold);
-
- return 0;
-}
-
-/**
-* Setup display gap for glitch free memory clock switching.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_enable_display_gap(struct pp_hwmgr *hwmgr)
-{
- uint32_t display_gap =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL);
-
- display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
- DISP_GAP, DISPLAY_GAP_IGNORE);
-
- display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
- DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL, display_gap);
-
- return 0;
-}
-
-/**
-* Programs activity state transition voting clients
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- /* Clear reset for voting clients before enabling DPM */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
- return 0;
-}
-
-static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr)
-{
- /* Reset voting clients before disabling DPM */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, 0);
-
- return 0;
-}
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, DpmTable),
- &tmp, data->sram_end);
-
- if (0 == result)
- data->dpm_table_start = tmp;
-
- error |= (0 != result);
-
- result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, SoftRegisters),
- &tmp, data->sram_end);
-
- if (!result) {
- data->soft_regs_start = tmp;
- smu_data->soft_regs_start = tmp;
- }
-
- error |= (0 != result);
-
- result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, mcRegisterTable),
- &tmp, data->sram_end);
-
- if (!result)
-		data->mc_reg_table_start = tmp;
-
-	error |= (0 != result);
-
- result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, FanTable),
- &tmp, data->sram_end);
-
- if (!result)
- data->fan_table_start = tmp;
-
- error |= (0 != result);
-
- result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
- &tmp, data->sram_end);
-
- if (!result)
- data->arb_table_start = tmp;
-
- error |= (0 != result);
-
- result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, Version),
- &tmp, data->sram_end);
-
- if (!result)
- hwmgr->microcode_version_info.SMC = tmp;
-
- error |= (0 != result);
-
- return error ? -1 : 0;
-}
-
-/* Copy one arb setting to another and then switch the active set.
- * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
- */
-static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
- uint32_t arb_src, uint32_t arb_dest)
-{
- uint32_t mc_arb_dram_timing;
- uint32_t mc_arb_dram_timing2;
- uint32_t burst_time;
- uint32_t mc_cg_config;
-
- switch (arb_src) {
- case MC_CG_ARB_FREQ_F0:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
- break;
- case MC_CG_ARB_FREQ_F1:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
- break;
- default:
- return -EINVAL;
- }
-
- switch (arb_dest) {
- case MC_CG_ARB_FREQ_F0:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
- break;
- case MC_CG_ARB_FREQ_F1:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
- break;
- default:
- return -EINVAL;
- }
-
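-	/* Presumably the low four bits of MC_CG_CONFIG enable arb-set
-	 * copying for all MC clients; the active set is then switched by
-	 * writing the destination index to MC_ARB_CG.CG_ARB_REQ below.
-	 */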
- mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
- mc_cg_config |= 0x0000000F;
- cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
-
- return 0;
-}
-
-static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
-}
-
-/**
-* Initial switch from ARB F0->F1
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-* This function is to be called from the SetPowerState table.
-*/
-static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
-{
- return polaris10_copy_and_switch_arb_sets(hwmgr,
- MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
-{
- uint32_t tmp;
-
- tmp = (cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
- 0x0000ff00) >> 8;
-
- if (tmp == MC_CG_ARB_FREQ_F0)
- return 0;
-
- return polaris10_copy_and_switch_arb_sets(hwmgr,
- tmp, MC_CG_ARB_FREQ_F0);
-}
-
-static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint32_t i, max_entry;
-
- PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
- data->use_pcie_power_saving_levels), "No pcie performance levels!",
- return -EINVAL);
-
- if (data->use_pcie_performance_levels &&
- !data->use_pcie_power_saving_levels) {
- data->pcie_gen_power_saving = data->pcie_gen_performance;
- data->pcie_lane_power_saving = data->pcie_lane_performance;
- } else if (!data->use_pcie_performance_levels &&
- data->use_pcie_power_saving_levels) {
- data->pcie_gen_performance = data->pcie_gen_power_saving;
- data->pcie_lane_performance = data->pcie_lane_power_saving;
- }
-
- phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
- SMU74_MAX_LEVELS_LINK,
- MAX_REGULAR_DPM_NUMBER);
-
- if (pcie_table != NULL) {
-		/* max_entry is used to make sure we reserve one PCIE level
-		 * for boot level (fix for A+A PSPP issue).
-		 * If the PCIE table from the PPTable has a ULV entry + 8 entries,
-		 * then ignore the last entry.
-		 */
- max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
- SMU74_MAX_LEVELS_LINK : pcie_table->count;
- for (i = 1; i < max_entry; i++) {
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
- get_pcie_gen_support(data->pcie_gen_cap,
- pcie_table->entries[i].gen_speed),
- get_pcie_lane_support(data->pcie_lane_cap,
- pcie_table->entries[i].lane_width));
- }
- data->dpm_table.pcie_speed_table.count = max_entry - 1;
-
- /* Setup BIF_SCLK levels */
- for (i = 0; i < max_entry; i++)
- data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
- } else {
- /* Hardcode Pcie Table */
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
-
- data->dpm_table.pcie_speed_table.count = 6;
- }
- /* Populate last level for boot PCIE level, but do not increment count. */
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
- data->dpm_table.pcie_speed_table.count,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
-
- return 0;
-}
-
-/*
- * This function initializes all DPM state tables for SMU7
- * based on the dependency table.
- * The dynamic state patching function will then trim these
- * state tables to the allowed range based
- * on the power policy or external client requests,
- * such as UVD requests, etc.
- */
-int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i;
-
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
- table_info->vdd_dep_on_mclk;
-
- PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
- "SCLK dependency table is missing. This table is mandatory",
- return -EINVAL);
-	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
-			"SCLK dependency table must have at least one entry. "
-			"This table is mandatory",
-			return -EINVAL);
-
- PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
- "MCLK dependency table is missing. This table is mandatory",
- return -EINVAL);
-	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
-			"MCLK dependency table must have at least one entry. "
-			"This table is mandatory",
-			return -EINVAL);
-
- /* clear the state table to reset everything to default */
- phm_reset_single_dpm_table(
- &data->dpm_table.sclk_table, SMU74_MAX_LEVELS_GRAPHICS, MAX_REGULAR_DPM_NUMBER);
- phm_reset_single_dpm_table(
- &data->dpm_table.mclk_table, SMU74_MAX_LEVELS_MEMORY, MAX_REGULAR_DPM_NUMBER);
-
-
- /* Initialize Sclk DPM table based on allow Sclk values */
- data->dpm_table.sclk_table.count = 0;
- for (i = 0; i < dep_sclk_table->count; i++) {
- if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
- dep_sclk_table->entries[i].clk) {
-
- data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
- dep_sclk_table->entries[i].clk;
-
-			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
-					(i == 0);
- data->dpm_table.sclk_table.count++;
- }
- }
-
- /* Initialize Mclk DPM table based on allow Mclk values */
- data->dpm_table.mclk_table.count = 0;
- for (i = 0; i < dep_mclk_table->count; i++) {
- if (i == 0 || data->dpm_table.mclk_table.dpm_levels
- [data->dpm_table.mclk_table.count - 1].value !=
- dep_mclk_table->entries[i].clk) {
- data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
- dep_mclk_table->entries[i].clk;
-			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
-					(i == 0);
- data->dpm_table.mclk_table.count++;
- }
- }
-
- /* setup PCIE gen speed levels */
- polaris10_setup_default_pcie_table(hwmgr);
-
- /* save a copy of the default DPM table */
- memcpy(&(data->golden_dpm_table), &(data->dpm_table),
- sizeof(struct polaris10_dpm_table));
-
- return 0;
-}
-
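-/* convert_to_vid() appears to implement the SVI2 VID encoding: 6200 is
- * 1.55 V in 0.25 mV units (vddc is in mV, VOLTAGE_SCALE == 4) and each
- * VID step is 6.25 mV. Worked example: vddc = 1000 mV gives
- * (6200 - 4000) / 25 = 88.
- */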
-uint8_t convert_to_vid(uint16_t vddc)
-{
- return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param *hwmgr The address of the hardware manager.
- * @param *table The SMC DPM table structure to be populated.
- * @return 0
- */
-static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t count, level;
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- count = data->mvdd_voltage_table.count;
- if (count > SMU_MAX_SMIO_LEVELS)
- count = SMU_MAX_SMIO_LEVELS;
- for (level = 0; level < count; level++) {
-			table->SmioTable2.Pattern[level].Voltage =
-					PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
- /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
- table->SmioTable2.Pattern[level].Smio =
- (uint8_t) level;
- table->Smio[level] |=
- data->mvdd_voltage_table.entries[level].smio_low;
- }
- table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
- table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
- }
-
- return 0;
-}
-
-static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- uint32_t count, level;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- count = data->vddci_voltage_table.count;
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- if (count > SMU_MAX_SMIO_LEVELS)
- count = SMU_MAX_SMIO_LEVELS;
- for (level = 0; level < count; ++level) {
- table->SmioTable1.Pattern[level].Voltage =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
- table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
-
- table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
- }
- }
-
- table->SmioMask1 = data->vddci_voltage_table.mask_low;
-
- return 0;
-}
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *lookup_table =
- table_info->vddc_lookup_table;
-	/* table is already swapped, so in order to use the values from it,
- * we need to swap it back.
- * We are populating vddc CAC data to BapmVddc table
- * in split and merged mode
- */
- for (count = 0; count < lookup_table->count; count++) {
- index = phm_get_voltage_index(lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
- table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
- table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
- }
-
- return 0;
-}
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-
-int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- polaris10_populate_smc_vddci_table(hwmgr, table);
- polaris10_populate_smc_mvdd_table(hwmgr, table);
- polaris10_populate_cac_table(hwmgr, table);
-
- return 0;
-}
-
-static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_Ulv *state)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
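-	/* Convert the ULV offset to 6.25 mV VID steps, assuming the offset
-	 * is stored in mV: scale2/scale1 = 100/625 = 1/6.25, so e.g. a
-	 * 50 mV offset becomes 50 * 100 / 625 = 8 VID steps.
-	 */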
- state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
- state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
- state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
-
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-
- return 0;
-}
-
-static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_dpm_table *dpm_table = &data->dpm_table;
- int i;
-
- /* Index (dpm_table->pcie_speed_table.count)
- * is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
- dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity = 1;
- table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
- }
-
- data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
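-/* polaris10_get_xclk() returns the SCLK PLL reference clock: TCLK when
- * the clock pin mux routes TCLK to XCLK, otherwise the display reference
- * clock from CGS, divided by 4 when XTALIN_DIVIDE is set.
- */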
-static uint32_t polaris10_get_xclk(struct pp_hwmgr *hwmgr)
-{
- uint32_t reference_clock, tmp;
- struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
-
- info.mode_info = &mode_info;
-
- tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
-
- if (tmp)
- return TCLK;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- reference_clock = mode_info.ref_clock;
-
- tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
-
- if (0 != tmp)
- return reference_clock / 4;
-
- return reference_clock;
-}
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, SMU_SclkSetting *sclk_setting)
-{
- const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
- struct pp_atomctrl_clock_dividers_ai dividers;
-
- uint32_t ref_clock;
- uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
- uint8_t i;
- int result;
- uint64_t temp;
-
- sclk_setting->SclkFrequency = clock;
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
- if (result == 0) {
- sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
- sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
- sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
- sclk_setting->PllRange = dividers.ucSclkPllRange;
- sclk_setting->Sclk_slew_rate = 0x400;
- sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
- sclk_setting->Pcc_down_slew_rate = 0xffff;
- sclk_setting->SSc_En = dividers.ucSscEnable;
- sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
- sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
- sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
- return result;
- }
-
- ref_clock = polaris10_get_xclk(hwmgr);
-
- for (i = 0; i < NUM_SCLK_RANGE; i++) {
- if (clock > data->range_table[i].trans_lower_frequency
- && clock <= data->range_table[i].trans_upper_frequency) {
- sclk_setting->PllRange = i;
- break;
- }
- }
-
- sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
- temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
- temp <<= 0x10;
- do_div(temp, ref_clock);
- sclk_setting->Fcw_frac = temp & 0xffff;
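-	/* The FCW computed above is a 16.16 fixed-point ratio of
-	 * (clock << postdiv) to ref_clock. Illustrative example (values
-	 * assumed): clock = 61700, ref_clock = 2500 and postdiv = 0 give
-	 * Fcw_int = 24 and Fcw_frac = 44564 (~0.68 of one FCW step).
-	 */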
-
- pcc_target_percent = 10; /* Hardcode 10% for now. */
- pcc_target_freq = clock - (clock * pcc_target_percent / 100);
- sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
-
- ss_target_percent = 2; /* Hardcode 2% for now. */
- sclk_setting->SSc_En = 0;
- if (ss_target_percent) {
- sclk_setting->SSc_En = 1;
- ss_target_freq = clock - (clock * ss_target_percent / 100);
- sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
- temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
- temp <<= 0x10;
- do_div(temp, ref_clock);
- sclk_setting->Fcw1_frac = temp & 0xffff;
- }
-
- return 0;
-}
-
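-/* The SMU_VoltageLevel filled in below packs VDDC, VDDCI and the phase
- * count into a single 32-bit word via VDDC_SHIFT, VDDCI_SHIFT and
- * PHASES_SHIFT (presumably defined in the SMU headers), with voltages
- * scaled by VOLTAGE_SCALE into 0.25 mV units.
- */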
-static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
- uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
- uint32_t i;
- uint16_t vddci;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- *voltage = *mvdd = 0;
-
- /* clock - voltage dependency table is empty table */
- if (dep_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < dep_table->count; i++) {
- /* find first sclk bigger than request */
- if (dep_table->entries[i].clk >= clock) {
- *voltage |= (dep_table->entries[i].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i].vddci)
- *voltage |= (dep_table->entries[i].vddci *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else {
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i].vddc -
- (uint16_t)data->vddc_vddci_delta));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i].mvdd *
- VOLTAGE_SCALE;
-
- *voltage |= 1 << PHASES_SHIFT;
- return 0;
- }
- }
-
-	/* sclk is bigger than the max sclk in the dependency table */
- *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
-	else if (dep_table->entries[i-1].vddci) {
-		vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
-				(dep_table->entries[i - 1].vddc -
-						(uint16_t)data->vddc_vddci_delta));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
-	else if (dep_table->entries[i - 1].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
- return 0;
-}
-
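-/* Fallback SCLK FCW ranges used when the VBIOS does not provide a range
- * table. The initializer order appears to be { vco_setting, postdiv,
- * fcw_pcc, fcw_trans_upper, fcw_trans_lower }, matching the field
- * accesses in polaris10_get_sclk_range_table() below.
- */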
-static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] =
-{ {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
- {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
- {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
- {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
- {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
- {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
- {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
- {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
-
-static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t i, ref_clk;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
- struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
-
- ref_clk = polaris10_get_xclk(hwmgr);
-
- if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
- for (i = 0; i < NUM_SCLK_RANGE; i++) {
- table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
- table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
- table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
-
- table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
- table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
- }
- return;
- }
-
- for (i = 0; i < NUM_SCLK_RANGE; i++) {
-
- data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
- data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
-
- table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
- table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
- table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
-
- table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
- table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
- }
-}
-
-/**
-* Populates single SMC SCLK structure using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-
-static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, uint16_t sclk_al_threshold,
- struct SMU74_Discrete_GraphicsLevel *level)
-{
- int result, i, temp;
- /* PP_Clocks minClocks; */
- uint32_t mvdd;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- SMU_SclkSetting curr_sclk_setting = { 0 };
-
- result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
-
- /* populate graphics levels */
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk, clock,
- &level->MinVoltage, &mvdd);
-
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for "
- "VDDC engine clock dependency table",
- return result);
- level->ActivityLevel = sclk_al_threshold;
-
- level->CcPwrDynRm = 0;
- level->CcPwrDynRm1 = 0;
- level->EnabledForActivity = 0;
- level->EnabledForThrottle = 1;
- level->UpHyst = 10;
- level->DownHyst = 0;
- level->VoltageDownHyst = 0;
- level->PowerThrottle = 0;
-
-	/*
-	 * TODO: get minimum clocks from dal configuration
-	 * PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks);
-	 */
- /* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */
-
- /* get level->DeepSleepDivId
- if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
- level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR);
- */
- PP_ASSERT_WITH_CODE((clock >= POLARIS10_MINIMUM_ENGINE_CLOCK), "Engine clock can't satisfy stutter requirement!", return 0);
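-	/* Pick the largest deep-sleep divider ID i such that clock >> i
-	 * still meets the minimum engine clock. E.g. (values assumed) if the
-	 * loop starts at i = 5 with a 600 MHz clock and a 25 MHz minimum,
-	 * 600 MHz >> 5 = 18.75 MHz is too low, so it settles on i = 4
-	 * (37.5 MHz).
-	 */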
- for (i = POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
- temp = clock >> i;
-
- if (temp >= POLARIS10_MINIMUM_ENGINE_CLOCK || i == 0)
- break;
- }
-
- level->DeepSleepDivId = i;
-
- /* Default to slow, highest DPM level will be
- * set to PPSMC_DISPLAY_WATERMARK_LOW later.
- */
- if (data->update_up_hyst)
- level->UpHyst = (uint8_t)data->up_hyst;
- if (data->update_down_hyst)
- level->DownHyst = (uint8_t)data->down_hyst;
-
- level->SclkSetting = curr_sclk_setting;
-
- CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
- return 0;
-}
-
-/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_dpm_table *dpm_table = &data->dpm_table;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
- int result = 0;
- uint32_t array = data->dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
- SMU74_MAX_LEVELS_GRAPHICS;
- struct SMU74_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- uint32_t i, max_entry;
-	uint8_t highest_pcie_level_enabled = 0,
- lowest_pcie_level_enabled = 0,
- mid_pcie_level_enabled = 0,
- count = 0;
-
- polaris10_get_sclk_range_table(hwmgr);
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
-
- result = polaris10_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)data->activity_target[i],
- &(data->smc_state_table.GraphicsLevel[i]));
- if (result)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- levels[i].DeepSleepDivId = 0;
- }
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SPLLShutdownSupport))
- data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
-
- data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
- data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
- max_entry = pcie_entry_cnt - 1;
- for (i = 0; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel =
- (uint8_t) ((i < max_entry) ? i : max_entry);
- } else {
-		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-						(1 << (highest_pcie_level_enabled + 1))) != 0))
-			highest_pcie_level_enabled++;
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << lowest_pcie_level_enabled)) == 0))
- lowest_pcie_level_enabled++;
-
-		while ((count < highest_pcie_level_enabled) &&
-				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
-			count++;
-
-		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
-				highest_pcie_level_enabled ?
-				(lowest_pcie_level_enabled + 1 + count) :
-				highest_pcie_level_enabled;
-
-		/* set pcieDpmLevel to highest_pcie_level_enabled */
-		for (i = 2; i < dpm_table->sclk_table.count; i++)
-			levels[i].pcieDpmLevel = highest_pcie_level_enabled;
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled */
- levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled */
- levels[1].pcieDpmLevel = mid_pcie_level_enabled;
- }
- /* level count will send to smc once at init smc table and never change */
- result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, data->sram_end);
-
- return result;
-}
-
-static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
- struct cgs_display_info info = {0, 0, NULL};
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (table_info->vdd_dep_on_mclk) {
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk, clock,
- &mem_level->MinVoltage, &mem_level->MinMvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find MinVddc voltage value from memory "
- "VDDC voltage dependency table", return result);
- }
-
- mem_level->MclkFrequency = clock;
- mem_level->EnabledForThrottle = 1;
- mem_level->EnabledForActivity = 0;
- mem_level->UpHyst = 0;
- mem_level->DownHyst = 100;
- mem_level->VoltageDownHyst = 0;
- mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- mem_level->StutterEnable = false;
- mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- data->display_timing.num_existing_displays = info.display_count;
-
- if ((data->mclk_stutter_mode_threshold) &&
- (clock <= data->mclk_stutter_mode_threshold) &&
- (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
- STUTTER_ENABLE) & 0x1))
- mem_level->StutterEnable = true;
-
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
- }
- return result;
-}
-
-/**
-* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_dpm_table *dpm_table = &data->dpm_table;
- int result;
- /* populate MCLK dpm table to SMU7 */
- uint32_t array = data->dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
- uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
- SMU74_MAX_LEVELS_MEMORY;
- struct SMU74_Discrete_MemoryLevel *levels =
- data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero",
- return -EINVAL);
- result = polaris10_populate_single_memory_level(hwmgr,
- dpm_table->mclk_table.dpm_levels[i].value,
- &levels[i]);
- if (i == dpm_table->mclk_table.count - 1) {
- levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
- levels[i].EnabledForActivity = 1;
- }
- if (result)
- return result;
- }
-
-	/* To prevent MC activity in stutter mode from pushing DPM up,
-	 * the UVD change complements this by putting the MCLK in
-	 * a higher state by default, so that we are not affected by
-	 * the up threshold or MCLK DPM latency.
- */
- levels[0].ActivityLevel = 0x1f;
- CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
- data->smc_state_table.MemoryDpmLevelCount =
- (uint8_t)dpm_table->mclk_table.count;
- data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-
- /* level count will send to smc once at init smc table and never change */
- result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, data->sram_end);
-
- return result;
-}
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param hwmgr the address of the hardware manager
-* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
-* @param voltage the SMC VOLTAGE structure to be populated
-*/
-int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
- uint32_t mclk, SMIO_Pattern *smio_pat)
-{
- const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (POLARIS10_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-		/* find the first MVDD entry whose clock is >= the requested clock */
- for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
- smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
- PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.",
- return -EINVAL);
- } else
- return -EINVAL;
-
- return 0;
-}
-
-static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = 0;
- uint32_t sclk_frequency;
- const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- SMIO_Pattern vol_level;
- uint32_t mvdd;
- uint16_t us_mvdd;
-
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
-
- /* Get MinVoltage and Frequency from DPM0,
- * already converted to SMC_UL */
- sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk,
- sclk_frequency,
- &table->ACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDC voltage value "
- "in Clock Dependency Table",
- );
-
-
- result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
- PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- table->ACPILevel.DeepSleepDivId = 0;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
-
-
- /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk,
- table->MemoryACPILevel.MclkFrequency,
- &table->MemoryACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDCI voltage value "
- "in Clock Dependency Table",
- );
-
- us_mvdd = 0;
- if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!polaris10_populate_mvdd_value(hwmgr,
- data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
-
- if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
- table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
- else
- table->MemoryACPILevel.MinMvdd = 0;
-
- table->MemoryACPILevel.StutterEnable = false;
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
- return result;
-}
-
-static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->VceLevelCount = (uint8_t)(mm_table->count);
- table->VceBootLevel = 0;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage = 0;
- table->VceLevel[count].MinVoltage |=
- (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
-			vddci = (uint32_t)data->vbios_boot_state.vddci_bootup_value;
-
-
- table->VceLevel[count].MinVoltage |=
- (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /*retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for VCE engine clock",
- return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
-			vddci = (uint32_t)data->vbios_boot_state.vddci_bootup_value;
-
- table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
- int32_t eng_clock, int32_t mem_clock,
- SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
- uint32_t dram_timing;
- uint32_t dram_timing2;
- uint32_t burst_time;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- eng_clock, mem_clock);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
- arb_regs->McArbBurstTime = (uint8_t)burst_time;
-
- return 0;
-}
-
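-/* The MC arbiter table is a 2-D matrix of DRAM timing entries indexed by
- * [sclk level][mclk level]; each entry is filled by the helper above and
- * the whole table is then copied to SMC SRAM in a single transfer.
- */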
-static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
- int result = 0;
-
- for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = polaris10_populate_memory_timing_parameters(hwmgr,
- data->dpm_table.sclk_table.dpm_levels[i].value,
- data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
- if (result == 0)
- result = atomctrl_set_ac_timing_ai(hwmgr, data->dpm_table.mclk_table.dpm_levels[j].value, j);
- if (result != 0)
- return result;
- }
- }
-
- result = polaris10_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU74_Discrete_MCArbDramTimingTable),
- data->sram_end);
- return result;
-}
-
-static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->UvdLevelCount = (uint8_t)(mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].MinVoltage = 0;
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
-			vddci = (uint32_t)data->vbios_boot_state.vddci_bootup_value;
-
- table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].VclkFrequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"cannot find divider id for Vclk clock", return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"cannot find divider id for Dclk clock", return result);
-
- table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
- }
-
- return result;
-}
-
-static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- int result = 0;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- /* find boot level from dpm table */
- result = phm_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(table->GraphicsBootLevel));
-
- result = phm_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(table->MemoryBootLevel));
-
- table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
- VOLTAGE_SCALE;
- table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE;
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return 0;
-}
-
-static int polaris10_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint8_t count, level;
-
- count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
-
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_sclk->entries[level].clk >=
- data->vbios_boot_state.sclk_bootup_value) {
- data->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_mclk->entries[level].clk >=
- data->vbios_boot_state.mclk_bootup_value) {
- data->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-
- return 0;
-}
-
-static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint8_t i, stretch_amount, volt_offset = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
- stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
-	/* Read the SMU efuse to calculate RO (the ring-oscillator reading)
-	 * and determine whether the part is SS or FF silicon:
-	 * if RO >= 1660 MHz, the part is FF.
-	 */
- efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (67 * 4));
- efuse &= 0xFF000000;
- efuse = efuse >> 24;
-
- if (hwmgr->chip_id == CHIP_POLARIS10) {
- min = 1000;
- max = 2300;
- } else {
- min = 1100;
- max = 2100;
- }
-
-	ro = efuse * (max - min) / 255 + min;
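-	/* Worked example (values assumed, illustration only): with
-	 * efuse = 128 on Polaris10, ro = 128 * (2300 - 1000) / 255 + 1000
-	 * = 1652, which is below the 1660 FF threshold mentioned above,
-	 * so the part would be treated as SS silicon.
-	 */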
-
- /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
- for (i = 0; i < sclk_table->count; i++) {
- data->smc_state_table.Sclk_CKS_masterEn0_7 |=
- sclk_table->entries[i].cks_enable << i;
- if (hwmgr->chip_id == CHIP_POLARIS10) {
-			volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) /
-						(2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
-			volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) /
-						(2522480 - sclk_table->entries[i].clk/100 * 115764/100));
-		} else {
-			volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) /
-						(2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
-			volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) /
-						(3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
- }
-
- if (volt_without_cks >= volt_with_cks)
- volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
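-		/* Illustrative: the "* 100 + 624) / 625" step converts a mV
-		 * delta to 6.25 mV units, rounding up; e.g. a 26 mV delta
-		 * gives (26 * 100 + 624) / 625 = 5 steps.
-		 */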
-
- data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
- }
-
- data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
- /* Populate CKS Lookup Table */
- if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 &&
- stretch_amount != 4 && stretch_amount != 5) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
- PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
- return -EINVAL);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
- value &= 0xFFFFFFFE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
- return 0;
-}
-
-/**
-* Populates the SMC VRConfig field in DPM table.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint16_t config;
-
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
- if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
- PP_ASSERT_WITH_CODE(false,
- "VDDC should be on SVI2 control in merged mode!",
- );
- }
- /* Set Vddci Voltage Controller */
- if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- }
- /* Set Mvdd Voltage Controller */
- if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
- offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
-
-int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
- int result = 0;
- struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
- AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
- AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
- uint32_t tmp, i;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
- if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
- return result;
-
- result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
-
- if (0 == result) {
- table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
- table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
- table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
- table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
- table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
- table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
- table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
- table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
- table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
- table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
- table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
- table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
- table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
- table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
- table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
- table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
- table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
- AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
- AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
- AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
- AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
- AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
- AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
- AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
-
- for (i = 0; i < NUM_VFT_COLUMNS; i++) {
- AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
- AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
- }
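-		/* As with the clock stretcher above: cks_voffset is converted
-		 * from mV to 6.25 mV units (* 100 / 625, truncating here),
-		 * and sclk_offset is divided by 100 (assumed to convert
-		 * 10 kHz units to MHz).
-		 */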
-
- result = polaris10_read_smc_sram_dword(smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
- &tmp, data->sram_end);
-
- polaris10_copy_bytes_to_smc(smumgr,
- tmp,
- (uint8_t *)&AVFS_meanNsigma,
- sizeof(AVFS_meanNsigma_t),
- data->sram_end);
-
- result = polaris10_read_smc_sram_dword(smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
- &tmp, data->sram_end);
- polaris10_copy_bytes_to_smc(smumgr,
- tmp,
- (uint8_t *)&AVFS_SclkOffset,
- sizeof(AVFS_Sclk_Offset_t),
- data->sram_end);
-
- data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
- (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
- (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
- (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
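-		/* Illustrative note: the four enable flags are packed one bit
-		 * each into the override word via the *_Vdroop_Enable shifts,
-		 * so the SMC can gate the CKS-on/off vdroop and fuse tables
-		 * independently.
-		 */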
- data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
- }
- return result;
-}
-
-/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
- const struct polaris10_ulv_parm *ulv = &(data->ulv);
- uint8_t i;
- struct pp_atomctrl_gpio_pin_assignment gpio_pin;
- pp_atomctrl_clock_dividers_vi dividers;
-
- result = polaris10_setup_default_dpm_tables(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to setup default DPM tables!", return result);
-
- if (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control)
- polaris10_populate_smc_voltage_tables(hwmgr, table);
-
- table->SystemFlags = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (data->is_memory_gddr5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
- result = polaris10_populate_ulv_state(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, PPPOLARIS10_CGULVPARAMETER_DFLT);
- }
-
- result = polaris10_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result);
-
- result = polaris10_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result);
-
- result = polaris10_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result);
-
- result = polaris10_populate_smc_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result);
-
- result = polaris10_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result);
-
- result = polaris10_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
- /* Since only the initial state is completely set up at this point
- * (the other states are just copies of the boot state) we only
- * need to populate the ARB settings for the initial state.
- */
- result = polaris10_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result);
-
- result = polaris10_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result);
-
- result = polaris10_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result);
-
-	result = polaris10_populate_smc_initial_state(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot State!", return result);
-
- result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate BAPM Parameters!", return result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = polaris10_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate Clock Stretcher Data Table!",
- return result);
- }
-
-	result = polaris10_populate_avfs_parameters(hwmgr);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"Failed to populate AVFS Parameters!", return result);
-
- table->CurrSclkPllRange = 0xff;
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- table_info->cac_dtp_table->usTargetOperatingTemp *
- POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
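-	/* Illustrative note: POLARIS10_Q88_FORMAT_CONVERSION_UNIT converts
-	 * degrees C into the SMC's Q8.8 fixed-point format (a factor of
-	 * 256, assuming the usual Q8.8 definition); e.g. a 90 C target
-	 * would become 90 * 256 = 23040.
-	 */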
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
- table->PCIeGenInterval = 1;
- table->VRConfig = 0;
-
- result = polaris10_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate VRConfig setting!", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
- table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
- } else {
- table->VRHotGpio = POLARIS10_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin)) {
- table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = POLARIS10_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- /* Thermal Output GPIO */
- if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
- &gpio_pin)) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
-		/* To determine polarity, read GPIOPAD_A at the assigned GPIO
-		 * pin: the VBIOS programs this register to the 'inactive
-		 * state', so the driver can derive the 'active state' from it
-		 * and program the SMU with the correct polarity.
-		 */
- table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
- & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
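-		/* Example (illustrative): if the VBIOS left the pad bit clear
-		 * (inactive state low), the expression above yields 1, i.e.
-		 * the active state is high; and vice versa.
-		 */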
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
- && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- } else {
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- /* Populate BIF_SCLK levels into SMC DPM table */
- for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++) {
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, data->bif_sclk_table[i], &dividers);
- PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
-
- if (i == 0)
- table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
- else
- table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
- }
-
- for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
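-	/* Note: the size below deliberately stops short of the three
-	 * SMU74_PIDController entries assumed to sit at the end of the
-	 * table, since those are managed by the SMC itself and must not
-	 * be overwritten from the host.
-	 */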
- /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
- result = polaris10_copy_bytes_to_smc(hwmgr->smumgr,
- data->dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
- data->sram_end);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload dpm data to SMC memory!", return result);
-
- return 0;
-}
-
-/**
-* Initialize the ARB DRAM timing table's index field.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
- const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t tmp;
- int result;
-
- /* This is a read-modify-write on the first byte of the ARB table.
- * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
- * is the field 'current'.
- * This solution is ugly, but we never write the whole table only
- * individual fields in it.
- * In reality this field should not be in that structure
- * but in a soft register.
- */
- result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
- data->arb_table_start, &tmp, data->sram_end);
-
- if (result)
- return result;
-
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
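-	/* Illustrative example: if the dword read back was 0xAABBCCDD, the
-	 * mask keeps 0x00BBCCDD and the OR places MC_CG_ARB_FREQ_F1 in the
-	 * top byte -- the 'current' field described above -- before the
-	 * word is written back.
-	 */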
-
- return polaris10_write_smc_sram_dword(hwmgr->smumgr,
- data->arb_table_start, tmp, data->sram_end);
-}
-
-static int polaris10_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot))
- return smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_EnableVRHotGPIOInterrupt);
-
- return 0;
-}
-
-static int polaris10_enable_sclk_control(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
- SCLK_PWRMGT_OFF, 0);
- return 0;
-}
-
-static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_ulv_parm *ulv = &(data->ulv);
-
- if (ulv->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
-
- return 0;
-}
-
-static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_ulv_parm *ulv = &(data->ulv);
-
- if (ulv->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
-
- return 0;
-}
-
-static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to enable Master Deep Sleep switch failed!",
- return -1);
- } else {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
- PP_ASSERT_WITH_CODE(false,
- "Attempt to disable Master Deep Sleep switch failed!",
- return -1);
- }
- }
-
- return 0;
-}
-
-static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
- PP_ASSERT_WITH_CODE(false,
- "Attempt to disable Master Deep Sleep switch failed!",
- return -1);
- }
- }
-
- return 0;
-}
-
-static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t soft_register_value = 0;
- uint32_t handshake_disables_offset = data->soft_regs_start
- + offsetof(SMU74_SoftRegisters, HandshakeDisables);
-
- /* enable SCLK dpm */
- if (!data->sclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
- "Failed to enable SCLK DPM during DPM Start Function!",
- return -1);
-
- /* enable MCLK dpm */
-	if (!data->mclk_dpm_key_disabled) {
-		/* Disable the UVD - SMU handshake for MCLK. */
- soft_register_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, handshake_disables_offset);
- soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- handshake_disables_offset, soft_register_value);
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Enable)),
- "Failed to enable MCLK DPM during DPM Start Function!",
- return -1);
-
- PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
- udelay(10);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
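-		/* The ixLCAC_* writes above are assumed to re-arm the memory
-		 * and CPL local-CAC blocks around MCLK DPM enablement; the
-		 * magic values are from the original bring-up and are left
-		 * as-is.
-		 */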
- }
-
- return 0;
-}
-
-static int polaris10_start_dpm(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- /*enable general power management */
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- GLOBAL_PWRMGT_EN, 1);
-
- /* enable sclk deep sleep */
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
- DYNAMIC_PM_EN, 1);
-
- /* prepare for PCIE DPM */
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start + offsetof(SMU74_SoftRegisters,
- VoltageChangeTimeout), 0x1000);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
- SWRST_COMMAND_1, RESETLC, 0x0);
-/*
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_Voltage_Cntl_Enable)),
- "Failed to enable voltage DPM during DPM Start Function!",
- return -1);
-*/
-
- if (polaris10_enable_sclk_mclk_dpm(hwmgr)) {
-		printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!\n");
- return -1;
- }
-
- /* enable PCIE dpm */
- if (0 == data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_Enable)),
- "Failed to enable pcie DPM during DPM Start Function!",
- return -1);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition)) {
- PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_EnableACDCGPIOInterrupt)),
- "Failed to enable AC DC GPIO Interrupt!",
- );
- }
-
- return 0;
-}
-
-static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- /* disable SCLK dpm */
- if (!data->sclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_DPM_Disable) == 0),
- "Failed to disable SCLK DPM!",
- return -1);
-
- /* disable MCLK dpm */
- if (!data->mclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Disable) == 0),
- "Failed to disable MCLK DPM!",
- return -1);
- }
-
- return 0;
-}
-
-static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- /* disable general power management */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- GLOBAL_PWRMGT_EN, 0);
- /* disable sclk deep sleep */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
- DYNAMIC_PM_EN, 0);
-
- /* disable PCIE dpm */
- if (!data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_Disable) == 0),
- "Failed to disable pcie DPM during DPM Stop Function!",
- return -1);
- }
-
- if (polaris10_disable_sclk_mclk_dpm(hwmgr)) {
-		printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!\n");
- return -1;
- }
-
- return 0;
-}
-
-static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
-{
- bool protection;
- enum DPM_EVENT_SRC src;
-
- switch (sources) {
- default:
-		printk(KERN_ERR "Unknown throttling event sources.\n");
- /* fall through */
- case 0:
- protection = false;
- /* src is unused */
- break;
- case (1 << PHM_AutoThrottleSource_Thermal):
- protection = true;
- src = DPM_EVENT_SRC_DIGITAL;
- break;
- case (1 << PHM_AutoThrottleSource_External):
- protection = true;
- src = DPM_EVENT_SRC_EXTERNAL;
- break;
- case (1 << PHM_AutoThrottleSource_External) |
- (1 << PHM_AutoThrottleSource_Thermal):
- protection = true;
- src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
- break;
- }
- /* Order matters - don't enable thermal protection for the wrong source. */
- if (protection) {
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
- DPM_EVENT_SRC, src);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- THERMAL_PROTECTION_DIS,
- !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController));
-	} else {
-		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
-				THERMAL_PROTECTION_DIS, 1);
-	}
-}
-
-static int polaris10_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
- PHM_AutoThrottleSource source)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (!(data->active_auto_throttle_sources & (1 << source))) {
- data->active_auto_throttle_sources |= 1 << source;
- polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
- }
- return 0;
-}
-
-static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
- return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
- PHM_AutoThrottleSource source)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (data->active_auto_throttle_sources & (1 << source)) {
- data->active_auto_throttle_sources &= ~(1 << source);
- polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
- }
- return 0;
-}
-
-static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
- return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
-{
-	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-	data->pcie_performance_request = true;
-
- return 0;
-}
-
-int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-	int tmp_result, result = 0;
-
-	tmp_result = (!polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
-	PP_ASSERT_WITH_CODE(tmp_result == 0,
-			"DPM is already running right now, no need to enable DPM!",
-			return 0);
-
- if (polaris10_voltage_control(hwmgr)) {
- tmp_result = polaris10_enable_voltage_control(hwmgr);
- PP_ASSERT_WITH_CODE(tmp_result == 0,
- "Failed to enable voltage control!",
- result = tmp_result);
-
- tmp_result = polaris10_construct_voltage_tables(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to construct voltage tables!",
- result = tmp_result);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport))
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController))
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
-
- tmp_result = polaris10_program_static_screen_threshold_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program static screen threshold parameters!",
- result = tmp_result);
-
- tmp_result = polaris10_enable_display_gap(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable display gap!", result = tmp_result);
-
- tmp_result = polaris10_program_voting_clients(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program voting clients!", result = tmp_result);
-
- tmp_result = polaris10_process_firmware_header(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to process firmware header!", result = tmp_result);
-
- tmp_result = polaris10_initial_switch_from_arbf0_to_f1(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize switch from ArbF0 to F1!",
- result = tmp_result);
-
- tmp_result = polaris10_init_smc_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize SMC table!", result = tmp_result);
-
- tmp_result = polaris10_init_arb_table_index(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize ARB table index!", result = tmp_result);
-
- tmp_result = polaris10_populate_pm_fuses(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to populate PM fuses!", result = tmp_result);
-
- tmp_result = polaris10_enable_vrhot_gpio_interrupt(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
-
- smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
-
- tmp_result = polaris10_enable_sclk_control(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable SCLK control!", result = tmp_result);
-
- tmp_result = polaris10_enable_smc_voltage_controller(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable voltage control!", result = tmp_result);
-
- tmp_result = polaris10_enable_ulv(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable ULV!", result = tmp_result);
-
- tmp_result = polaris10_enable_deep_sleep_master_switch(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable deep sleep master switch!", result = tmp_result);
-
-	tmp_result = polaris10_enable_didt_config(hwmgr);
-	PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to enable DiDt config!", result = tmp_result);
-
- tmp_result = polaris10_start_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to start DPM!", result = tmp_result);
-
- tmp_result = polaris10_enable_smc_cac(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable SMC CAC!", result = tmp_result);
-
- tmp_result = polaris10_enable_power_containment(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable power containment!", result = tmp_result);
-
- tmp_result = polaris10_power_control_set_level(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to power control set level!", result = tmp_result);
-
- tmp_result = polaris10_enable_thermal_auto_throttle(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable thermal auto throttle!", result = tmp_result);
-
- tmp_result = polaris10_pcie_performance_request(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "pcie performance request failed!", result = tmp_result);
-
- return result;
-}
-
-int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
- PP_ASSERT_WITH_CODE(tmp_result == 0,
- "DPM is not running right now, no need to disable DPM!",
- return 0);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController))
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
-
- tmp_result = polaris10_disable_power_containment(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable power containment!", result = tmp_result);
-
- tmp_result = polaris10_disable_smc_cac(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable SMC CAC!", result = tmp_result);
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
-
- tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable thermal auto throttle!", result = tmp_result);
-
- tmp_result = polaris10_stop_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to stop DPM!", result = tmp_result);
-
- tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable deep sleep master switch!", result = tmp_result);
-
- tmp_result = polaris10_disable_ulv(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable ULV!", result = tmp_result);
-
- tmp_result = polaris10_clear_voting_clients(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to clear voting clients!", result = tmp_result);
-
- tmp_result = polaris10_reset_to_default(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to reset to default!", result = tmp_result);
-
- tmp_result = polaris10_force_switch_to_arbf0(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to force to switch arbf0!", result = tmp_result);
-
- return result;
-}
-
-int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
-{
-	return 0;
-}
-
-int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
- return phm_hwmgr_backend_fini(hwmgr);
-}
-
-int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicPatchPowerState);
-
- if (data->mvdd_control == POLARIS10_VOLTAGE_CONTROL_NONE)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableMVDDControl);
-
- if (data->vddci_control == POLARIS10_VOLTAGE_CONTROL_NONE)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableSMU7ThermalManagement);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicPowerManagement);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UnTabledHardwareInterface);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SMC);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_NonABMSupportInPPLib);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicUVDState);
-
- /* power tune caps Assume disabled */
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SQRamping);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DBRamping);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TDRamping);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TCPRamping);
-
- if (hwmgr->powercontainment_enabled)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment);
- else
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODFuzzyFanControlSupport);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM);
-
- if (hwmgr->chip_id == CHIP_POLARIS11)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SPLLShutdownSupport);
- return 0;
-}
-
-static void polaris10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- polaris10_initialize_power_tune_defaults(hwmgr);
-
- data->pcie_gen_performance.max = PP_PCIEGen1;
- data->pcie_gen_performance.min = PP_PCIEGen3;
- data->pcie_gen_power_saving.max = PP_PCIEGen1;
- data->pcie_gen_power_saving.min = PP_PCIEGen3;
- data->pcie_lane_performance.max = 0;
- data->pcie_lane_performance.min = 16;
- data->pcie_lane_power_saving.max = 0;
- data->pcie_lane_power_saving.min = 16;
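-	/* Note: the min/max pairs above are intentionally initialized
-	 * "inverted" (max = Gen1 / 0 lanes, min = Gen3 / 16 lanes); they
-	 * are assumed to be narrowed toward the real range as power states
-	 * are parsed later.
-	 */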
-}
-
-/**
-* Get Leakage VDDC based on leakage ID.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint16_t vv_id;
- uint32_t vddc = 0;
- uint16_t i, j;
- uint32_t sclk = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
- int result;
-
- for (i = 0; i < POLARIS10_MAX_LEAKAGE_COUNT; i++) {
- vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
- if (!phm_get_sclk_for_voltage_evv(hwmgr,
- table_info->vddc_lookup_table, vv_id, &sclk)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- for (j = 1; j < sclk_table->count; j++) {
- if (sclk_table->entries[j].clk == sclk &&
- sclk_table->entries[j].cks_enable == 0) {
- sclk += 5000;
- break;
- }
- }
- }
-
- if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
- VOLTAGE_TYPE_VDDC,
- sclk, vv_id, &vddc) != 0) {
-				printk(KERN_WARNING "failed to retrieve EVV voltage!\n");
- continue;
- }
-
-			/* Make sure vddc is less than 2 V (200000 in 0.01 mV
-			 * units), or it could burn the ASIC.
-			 */
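-			/* Illustrative: vddc = 105000 here means 1050 mV, and
-			 * the /100 below stores the value in mV in the
-			 * leakage table.
-			 */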
-			PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
-					"Invalid VDDC value", result = -EINVAL);
-
- /* the voltage should not be zero nor equal to leakage ID */
- if (vddc != 0 && vddc != vv_id) {
- data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
- data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
- data->vddc_leakage.count++;
- }
- }
- }
-
- return 0;
-}
-
-/**
- * Change a virtual leakage voltage to its actual value.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param voltage pointer to the voltage value to patch
- * @param leakage_table pointer to the leakage table
- */
-static void polaris10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
- uint16_t *voltage, struct polaris10_leakage_voltage *leakage_table)
-{
- uint32_t index;
-
- /* search for leakage voltage ID 0xff01 ~ 0xff08 */
- for (index = 0; index < leakage_table->count; index++) {
- /* if this voltage matches a leakage voltage ID */
- /* patch with actual leakage voltage */
- if (leakage_table->leakage_id[index] == *voltage) {
- *voltage = leakage_table->actual_voltage[index];
- break;
- }
- }
-
- if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
-		printk(KERN_ERR "Voltage value looks like a leakage ID but was not patched\n");
-}
-
-/**
-* Patch the voltage lookup table with EVV leakage values.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param lookup_table pointer to the voltage lookup table
-* @param leakage_table pointer to the leakage table
-* @return always 0
-*/
-static int polaris10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table,
- struct polaris10_leakage_voltage *leakage_table)
-{
- uint32_t i;
-
- for (i = 0; i < lookup_table->count; i++)
- polaris10_patch_with_vdd_leakage(hwmgr,
- &lookup_table->entries[i].us_vdd, leakage_table);
-
- return 0;
-}
-
-static int polaris10_patch_clock_voltage_limits_with_vddc_leakage(
- struct pp_hwmgr *hwmgr, struct polaris10_leakage_voltage *leakage_table,
- uint16_t *vddc)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- polaris10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
- hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
- table_info->max_clock_voltage_on_dc.vddc;
- return 0;
-}
-
-static int polaris10_patch_voltage_dependency_tables_with_lookup_table(
- struct pp_hwmgr *hwmgr)
-{
- uint8_t entryId;
- uint8_t voltageId;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
- table_info->vdd_dep_on_mclk;
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- for (entryId = 0; entryId < sclk_table->count; ++entryId) {
- voltageId = sclk_table->entries[entryId].vddInd;
- sclk_table->entries[entryId].vddc =
- table_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- for (entryId = 0; entryId < mclk_table->count; ++entryId) {
- voltageId = mclk_table->entries[entryId].vddInd;
- mclk_table->entries[entryId].vddc =
- table_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- for (entryId = 0; entryId < mm_table->count; ++entryId) {
- voltageId = mm_table->entries[entryId].vddcInd;
- mm_table->entries[entryId].vddc =
- table_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- return 0;
-
-}
-static int polaris10_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
-{
- /* Need to determine if we need calculated voltage. */
- return 0;
-}
-
-static int polaris10_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
-{
- /* Need to determine if we need calculated voltage from mm table. */
- return 0;
-}
-
-static int polaris10_sort_lookup_table(struct pp_hwmgr *hwmgr,
- struct phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
- uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
- table_size = lookup_table->count;
-
- PP_ASSERT_WITH_CODE(0 != lookup_table->count,
- "Lookup table is empty", return -EINVAL);
-
- /* Sorting voltages */
- for (i = 0; i < table_size - 1; i++) {
- for (j = i + 1; j > 0; j--) {
- if (lookup_table->entries[j].us_vdd <
- lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
- }
- }
- }
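-	/* The nested loops above are a plain insertion sort (without an
-	 * early break): each pass bubbles entry i + 1 down into the sorted
-	 * prefix, leaving the table ordered by ascending us_vdd; e.g.
-	 * {900, 850, 1000, 800} becomes {800, 850, 900, 1000} (mV,
-	 * illustrative).
-	 */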
-
- return 0;
-}
-
-static int polaris10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
-{
- int result = 0;
- int tmp_result;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- tmp_result = polaris10_patch_lookup_table_with_leakage(hwmgr,
- table_info->vddc_lookup_table, &(data->vddc_leakage));
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = polaris10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
- &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = polaris10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = polaris10_calc_voltage_dependency_tables(hwmgr);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = polaris10_calc_mm_voltage_dependency_table(hwmgr);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = polaris10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
- if (tmp_result)
- result = tmp_result;
-
- return result;
-}
-
-static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
- table_info->vdd_dep_on_mclk;
-
-	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
-		"VDD dependency on SCLK table is missing. \
-		This table is mandatory", return -EINVAL);
-	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
-		"VDD dependency on SCLK table is empty (must have at least one entry). \
-		This table is mandatory", return -EINVAL);
-
-	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
-		"VDD dependency on MCLK table is missing. \
-		This table is mandatory", return -EINVAL);
-	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
-		"VDD dependency on MCLK table is empty (must have at least one entry). \
-		This table is mandatory", return -EINVAL);
-
- table_info->max_clock_voltage_on_ac.sclk =
- allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
- table_info->max_clock_voltage_on_ac.mclk =
- allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
- table_info->max_clock_voltage_on_ac.vddc =
- allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
- table_info->max_clock_voltage_on_ac.vddci =
- allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
-
- hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
-	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
-
- return 0;
-}
-
-int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
- table_info->vdd_dep_on_mclk;
- struct phm_ppt_v1_voltage_lookup_table *lookup_table =
- table_info->vddc_lookup_table;
- uint32_t i;
-
- if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) {
- if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
- return 0;
-
- for (i = 0; i < lookup_table->count; i++) {
- if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
- dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
- return 0;
- }
- }
- }
- return 0;
-}
-
-int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data;
- struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
- uint32_t temp_reg;
- int result;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- hwmgr->backend = data;
-
- data->dll_default_on = false;
- data->sram_end = SMC_RAM_END;
- data->mclk_dpm0_activity_target = 0xa;
- data->disable_dpm_mask = 0xFF;
- data->static_screen_threshold = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
- data->static_screen_threshold_unit = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
- data->activity_target[0] = PPPOLARIS10_TARGETACTIVITY_DFLT;
- data->activity_target[1] = PPPOLARIS10_TARGETACTIVITY_DFLT;
- data->activity_target[2] = PPPOLARIS10_TARGETACTIVITY_DFLT;
- data->activity_target[3] = PPPOLARIS10_TARGETACTIVITY_DFLT;
- data->activity_target[4] = PPPOLARIS10_TARGETACTIVITY_DFLT;
- data->activity_target[5] = PPPOLARIS10_TARGETACTIVITY_DFLT;
- data->activity_target[6] = PPPOLARIS10_TARGETACTIVITY_DFLT;
- data->activity_target[7] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-
- data->voting_rights_clients0 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0;
- data->voting_rights_clients1 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1;
- data->voting_rights_clients2 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2;
- data->voting_rights_clients3 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3;
- data->voting_rights_clients4 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4;
- data->voting_rights_clients5 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5;
- data->voting_rights_clients6 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6;
- data->voting_rights_clients7 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7;
-
- data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
-
- data->mclk_activity_target = PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT;
-
- /* need to set voltage control types before EVV patching */
- data->voltage_control = POLARIS10_VOLTAGE_CONTROL_NONE;
- data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE;
- data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE;
-
- data->enable_tdc_limit_feature = true;
- data->enable_pkg_pwr_tracking_feature = true;
- data->force_pcie_gen = PP_PCIEGenInvalid;
- data->mclk_stutter_mode_threshold = 40000;
-
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
- data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableMVDDControl)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
- data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
- data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
- data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
- data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
- }
-
- if (table_info->cac_dtp_table->usClockStretchAmount != 0)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
-
- polaris10_set_features_platform_caps(hwmgr);
-
- polaris10_patch_voltage_workaround(hwmgr);
- polaris10_init_dpm_defaults(hwmgr);
-
- /* Get leakage voltage based on leakage ID. */
- result = polaris10_get_evv_voltages(hwmgr);
-
-	if (result) {
-		printk(KERN_ERR "Get EVV voltage failed. Abort driver loading!\n");
-		return -1;
-	}
-
- polaris10_complete_dependency_tables(hwmgr);
- polaris10_set_private_data_based_on_pptable(hwmgr);
-
-	/* Initialize Dynamic State Adjustment Rule Settings */
- result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
-
- if (0 == result) {
- struct cgs_system_info sys_info = {0};
-
- data->is_tlu_enabled = false;
-
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
- POLARIS10_MAX_HARDWARE_POWERLEVELS;
- hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
- hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
- temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
- switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
- case 0:
- temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
- break;
- case 1:
- temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
- break;
- case 2:
- temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
- break;
- case 3:
- temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
- break;
- case 4:
- temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
- break;
- default:
- PP_ASSERT_WITH_CODE(0,
- "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
- );
- break;
- }
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
- }
-
- if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
- hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
- hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
- (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
-
- hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
- (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
-
- hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
-
- hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
-
- hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
- (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
-
- hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
-
-			table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
-					(table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
-
- table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
- table_info->cac_dtp_table->usOperatingTempStep = 1;
- table_info->cac_dtp_table->usOperatingTempHyst = 1;
-
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
- hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
-
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
- hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
-
- hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
- table_info->cac_dtp_table->usOperatingTempMinLimit;
-
- hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
- table_info->cac_dtp_table->usOperatingTempMaxLimit;
-
- hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
- table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
-
- hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
- table_info->cac_dtp_table->usOperatingTempStep;
-
- hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
- table_info->cac_dtp_table->usTargetOperatingTemp;
- }
-
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
- else
- data->pcie_gen_cap = (uint32_t)sys_info.value;
- if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- data->pcie_spc_cap = 20;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
- else
- data->pcie_lane_cap = (uint32_t)sys_info.value;
-
- hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
- /* The actual clock step depends on the current frequency and is typically 4.5 or 9 MHz; use 5 MHz (500 in 10 kHz units) as an approximation. */
- hwmgr->platform_descriptor.clockStep.engineClock = 500;
- hwmgr->platform_descriptor.clockStep.memoryClock = 500;
- } else {
- /* Ignore return value in here, we are cleaning up a mess. */
- polaris10_hwmgr_backend_fini(hwmgr);
- }
-
- return 0;
-}
-
-static int polaris10_force_dpm_highest(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t level, tmp;
-
- if (!data->pcie_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
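- /* Walk the enable mask to find the index of its highest set bit;
- * that index is the highest enabled PCIe DPM level.
- */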
- level = 0;
- tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
-
- if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel, level);
- }
- }
-
- if (!data->sclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- level = 0;
- tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
-
- if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- if (!data->mclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- level = 0;
- tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
-
- if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- return 0;
-}
-
-static int polaris10_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- phm_apply_dal_min_voltage_request(hwmgr);
-
- if (!data->sclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- }
-
- if (!data->mclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- }
-
- return 0;
-}
-
-static int polaris10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (!polaris10_is_dpm_running(hwmgr))
- return -EINVAL;
-
- if (!data->pcie_dpm_key_disabled) {
- smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_UnForceLevel);
- }
-
- return polaris10_upload_dpm_level_enable_mask(hwmgr);
-}
-
-static int polaris10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data =
- (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t level;
-
- if (!data->sclk_dpm_key_disabled)
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- level = phm_get_lowest_enabled_level(hwmgr,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
-
- }
-
- if (!data->mclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- level = phm_get_lowest_enabled_level(hwmgr,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- if (!data->pcie_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
- level = phm_get_lowest_enabled_level(hwmgr,
- data->dpm_level_enable_mask.pcie_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- (level));
- }
- }
-
- return 0;
-}
-
-static int polaris10_force_dpm_level(struct pp_hwmgr *hwmgr,
- enum amd_dpm_forced_level level)
-{
- int ret = 0;
-
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = polaris10_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = polaris10_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_AUTO:
- ret = polaris10_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- break;
- default:
- break;
- }
-
- hwmgr->dpm_level = level;
-
- return ret;
-}
-
-static int polaris10_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
- return sizeof(struct polaris10_power_state);
-}
-
-
-static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
- struct pp_power_state *request_ps,
- const struct pp_power_state *current_ps)
-{
-
- struct polaris10_power_state *polaris10_ps =
- cast_phw_polaris10_power_state(&request_ps->hardware);
- uint32_t sclk;
- uint32_t mclk;
- struct PP_Clocks minimum_clocks = {0};
- bool disable_mclk_switching;
- bool disable_mclk_switching_for_frame_lock;
- struct cgs_display_info info = {0};
- const struct phm_clock_and_voltage_limits *max_limits;
- uint32_t i;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int32_t count;
- int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
-
- data->battery_state = (PP_StateUILabel_Battery ==
- request_ps->classification.ui_label);
-
- PP_ASSERT_WITH_CODE(polaris10_ps->performance_level_count == 2,
- "VI should always have 2 performance levels",
- );
-
- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
- &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
- &(hwmgr->dyn_state.max_clock_voltage_on_dc);
-
- /* Cap clock DPM tables at DC MAX if it is in DC. */
- if (PP_PowerSource_DC == hwmgr->power_source) {
- for (i = 0; i < polaris10_ps->performance_level_count; i++) {
- if (polaris10_ps->performance_levels[i].memory_clock > max_limits->mclk)
- polaris10_ps->performance_levels[i].memory_clock = max_limits->mclk;
- if (polaris10_ps->performance_levels[i].engine_clock > max_limits->sclk)
- polaris10_ps->performance_levels[i].engine_clock = max_limits->sclk;
- }
- }
-
- polaris10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
- polaris10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
-
- /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
- max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
- stable_pstate_sclk = (max_limits->sclk * 75) / 100;
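- /* Snap the 75%-of-max target down to the nearest SCLK dependency table entry. */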
-
- for (count = table_info->vdd_dep_on_sclk->count - 1;
- count >= 0; count--) {
- if (stable_pstate_sclk >=
- table_info->vdd_dep_on_sclk->entries[count].clk) {
- stable_pstate_sclk =
- table_info->vdd_dep_on_sclk->entries[count].clk;
- break;
- }
- }
-
- if (count < 0)
- stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
-
- stable_pstate_mclk = max_limits->mclk;
-
- minimum_clocks.engineClock = stable_pstate_sclk;
- minimum_clocks.memoryClock = stable_pstate_mclk;
- }
-
- if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
- minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
- if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
- polaris10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
- if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.engineClock),
- "Overdrive sclk exceeds limit",
- hwmgr->gfx_arbiter.sclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
- if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
- polaris10_ps->performance_levels[1].engine_clock =
- hwmgr->gfx_arbiter.sclk_over_drive;
- }
-
- if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.memoryClock),
- "Overdrive mclk exceeds limit",
- hwmgr->gfx_arbiter.mclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
- if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
- polaris10_ps->performance_levels[1].memory_clock =
- hwmgr->gfx_arbiter.mclk_over_drive;
- }
-
- disable_mclk_switching_for_frame_lock = phm_cap_enabled(
- hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
-
-
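- /* With more than one active display the vblank periods need not line up,
- * so pin MCLK at its highest level rather than switching it at runtime.
- */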
- disable_mclk_switching = (1 < info.display_count) ||
- disable_mclk_switching_for_frame_lock;
-
- sclk = polaris10_ps->performance_levels[0].engine_clock;
- mclk = polaris10_ps->performance_levels[0].memory_clock;
-
- if (disable_mclk_switching)
- mclk = polaris10_ps->performance_levels
- [polaris10_ps->performance_level_count - 1].memory_clock;
-
- if (sclk < minimum_clocks.engineClock)
- sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
- max_limits->sclk : minimum_clocks.engineClock;
-
- if (mclk < minimum_clocks.memoryClock)
- mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
- max_limits->mclk : minimum_clocks.memoryClock;
-
- polaris10_ps->performance_levels[0].engine_clock = sclk;
- polaris10_ps->performance_levels[0].memory_clock = mclk;
-
- polaris10_ps->performance_levels[1].engine_clock =
- (polaris10_ps->performance_levels[1].engine_clock >=
- polaris10_ps->performance_levels[0].engine_clock) ?
- polaris10_ps->performance_levels[1].engine_clock :
- polaris10_ps->performance_levels[0].engine_clock;
-
- if (disable_mclk_switching) {
- if (mclk < polaris10_ps->performance_levels[1].memory_clock)
- mclk = polaris10_ps->performance_levels[1].memory_clock;
-
- polaris10_ps->performance_levels[0].memory_clock = mclk;
- polaris10_ps->performance_levels[1].memory_clock = mclk;
- } else {
- if (polaris10_ps->performance_levels[1].memory_clock <
- polaris10_ps->performance_levels[0].memory_clock)
- polaris10_ps->performance_levels[1].memory_clock =
- polaris10_ps->performance_levels[0].memory_clock;
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
- for (i = 0; i < polaris10_ps->performance_level_count; i++) {
- polaris10_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
- polaris10_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
- polaris10_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
- polaris10_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
- }
- }
- return 0;
-}
-
-
-static int polaris10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
- struct pp_power_state *ps;
- struct polaris10_power_state *polaris10_ps;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
- if (low)
- return polaris10_ps->performance_levels[0].memory_clock;
- else
- return polaris10_ps->performance_levels
- [polaris10_ps->performance_level_count-1].memory_clock;
-}
-
-static int polaris10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
- struct pp_power_state *ps;
- struct polaris10_power_state *polaris10_ps;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
- if (low)
- return polaris10_ps->performance_levels[0].engine_clock;
- else
- return polaris10_ps->performance_levels
- [polaris10_ps->performance_level_count-1].engine_clock;
-}
-
-static int polaris10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
- struct pp_hw_power_state *hw_ps)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_power_state *ps = (struct polaris10_power_state *)hw_ps;
- ATOM_FIRMWARE_INFO_V2_2 *fw_info;
- uint16_t size;
- uint8_t frev, crev;
- int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
- /* First retrieve the Boot clocks and VDDC from the firmware info table.
- * We assume here that fw_info is unchanged if this call fails.
- */
- fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
- hwmgr->device, index,
- &size, &frev, &crev);
- if (!fw_info)
- /* During a test, there is no firmware info table. */
- return 0;
-
- /* Patch the state. */
- data->vbios_boot_state.sclk_bootup_value =
- le32_to_cpu(fw_info->ulDefaultEngineClock);
- data->vbios_boot_state.mclk_bootup_value =
- le32_to_cpu(fw_info->ulDefaultMemoryClock);
- data->vbios_boot_state.mvdd_bootup_value =
- le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
- data->vbios_boot_state.vddc_bootup_value =
- le16_to_cpu(fw_info->usBootUpVDDCVoltage);
- data->vbios_boot_state.vddci_bootup_value =
- le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
- data->vbios_boot_state.pcie_gen_bootup_value =
- phm_get_current_pcie_speed(hwmgr);
-
- data->vbios_boot_state.pcie_lane_bootup_value =
- (uint16_t)phm_get_current_pcie_lane_number(hwmgr);
-
- /* set boot power state */
- ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
- ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
- ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
- ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
-
- return 0;
-}
-
-static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
- void *state, struct pp_power_state *power_state,
- void *pp_table, uint32_t classification_flag)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_power_state *polaris10_power_state =
- (struct polaris10_power_state *)(&(power_state->hardware));
- struct polaris10_performance_level *performance_level;
- ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
- ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
- (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
- PPTable_Generic_SubTable_Header *sclk_dep_table =
- (PPTable_Generic_SubTable_Header *)
- (((unsigned long)powerplay_table) +
- le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
-
- ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
- (ATOM_Tonga_MCLK_Dependency_Table *)
- (((unsigned long)powerplay_table) +
- le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-
- /* The following fields are not initialized here: id, orderedList, allStatesList. */
- power_state->classification.ui_label =
- (le16_to_cpu(state_entry->usClassification) &
- ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
- ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
- power_state->classification.flags = classification_flag;
- /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
-
- power_state->classification.temporary_state = false;
- power_state->classification.to_be_deleted = false;
-
- power_state->validation.disallowOnDC =
- (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
- ATOM_Tonga_DISALLOW_ON_DC));
-
- power_state->pcie.lanes = 0;
-
- power_state->display.disableFrameModulation = false;
- power_state->display.limitRefreshrate = false;
- power_state->display.enableVariBright =
- (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
- ATOM_Tonga_ENABLE_VARIBRIGHT));
-
- power_state->validation.supportedPowerLevels = 0;
- power_state->uvd_clocks.VCLK = 0;
- power_state->uvd_clocks.DCLK = 0;
- power_state->temperatures.min = 0;
- power_state->temperatures.max = 0;
-
- performance_level = &(polaris10_power_state->performance_levels
- [polaris10_power_state->performance_level_count++]);
-
- PP_ASSERT_WITH_CODE(
- (polaris10_power_state->performance_level_count < SMU74_MAX_LEVELS_GRAPHICS),
- "Performance levels exceeds SMC limit!",
- return -1);
-
- PP_ASSERT_WITH_CODE(
- (polaris10_power_state->performance_level_count <=
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
- "Performance levels exceeds Driver limit!",
- return -1);
-
- /* Performance levels are arranged from low to high. */
- performance_level->memory_clock = mclk_dep_table->entries
- [state_entry->ucMemoryClockIndexLow].ulMclk;
- if (sclk_dep_table->ucRevId == 0)
- performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
- [state_entry->ucEngineClockIndexLow].ulSclk;
- else if (sclk_dep_table->ucRevId == 1)
- performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
- [state_entry->ucEngineClockIndexLow].ulSclk;
- performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
- state_entry->ucPCIEGenLow);
- performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
- state_entry->ucPCIELaneLow);
-
- performance_level = &(polaris10_power_state->performance_levels
- [polaris10_power_state->performance_level_count++]);
- performance_level->memory_clock = mclk_dep_table->entries
- [state_entry->ucMemoryClockIndexHigh].ulMclk;
-
- if (sclk_dep_table->ucRevId == 0)
- performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
- [state_entry->ucEngineClockIndexHigh].ulSclk;
- else if (sclk_dep_table->ucRevId == 1)
- performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
- [state_entry->ucEngineClockIndexHigh].ulSclk;
-
- performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
- state_entry->ucPCIEGenHigh);
- performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
- state_entry->ucPCIELaneHigh);
-
- return 0;
-}
-
-static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
- unsigned long entry_index, struct pp_power_state *state)
-{
- int result;
- struct polaris10_power_state *ps;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
- table_info->vdd_dep_on_mclk;
-
- state->hardware.magic = PHM_VIslands_Magic;
-
- ps = (struct polaris10_power_state *)(&state->hardware);
-
- result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
- polaris10_get_pp_table_entry_callback_func);
-
- /* This is the earliest point at which we have both the dependency table
- * and the VBIOS boot state, since PP_Tables_GetPowerPlayTableEntry
- * retrieves the VBIOS boot state. If there is only one VDDCI/MCLK level,
- * check that it matches the VBIOS boot state.
- */
- if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
- if (dep_mclk_table->entries[0].clk !=
- data->vbios_boot_state.mclk_bootup_value)
- printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
- "does not match VBIOS boot MCLK level");
- if (dep_mclk_table->entries[0].vddci !=
- data->vbios_boot_state.vddci_bootup_value)
- printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
- "does not match VBIOS boot VDDCI level");
- }
-
- /* set DC compatible flag if this state supports DC */
- if (!state->validation.disallowOnDC)
- ps->dc_compatible = true;
-
- if (state->classification.flags & PP_StateClassificationFlag_ACPI)
- data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
-
- ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
- ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
-
- if (!result) {
- uint32_t i;
-
- switch (state->classification.ui_label) {
- case PP_StateUILabel_Performance:
- data->use_pcie_performance_levels = true;
- for (i = 0; i < ps->performance_level_count; i++) {
- if (data->pcie_gen_performance.max <
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_performance.max =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_gen_performance.min >
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_performance.min =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_lane_performance.max <
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_performance.max =
- ps->performance_levels[i].pcie_lane;
- if (data->pcie_lane_performance.min >
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_performance.min =
- ps->performance_levels[i].pcie_lane;
- }
- break;
- case PP_StateUILabel_Battery:
- data->use_pcie_power_saving_levels = true;
-
- for (i = 0; i < ps->performance_level_count; i++) {
- if (data->pcie_gen_power_saving.max <
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_power_saving.max =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_gen_power_saving.min >
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_power_saving.min =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_lane_power_saving.max <
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_power_saving.max =
- ps->performance_levels[i].pcie_lane;
-
- if (data->pcie_lane_power_saving.min >
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_power_saving.min =
- ps->performance_levels[i].pcie_lane;
- }
- break;
- default:
- break;
- }
- }
- return 0;
-}
-
-static void
-polaris10_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
- uint32_t sclk, mclk, activity_percent;
- uint32_t offset;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
-
- sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
-
- mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
- seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n",
- mclk / 100, sclk / 100);
-
- offset = data->soft_regs_start + offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
- activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
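- /* AverageGraphicsActivity appears to be an 8.8 fixed-point percentage;
- * add 0x80 to round, then shift down to an integer percent.
- */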
- activity_percent += 0x80;
- activity_percent >>= 8;
-
- seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
-
- seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
-
- seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
-}
-
-static int polaris10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- const struct polaris10_power_state *polaris10_ps =
- cast_const_phw_polaris10_power_state(states->pnew_state);
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- uint32_t sclk = polaris10_ps->performance_levels
- [polaris10_ps->performance_level_count - 1].engine_clock;
- struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- uint32_t mclk = polaris10_ps->performance_levels
- [polaris10_ps->performance_level_count - 1].memory_clock;
- struct PP_Clocks min_clocks = {0};
- uint32_t i;
- struct cgs_display_info info = {0};
-
- data->need_update_smu7_dpm_table = 0;
-
- for (i = 0; i < sclk_table->count; i++) {
- if (sclk == sclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= sclk_table->count)
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- else {
- /* TODO: Check SCLK in DAL's minimum clocks
- * in case DeepSleep divider update is required.
- */
- if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
- (min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
- data->display_timing.min_clock_in_sr >= POLARIS10_MINIMUM_ENGINE_CLOCK))
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
- }
-
- for (i = 0; i < mclk_table->count; i++) {
- if (mclk == mclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= mclk_table->count)
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-
- return 0;
-}
-
-static uint16_t polaris10_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
- const struct polaris10_power_state *polaris10_ps)
-{
- uint32_t i;
- uint32_t sclk, max_sclk = 0;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_dpm_table *dpm_table = &data->dpm_table;
-
- for (i = 0; i < polaris10_ps->performance_level_count; i++) {
- sclk = polaris10_ps->performance_levels[i].engine_clock;
- if (max_sclk < sclk)
- max_sclk = sclk;
- }
-
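- /* The PCIe speed table parallels the SCLK table by index; clamp to its
- * last entry if it has fewer entries.
- */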
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
- return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
- dpm_table->pcie_speed_table.dpm_levels
- [dpm_table->pcie_speed_table.count - 1].value :
- dpm_table->pcie_speed_table.dpm_levels[i].value);
- }
-
- return 0;
-}
-
-static int polaris10_request_link_speed_change_before_state_change(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const struct polaris10_power_state *polaris10_nps =
- cast_const_phw_polaris10_power_state(states->pnew_state);
- const struct polaris10_power_state *polaris10_cps =
- cast_const_phw_polaris10_power_state(states->pcurrent_state);
-
- uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_nps);
- uint16_t current_link_speed;
-
- if (data->force_pcie_gen == PP_PCIEGenInvalid)
- current_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_cps);
- else
- current_link_speed = data->force_pcie_gen;
-
- data->force_pcie_gen = PP_PCIEGenInvalid;
- data->pspp_notify_required = false;
-
- if (target_link_speed > current_link_speed) {
- switch (target_link_speed) {
- case PP_PCIEGen3:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
- break;
- data->force_pcie_gen = PP_PCIEGen2;
- if (current_link_speed == PP_PCIEGen2)
- break;
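- /* fall through - the Gen3 request failed, try Gen2 */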
- case PP_PCIEGen2:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
- break;
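- /* fall through - fall back to the current speed */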
- default:
- data->force_pcie_gen = phm_get_current_pcie_speed(hwmgr);
- break;
- }
- } else {
- if (target_link_speed < current_link_speed)
- data->pspp_notify_required = true;
- }
-
- return 0;
-}
-
-static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if ((0 == data->sclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
- "Trying to freeze SCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_FreezeLevel),
- "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- if ((0 == data->mclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
- "Trying to freeze MCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_FreezeLevel),
- "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- return 0;
-}
-
-static int polaris10_populate_and_upload_sclk_mclk_dpm_levels(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- int result = 0;
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- const struct polaris10_power_state *polaris10_ps =
- cast_const_phw_polaris10_power_state(states->pnew_state);
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t sclk = polaris10_ps->performance_levels
- [polaris10_ps->performance_level_count - 1].engine_clock;
- uint32_t mclk = polaris10_ps->performance_levels
- [polaris10_ps->performance_level_count - 1].memory_clock;
- struct polaris10_dpm_table *dpm_table = &data->dpm_table;
-
- struct polaris10_dpm_table *golden_dpm_table = &data->golden_dpm_table;
- uint32_t dpm_count, clock_percent;
- uint32_t i;
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
- dpm_table->sclk_table.dpm_levels
- [dpm_table->sclk_table.count - 1].value = sclk;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
- /* Need to do calculation based on the golden DPM table
- * as the Heatmap GPU Clock axis is also based on the default values
- */
- PP_ASSERT_WITH_CODE(
- (golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count - 1].value != 0),
- "Divide by 0!",
- return -1);
- dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
-
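- /* Scale each intermediate level by the same percentage the top level
- * was raised or lowered, preserving the shape of the golden table.
- */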
- for (i = dpm_count; i > 1; i--) {
- if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
- clock_percent =
- ((sclk
- - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
- ) * 100)
- / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
-
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value +
- (golden_dpm_table->sclk_table.dpm_levels[i].value *
- clock_percent)/100;
-
- } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
- clock_percent =
- ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
- - sclk) * 100)
- / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
-
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value -
- (golden_dpm_table->sclk_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value;
- }
- }
- }
-
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
- dpm_table->mclk_table.dpm_levels
- [dpm_table->mclk_table.count - 1].value = mclk;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
- PP_ASSERT_WITH_CODE(
- (golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value != 0),
- "Divide by 0!",
- return -1);
- dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
- for (i = dpm_count; i > 1; i--) {
- if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
- clock_percent = ((mclk -
- golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
- / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
-
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value +
- (golden_dpm_table->mclk_table.dpm_levels[i].value *
- clock_percent) / 100;
-
- } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
- clock_percent = (
- (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
- * 100)
- / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
-
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value -
- (golden_dpm_table->mclk_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value;
- }
- }
- }
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
- result = polaris10_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
- /*populate MCLK dpm table to SMU7 */
- result = polaris10_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- return result;
-}
-
-static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
- struct polaris10_single_dpm_table *dpm_table,
- uint32_t low_limit, uint32_t high_limit)
-{
- uint32_t i;
-
- for (i = 0; i < dpm_table->count; i++) {
- if ((dpm_table->dpm_levels[i].value < low_limit)
- || (dpm_table->dpm_levels[i].value > high_limit))
- dpm_table->dpm_levels[i].enabled = false;
- else
- dpm_table->dpm_levels[i].enabled = true;
- }
-
- return 0;
-}
-
-static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
- const struct polaris10_power_state *polaris10_ps)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t high_limit_count;
-
- PP_ASSERT_WITH_CODE((polaris10_ps->performance_level_count >= 1),
- "power state did not have any performance level",
- return -1);
-
- high_limit_count = (1 == polaris10_ps->performance_level_count) ? 0 : 1;
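- /* With a single performance level, entry 0 serves as both the low and the high limit. */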
-
- polaris10_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.sclk_table),
- polaris10_ps->performance_levels[0].engine_clock,
- polaris10_ps->performance_levels[high_limit_count].engine_clock);
-
- polaris10_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.mclk_table),
- polaris10_ps->performance_levels[0].memory_clock,
- polaris10_ps->performance_levels[high_limit_count].memory_clock);
-
- return 0;
-}
-
-static int polaris10_generate_dpm_level_enable_mask(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- int result;
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const struct polaris10_power_state *polaris10_ps =
- cast_const_phw_polaris10_power_state(states->pnew_state);
-
- result = polaris10_trim_dpm_states(hwmgr, polaris10_ps);
- if (result)
- return result;
-
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
- data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
-
- return 0;
-}
-
-int polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- PPSMC_MSG_UVDDPM_Enable :
- PPSMC_MSG_UVDDPM_Disable);
-}
-
-int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- PPSMC_MSG_VCEDPM_Enable :
- PPSMC_MSG_VCEDPM_Disable);
-}
-
-int polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- PPSMC_MSG_SAMUDPM_Enable :
- PPSMC_MSG_SAMUDPM_Disable);
-}
-
-int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (!bgate) {
- data->smc_state_table.UvdBootLevel = 0;
- if (table_info->mm_dep_table->count > 0)
- data->smc_state_table.UvdBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
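- /* Round the byte offset down to dword alignment; SMC indirect accesses
- * operate on 32-bit words.
- */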
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
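- /* UvdBootLevel occupies the top byte (bits 31:24) of this dword. */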
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.UvdBootLevel));
- }
-
- return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate);
-}
-
-int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (!bgate) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- data->smc_state_table.VceBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- else
- data->smc_state_table.VceBootLevel = 0;
-
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
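- /* VceBootLevel occupies bits 23:16 of this dword. */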
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << data->smc_state_table.VceBootLevel);
- }
-
- polaris10_enable_disable_vce_dpm(hwmgr, !bgate);
-
- return 0;
-}
-
-int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
- if (!bgate) {
- data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.SamuBootLevel));
- }
-
- return polaris10_enable_disable_samu_dpm(hwmgr, !bgate);
-}
-
-static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold =
- data->low_sclk_interrupt_threshold;
-
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = polaris10_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- data->sram_end);
- }
-
- return result;
-}
-
-static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return polaris10_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if ((0 == data->sclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
-
- PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
- "Trying to Unfreeze SCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
- "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
- return -1);
- }
-
- if ((0 == data->mclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
-
- PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
- "Trying to Unfreeze MCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_UnfreezeLevel),
- "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
- return -1);
- }
-
- data->need_update_smu7_dpm_table = 0;
-
- return 0;
-}
-
-static int polaris10_notify_link_speed_change_after_state_change(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const struct polaris10_power_state *polaris10_ps =
- cast_const_phw_polaris10_power_state(states->pnew_state);
- uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_ps);
- uint8_t request;
-
- if (data->pspp_notify_required) {
- if (target_link_speed == PP_PCIEGen3)
- request = PCIE_PERF_REQ_GEN3;
- else if (target_link_speed == PP_PCIEGen2)
- request = PCIE_PERF_REQ_GEN2;
- else
- request = PCIE_PERF_REQ_GEN1;
-
- if (request == PCIE_PERF_REQ_GEN1 &&
- phm_get_current_pcie_speed(hwmgr) > 0)
- return 0;
-
- if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
- if (PP_PCIEGen2 == target_link_speed)
- printk("PSPP request to switch to Gen2 from Gen3 Failed!");
- else
- printk("PSPP request to switch to Gen1 from Gen2 Failed!");
- }
- }
-
- return 0;
-}
-
-static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
- return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
-}
-
-
-
-static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
-{
- int tmp_result, result = 0;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- tmp_result = polaris10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to find DPM states clocks in DPM table!",
- result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PCIEPerformanceRequest)) {
- tmp_result =
- polaris10_request_link_speed_change_before_state_change(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to request link speed change before state change!",
- result = tmp_result);
- }
-
- tmp_result = polaris10_freeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
-
- tmp_result = polaris10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to populate and upload SCLK MCLK DPM levels!",
- result = tmp_result);
-
- tmp_result = polaris10_generate_dpm_level_enable_mask(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to generate DPM level enabled mask!",
- result = tmp_result);
-
- tmp_result = polaris10_update_sclk_threshold(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to update SCLK threshold!",
- result = tmp_result);
-
- tmp_result = polaris10_program_mem_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program memory timing parameters!",
- result = tmp_result);
-
- tmp_result = polaris10_notify_smc_display(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to notify smc display settings!",
- result = tmp_result);
-
- tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to unfreeze SCLK MCLK DPM!",
- result = tmp_result);
-
- tmp_result = polaris10_upload_dpm_level_enable_mask(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to upload DPM level enabled mask!",
- result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PCIEPerformanceRequest)) {
- tmp_result =
- polaris10_notify_link_speed_change_after_state_change(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to notify link speed change after state change!",
- result = tmp_result);
- }
- data->apply_optimized_settings = false;
- return result;
-}
-
-static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
-{
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
-
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
-}
-
-
-int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
-{
- PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
-
- return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
-}
-
-int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
-{
- uint32_t num_active_displays = 0;
- struct cgs_display_info info = {0};
- info.mode_info = NULL;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- num_active_displays = info.display_count;
-
- if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
- polaris10_notify_smc_display_change(hwmgr, false);
-
-
- return 0;
-}
-
-/**
-* Programs the display gap
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always OK
-*/
-int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t num_active_displays = 0;
- uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
- uint32_t display_gap2;
- uint32_t pre_vbi_time_in_us;
- uint32_t frame_time_in_us;
- uint32_t ref_clock;
- uint32_t refresh_rate = 0;
- struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
-
- info.mode_info = &mode_info;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- num_active_displays = info.display_count;
-
- display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
-
- ref_clock = mode_info.ref_clock;
- refresh_rate = mode_info.refresh_rate;
-
- if (0 == refresh_rate)
- refresh_rate = 60;
-
- frame_time_in_us = 1000000 / refresh_rate;
-
- pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
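- /* Active (non-vblank) portion of the frame, less a 200 us margin. */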
- data->frame_time_x2 = frame_time_in_us * 2 / 100;
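- /* Twice the frame time, in 100 us units; consumed by
- * PPSMC_MSG_SetVBITimeout in polaris10_notify_smc_display().
- */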
-
- display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, PreVBlankGap), 0x64);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
-
-
- return 0;
-}
-
-
-int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
-{
- return polaris10_program_display_gap(hwmgr);
-}
-
-/**
-* Set maximum target operating fan output RPM
-*
- * @param hwmgr the address of the powerplay hardware manager.
- * @param us_max_fan_rpm max operating fan RPM value.
-* @return The response that came from the SMC.
-*/
-static int polaris10_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
-{
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
-
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
-}
-
-int polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
- const void *thermal_interrupt_info)
-{
- return 0;
-}
-
-bool polaris10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- bool is_update_required = false;
- struct cgs_display_info info = {0, 0, NULL};
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
- is_update_required = true;
-/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
- if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
- cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
- if (min_clocks.engineClockInSR != data->display_timing.minClockInSR &&
- (min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
- data->display_timing.minClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK))
- is_update_required = true;
-*/
- return is_update_required;
-}
-
-static inline bool polaris10_are_power_levels_equal(const struct polaris10_performance_level *pl1,
- const struct polaris10_performance_level *pl2)
-{
- return ((pl1->memory_clock == pl2->memory_clock) &&
- (pl1->engine_clock == pl2->engine_clock) &&
- (pl1->pcie_gen == pl2->pcie_gen) &&
- (pl1->pcie_lane == pl2->pcie_lane));
-}
-
-int polaris10_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
-{
- const struct polaris10_power_state *psa = cast_const_phw_polaris10_power_state(pstate1);
- const struct polaris10_power_state *psb = cast_const_phw_polaris10_power_state(pstate2);
- int i;
-
- if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
- return -EINVAL;
-
- /* If the two states don't even have the same number of performance levels they cannot be the same state. */
- if (psa->performance_level_count != psb->performance_level_count) {
- *equal = false;
- return 0;
- }
-
- for (i = 0; i < psa->performance_level_count; i++) {
- if (!polaris10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
- /* If we have found even one performance level pair that is different the states are different. */
- *equal = false;
- return 0;
- }
- }
-
- /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
- *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
- *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
- *equal &= (psa->sclk_threshold == psb->sclk_threshold);
-
- return 0;
-}
-
-int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- uint32_t vbios_version;
-
- /* Read MC indirect register offset 0x9F bits [3:0] to see if VBIOS has already loaded a full version of MC ucode or not.*/
-
- phm_get_mc_microcode_version(hwmgr);
- vbios_version = hwmgr->microcode_version_info.MC & 0xf;
- /* A zero low nibble means the full MC ucode has already been loaded. */
- if (vbios_version == 0) {
- data->need_long_memory_training = false;
- return 0;
- }
-
- data->need_long_memory_training = false;
-
-/*
- * PPMCME_FirmwareDescriptorEntry *pfd = NULL;
- pfd = &tonga_mcmeFirmware;
- if (0 == PHM_READ_FIELD(hwmgr->device, MC_SEQ_SUP_CNTL, RUN))
- polaris10_load_mc_microcode(hwmgr, pfd->dpmThreshold,
- pfd->cfgArray, pfd->cfgSize, pfd->ioDebugArray,
- pfd->ioDebugSize, pfd->ucodeArray, pfd->ucodeSize);
-*/
- return 0;
-}
-
-/**
- * Read clock related registers.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int polaris10_read_clock_registers(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- data->clock_registers.vCG_SPLL_FUNC_CNTL = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL)
- & CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK;
-
- data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2)
- & CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
-
- data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4)
- & CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK;
-
- return 0;
-}
-
-/**
- * Find out if memory is GDDR5.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int polaris10_get_memory_type(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t temp;
-
- temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
- data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
- ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
- MC_SEQ_MISC0_GDDR5_SHIFT));
-
- return 0;
-}
-
-/**
- * Enables Dynamic Power Management by SMC
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int polaris10_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, STATIC_PM_EN, 1);
-
- return 0;
-}
-
-/**
- * Initialize PowerGating States for different engines
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int polaris10_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- data->uvd_power_gated = false;
- data->vce_power_gated = false;
- data->samu_power_gated = false;
-
- return 0;
-}
-
-static int polaris10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- data->low_sclk_interrupt_threshold = 0;
-
- return 0;
-}
-
-int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- polaris10_upload_mc_firmware(hwmgr);
-
- tmp_result = polaris10_read_clock_registers(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to read clock registers!", result = tmp_result);
-
- tmp_result = polaris10_get_memory_type(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get memory type!", result = tmp_result);
-
- tmp_result = polaris10_enable_acpi_power_management(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable ACPI power management!", result = tmp_result);
-
- tmp_result = polaris10_init_power_gate_state(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to init power gate state!", result = tmp_result);
-
- tmp_result = phm_get_mc_microcode_version(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get MC microcode version!", result = tmp_result);
-
- tmp_result = polaris10_init_sclk_threshold(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to init sclk threshold!", result = tmp_result);
-
- return result;
-}
-
-static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, uint32_t mask)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
- return -EINVAL;
-
- switch (type) {
- case PP_SCLK:
- if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
- break;
- case PP_MCLK:
- if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
- break;
- case PP_PCIE:
- {
- uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- uint32_t level = 0;
-
- while (tmp >>= 1)
- level++;
-
- if (!data->pcie_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- level);
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
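
In the PP_PCIE case above, the requested mask is first restricted to the enabled levels and then collapsed to a single forced level: shifting right until the value reaches zero counts the index of the highest set bit. A standalone sketch of the idiom, with made-up mask values:

#include <stdint.h>
#include <stdio.h>

/* Index of the highest set bit, exactly as the while loop above
 * computes it; an empty mask yields level 0, matching the kernel code. */
static uint32_t highest_level(uint32_t mask)
{
	uint32_t level = 0;

	while (mask >>= 1)
		level++;
	return level;
}

int main(void)
{
	uint32_t enable_mask = 0x7;	/* levels 0..2 enabled */
	uint32_t requested = 0x5;	/* user asked for levels 0 and 2 */

	printf("forced level = %u\n", highest_level(requested & enable_mask));
	return 0;	/* prints "forced level = 2" */
}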
-
-static uint16_t polaris10_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
-{
- uint32_t speedCntl = 0;
-
- /* mmPCIE_PORT_INDEX was renamed to mmPCIE_INDEX */
- speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
- ixPCIE_LC_SPEED_CNTL);
- return((uint16_t)PHM_GET_FIELD(speedCntl,
- PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-static int polaris10_print_clock_levels(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, char *buf)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- struct polaris10_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
- int i, now, size = 0;
- uint32_t clock, pcie_speed;
-
- switch (type) {
- case PP_SCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- for (i = 0; i < sclk_table->count; i++) {
- if (clock > sclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < sclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, sclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_MCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- for (i = 0; i < mclk_table->count; i++) {
- if (clock > mclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < mclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, mclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_PCIE:
- pcie_speed = polaris10_get_current_pcie_speed(hwmgr);
- for (i = 0; i < pcie_table->count; i++) {
- if (pcie_speed != pcie_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < pcie_table->count; i++)
- size += sprintf(buf + size, "%d: %s %s\n", i,
- (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
- (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
- (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
- (i == now) ? "*" : "");
- break;
- default:
- break;
- }
- return size;
-}
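
The "/ 100" in the sprintf calls above implies the DPM level values are stored in 10 kHz units, so a stored 60000 prints as 600Mhz. A short sketch reproducing the same sysfs-style listing with hypothetical levels:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical SCLK DPM levels in 10 kHz units. */
	uint32_t levels[] = { 30000, 60000, 90000 };
	int i, now = 1, size = 0;	/* level 1 currently selected */
	char buf[128];

	for (i = 0; i < 3; i++)
		size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, levels[i] / 100, (i == now) ? "*" : "");
	fputs(buf, stdout);	/* "0: 300Mhz \n1: 600Mhz *\n2: 900Mhz \n" */
	return 0;
}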
-
-static int polaris10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
- if (mode) {
- /* stop auto-manage */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
- polaris10_fan_ctrl_set_static_mode(hwmgr, mode);
- } else
- /* restart auto-manage */
- polaris10_fan_ctrl_reset_fan_speed_to_default(hwmgr);
-
- return 0;
-}
-
-static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr->fan_ctrl_is_in_default_mode)
- return hwmgr->fan_ctrl_default_mode;
- else
- return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, FDO_PWM_MODE);
-}
-
-static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- struct polaris10_single_dpm_table *golden_sclk_table =
- &(data->golden_dpm_table.sclk_table);
- int value;
-
- value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
- 100 /
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return value;
-}
-
-static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_single_dpm_table *golden_sclk_table =
- &(data->golden_dpm_table.sclk_table);
- struct pp_power_state *ps;
- struct polaris10_power_state *polaris10_ps;
-
- if (value > 20)
- value = 20;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
- polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock =
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
- value / 100 +
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return 0;
-}
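
polaris10_get_sclk_od() and polaris10_set_sclk_od() round-trip an overdrive percentage against the golden (stock) top DPM level: the setter scales the golden clock up by value percent (clamped to 20), and the getter recovers the percentage by integer division. A worked standalone example, assuming a hypothetical 1266 MHz stock clock stored in 10 kHz units:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t golden = 126600;	/* assumed stock top SCLK, 10 kHz units */
	uint32_t od_percent = 7;	/* would be clamped to 20 by the setter */

	/* set_sclk_od: */
	uint32_t engine_clock = golden * od_percent / 100 + golden;
	/* get_sclk_od inverts it: */
	int read_back = (engine_clock - golden) * 100 / golden;

	printf("engine clock = %u, od = %d%%\n", engine_clock, read_back);
	return 0;	/* engine clock = 135462, od = 7% */
}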
-
-static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- struct polaris10_single_dpm_table *golden_mclk_table =
- &(data->golden_dpm_table.mclk_table);
- int value;
-
- value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
- 100 /
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return value;
-}
-
-static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_single_dpm_table *golden_mclk_table =
- &(data->golden_dpm_table.mclk_table);
- struct pp_power_state *ps;
- struct polaris10_power_state *polaris10_ps;
-
- if (value > 20)
- value = 20;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
- polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock =
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
- value / 100 +
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return 0;
-}
-static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
- .backend_init = &polaris10_hwmgr_backend_init,
- .backend_fini = &polaris10_hwmgr_backend_fini,
- .asic_setup = &polaris10_setup_asic_task,
- .dynamic_state_management_enable = &polaris10_enable_dpm_tasks,
- .apply_state_adjust_rules = polaris10_apply_state_adjust_rules,
- .force_dpm_level = &polaris10_force_dpm_level,
- .power_state_set = polaris10_set_power_state_tasks,
- .get_power_state_size = polaris10_get_power_state_size,
- .get_mclk = polaris10_dpm_get_mclk,
- .get_sclk = polaris10_dpm_get_sclk,
- .patch_boot_state = polaris10_dpm_patch_boot_state,
- .get_pp_table_entry = polaris10_get_pp_table_entry,
- .get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
- .print_current_perforce_level = polaris10_print_current_perforce_level,
- .powerdown_uvd = polaris10_phm_powerdown_uvd,
- .powergate_uvd = polaris10_phm_powergate_uvd,
- .powergate_vce = polaris10_phm_powergate_vce,
- .disable_clock_power_gating = polaris10_phm_disable_clock_power_gating,
- .update_clock_gatings = polaris10_phm_update_clock_gatings,
- .notify_smc_display_config_after_ps_adjustment = polaris10_notify_smc_display_config_after_ps_adjustment,
- .display_config_changed = polaris10_display_configuration_changed_task,
- .set_max_fan_pwm_output = polaris10_set_max_fan_pwm_output,
- .set_max_fan_rpm_output = polaris10_set_max_fan_rpm_output,
- .get_temperature = polaris10_thermal_get_temperature,
- .stop_thermal_controller = polaris10_thermal_stop_thermal_controller,
- .get_fan_speed_info = polaris10_fan_ctrl_get_fan_speed_info,
- .get_fan_speed_percent = polaris10_fan_ctrl_get_fan_speed_percent,
- .set_fan_speed_percent = polaris10_fan_ctrl_set_fan_speed_percent,
- .reset_fan_speed_to_default = polaris10_fan_ctrl_reset_fan_speed_to_default,
- .get_fan_speed_rpm = polaris10_fan_ctrl_get_fan_speed_rpm,
- .set_fan_speed_rpm = polaris10_fan_ctrl_set_fan_speed_rpm,
- .uninitialize_thermal_controller = polaris10_thermal_ctrl_uninitialize_thermal_controller,
- .register_internal_thermal_interrupt = polaris10_register_internal_thermal_interrupt,
- .check_smc_update_required_for_display_configuration = polaris10_check_smc_update_required_for_display_configuration,
- .check_states_equal = polaris10_check_states_equal,
- .set_fan_control_mode = polaris10_set_fan_control_mode,
- .get_fan_control_mode = polaris10_get_fan_control_mode,
- .force_clock_level = polaris10_force_clock_level,
- .print_clock_levels = polaris10_print_clock_levels,
- .enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating,
- .get_sclk_od = polaris10_get_sclk_od,
- .set_sclk_od = polaris10_set_sclk_od,
- .get_mclk_od = polaris10_get_mclk_od,
- .set_mclk_od = polaris10_set_mclk_od,
-};
-
-int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
-{
- hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
- hwmgr->pptable_func = &tonga_pptable_funcs;
- pp_polaris10_thermal_initialize(hwmgr);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
deleted file mode 100644
index b206632d4650..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
+++ /dev/null
@@ -1,716 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <asm/div64.h>
-#include "polaris10_thermal.h"
-#include "polaris10_hwmgr.h"
-#include "polaris10_smumgr.h"
-#include "polaris10_ppsmc.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-
-int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
- struct phm_fan_speed_info *fan_speed_info)
-{
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
-
- fan_speed_info->supports_percent_read = true;
- fan_speed_info->supports_percent_write = true;
- fan_speed_info->min_percent = 0;
- fan_speed_info->max_percent = 100;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
- hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
- fan_speed_info->supports_rpm_read = true;
- fan_speed_info->supports_rpm_write = true;
- fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
- fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
- } else {
- fan_speed_info->min_rpm = 0;
- fan_speed_info->max_rpm = 0;
- }
-
- return 0;
-}
-
-int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
- uint32_t *speed)
-{
- uint32_t duty100;
- uint32_t duty;
- uint64_t tmp64;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
- duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_STATUS, FDO_PWM_DUTY);
-
- if (duty100 == 0)
- return -EINVAL;
-
-
- tmp64 = (uint64_t)duty * 100;
- do_div(tmp64, duty100);
- *speed = (uint32_t)tmp64;
-
- if (*speed > 100)
- *speed = 100;
-
- return 0;
-}
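
The percent readback above is duty scaled by 100 and divided by duty100, computed in 64 bits to avoid overflow; do_div() is the kernel helper that divides a u64 in place. A userspace sketch with hypothetical register values, using a plain 64-bit division where the kernel uses do_div():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t duty100 = 255;	/* assumed FMAX_DUTY100: raw duty at 100% */
	uint32_t duty = 128;	/* assumed current FDO_PWM_DUTY reading */
	uint64_t tmp64 = (uint64_t)duty * 100;
	uint32_t speed;

	tmp64 /= duty100;	/* stands in for do_div(tmp64, duty100) */
	speed = (uint32_t)tmp64;
	if (speed > 100)
		speed = 100;

	printf("fan speed = %u%%\n", speed);	/* prints 50 */
	return 0;
}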
-
-int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
-{
- uint32_t tach_period;
- uint32_t crystal_clock_freq;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan ||
- (hwmgr->thermal_controller.fanInfo.
- ucTachometerPulsesPerRevolution == 0))
- return 0;
-
- tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_TACH_STATUS, TACH_PERIOD);
-
- if (tach_period == 0)
- return -EINVAL;
-
- crystal_clock_freq = tonga_get_xclk(hwmgr);
-
- *speed = 60 * crystal_clock_freq * 10000 / tach_period;
-
- return 0;
-}
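
The RPM readback divides the reference clock by the measured tachometer period. Assuming tonga_get_xclk() reports the crystal clock in 10 kHz units (so the * 10000 factor converts it to Hz), a worked example with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t crystal_clock_freq = 10000;	/* assumed 100 MHz, in 10 kHz units */
	uint32_t tach_period = 4000000;		/* made-up TACH_PERIOD reading */

	/* 60 * Hz / cycles-per-revolution = revolutions per minute; the
	 * multiplication needs 64 bits, as 60 * 10000 * 10000 overflows u32. */
	uint32_t rpm = (uint32_t)(60ULL * crystal_clock_freq * 10000 / tach_period);

	printf("fan speed = %u RPM\n", rpm);	/* prints 1500 */
	return 0;
}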
-
-/**
-* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param mode the fan control mode: 0 = default, 1 = by percent, 5 = by RPM
-* @exception Should always succeed.
-*/
-int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
-
- if (hwmgr->fan_ctrl_is_in_default_mode) {
- hwmgr->fan_ctrl_default_mode =
- PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, FDO_PWM_MODE);
- hwmgr->tmin =
- PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, TMIN);
- hwmgr->fan_ctrl_is_in_default_mode = false;
- }
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, TMIN, 0);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, FDO_PWM_MODE, mode);
-
- return 0;
-}
-
-/**
-* Reset Fan Speed Control to default mode.
-* @param hwmgr the address of the powerplay hardware manager.
-* @exception Should always succeed.
-*/
-int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
-{
- if (!hwmgr->fan_ctrl_is_in_default_mode) {
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, TMIN, hwmgr->tmin);
- hwmgr->fan_ctrl_is_in_default_mode = true;
- }
-
- return 0;
-}
-
-int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM))
- hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM);
- else
- hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr,
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanPWM);
-
- } else {
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
- }
-
- if (!result && hwmgr->thermal_controller.
- advanceFanControlParameters.ucTargetTemperature)
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanTemperatureTarget,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ucTargetTemperature);
-
- return result;
-}
-
-
-int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
-}
-
-/**
-* Set Fan Speed in percent.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param speed is the percentage value (0% - 100%) to be set.
-* @exception Fails if the 100% setting appears to be 0.
-*/
-int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
- uint32_t speed)
-{
- uint32_t duty100;
- uint32_t duty;
- uint64_t tmp64;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
-
- if (speed > 100)
- speed = 100;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (duty100 == 0)
- return -EINVAL;
-
- tmp64 = (uint64_t)speed * duty100;
- do_div(tmp64, 100);
- duty = (uint32_t)tmp64;
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
-
- return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-}
-
-/**
-* Reset Fan Speed to default.
-* @param hwmgr the address of the powerplay hardware manager.
-* @exception Always succeeds.
-*/
-int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
- result = polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
- if (!result)
- result = polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
- } else
- result = polaris10_fan_ctrl_set_default_mode(hwmgr);
-
- return result;
-}
-
-/**
-* Set Fan Speed in RPM.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param speed is the RPM value (min - max) to be set.
-* @exception Fails if the speed does not lie between min and max.
-*/
-int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
-{
- uint32_t tach_period;
- uint32_t crystal_clock_freq;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan ||
- (hwmgr->thermal_controller.fanInfo.
- ucTachometerPulsesPerRevolution == 0) ||
- (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
- (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
- return 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
-
- crystal_clock_freq = tonga_get_xclk(hwmgr);
-
- tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_TACH_STATUS, TACH_PERIOD, tach_period);
-
- return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-}
-
-/**
-* Reads the remote temperature from the Polaris thermal controller.
-*
-* @param hwmgr The address of the hardware manager.
-*/
-int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
-{
- int temp;
-
- temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_MULT_THERMAL_STATUS, CTF_TEMP);
-
- /* Bit 9 means the reading is lower than the lowest usable value. */
- if (temp & 0x200)
- temp = POLARIS10_THERMAL_MAXIMUM_TEMP_READING;
- else
- temp = temp & 0x1ff;
-
- temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
- return temp;
-}
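
CTF_TEMP is read as a 9-bit value with bit 9 flagging a reading below the usable range, and the result is scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES (assumed here to be 1000, i.e. millidegrees, as in the powerplay headers). A standalone sketch of the decode:

#include <stdio.h>

#define UNITS_PER_CENTIGRADE 1000	/* assumed PP_TEMPERATURE_UNITS_PER_CENTIGRADES */
#define MAX_TEMP_READING 255		/* assumed POLARIS10_THERMAL_MAXIMUM_TEMP_READING */

static int decode_ctf_temp(int raw)
{
	int temp;

	if (raw & 0x200)	/* under-range flag: clamp, as the driver does */
		temp = MAX_TEMP_READING;
	else
		temp = raw & 0x1ff;
	return temp * UNITS_PER_CENTIGRADE;
}

int main(void)
{
	printf("%d\n", decode_ctf_temp(0x05a));	/* 90 -> 90000 */
	printf("%d\n", decode_ctf_temp(0x200));	/* clamped -> 255000 */
	return 0;
}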
-
-/**
-* Set the requested temperature range for high and low alert signals
-*
-* @param hwmgr The address of the hardware manager.
-* @param low_temp requested low alert temperature
-* @param high_temp requested high alert temperature
-* @return -EINVAL if the clamped range ends up inverted (low above high)
-*/
-static int polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
- uint32_t low_temp, uint32_t high_temp)
-{
- uint32_t low = POLARIS10_THERMAL_MINIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- uint32_t high = POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
- if (low < low_temp)
- low = low_temp;
- if (high > high_temp)
- high = high_temp;
-
- if (low > high)
- return -EINVAL;
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_INT, DIG_THERM_INTH,
- (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_INT, DIG_THERM_INTL,
- (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_CTRL, DIG_THERM_DPM,
- (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-
- return 0;
-}
-
-/**
-* Programs thermal controller one-time setting registers
-*
-* @param hwmgr The address of the hardware manager.
-*/
-static int polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_TACH_CTRL, EDGE_PER_REV,
- hwmgr->thermal_controller.fanInfo.
- ucTachometerPulsesPerRevolution - 1);
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
-
- return 0;
-}
-
-/**
-* Enable thermal alerts on the Polaris thermal controller.
-*
-* @param hwmgr The address of the hardware manager.
-*/
-static int polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
-{
- uint32_t alert;
-
- alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_INT, THERM_INT_MASK);
- alert &= ~(POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_INT, THERM_INT_MASK, alert);
-
- /* send message to SMU to enable internal thermal interrupts */
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
-}
-
-/**
-* Disable thermal alerts on the Polaris thermal controller.
-* @param hwmgr The address of the hardware manager.
-*/
-static int polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
-{
- uint32_t alert;
-
- alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_INT, THERM_INT_MASK);
- alert |= (POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_INT, THERM_INT_MASK, alert);
-
- /* send message to SMU to disable internal thermal interrupts */
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
-}
-
-/**
-* Uninitialize the thermal controller.
-* Disables alerts and, if a fan is present, restores the default fan control mode.
-* @param hwmgr The address of the hardware manager.
-*/
-int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
-{
- int result = polaris10_thermal_disable_alert(hwmgr);
-
- if (!hwmgr->thermal_controller.fanInfo.bNoFan)
- polaris10_fan_ctrl_set_default_mode(hwmgr);
-
- return result;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param input the pointer to input data
-* @param output the pointer to output data
-* @param storage the pointer to temporary storage
-* @param result the last failure code
-* @return always 0; on failure the MicrocodeFanControl cap is cleared instead
-*/
-int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- uint32_t duty100;
- uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- uint16_t fdo_min, slope1, slope2;
- uint32_t reference_clock;
- int res;
- uint64_t tmp64;
-
- if (data->fan_table_start == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (duty100 == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
- usPWMMin * duty100;
- do_div(tmp64, 10000);
- fdo_min = (uint16_t)tmp64;
-
- t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
- t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
- pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
- pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
- slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.TempMin = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMin) / 100);
- fan_table.TempMed = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMed) / 100);
- fan_table.TempMax = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(hwmgr->
- thermal_controller.advanceFanControlParameters.ucTHyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = tonga_get_xclk(hwmgr);
-
- fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
- thermal_controller.advanceFanControlParameters.ulCycleDelay *
- reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
- fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
- hwmgr->device, CGS_IND_REG__SMC,
- CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
- res = polaris10_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
- (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
- data->sram_end);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanMinPwm,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanSclkTarget,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
-
- if (res)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
-
- return 0;
-}
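
The slope terms above are computed in a x16 fixed-point format scaled by duty100, and the + 50 before the final / 100 rounds to nearest instead of truncating. A standalone sketch with made-up fan-table differences:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t duty100 = 255;		/* assumed FMAX_DUTY100 */
	uint32_t pwm_diff1 = 30;	/* made-up usPWMMed - usPWMMin */
	uint32_t t_diff1 = 30;		/* made-up usTMed - usTMin */

	/* Same expression as the driver: gradient scaled by 16 * duty100,
	 * rounded to nearest by adding 50 before dividing by 100. */
	uint16_t slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);

	printf("slope1 = %u\n", slope1);	/* prints 41 */
	return 0;
}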
-
-/**
-* Start the fan control on the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param input the pointer to input data
-* @param output the pointer to output data
-* @param storage the pointer to temporary storage
-* @param result the last failure code
-* @return always 0
-*/
-int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
-	/* If the fan table setup has failed we could have disabled
-	 * PHM_PlatformCaps_MicrocodeFanControl even after
-	 * this function was included in the table.
-	 * Make sure that we still think controlling the fan is OK.
-	 */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
- polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
- polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
- }
-
- return 0;
-}
-
-/**
-* Set temperature range for high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param input the pointer to input data (a struct PP_TemperatureRange)
-* @param output the pointer to output data
-* @param storage the pointer to temporary storage
-* @param result the last failure code
-* @return result from set temperature range routine
-*/
-int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
-
- if (range == NULL)
- return -EINVAL;
-
- return polaris10_thermal_set_temperature_range(hwmgr, range->min, range->max);
-}
-
-/**
-* Programs one-time setting registers
-* @param hwmgr the address of the powerplay hardware manager.
-* @param input the pointer to input data
-* @param output the pointer to output data
-* @param storage the pointer to temporary storage
-* @param result the last failure code
-* @return result from initialize thermal controller routine
-*/
-int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return polaris10_thermal_initialize(hwmgr);
-}
-
-/**
-* Enable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param input the pointer to input data
-* @param output the pointer to output data
-* @param storage the pointer to temporary storage
-* @param result the last failure code
-* @return result from enable alert routine
-*/
-int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return polaris10_thermal_enable_alert(hwmgr);
-}
-
-/**
-* Disable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param input the pointer to input data
-* @param output the pointer to output data
-* @param storage the pointer to temporary storage
-* @param result the last failure code
-* @return result from disable alert routine
-*/
-static int tf_polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return polaris10_thermal_disable_alert(hwmgr);
-}
-
-static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- int ret;
- struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
- return 0;
-
- ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
-
- ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
- 0 : -1;
-
- if (!ret)
- /* If this param is not changed, this function could fire unnecessarily */
- smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
- return ret;
-}
-
-static const struct phm_master_table_item
-polaris10_thermal_start_thermal_controller_master_list[] = {
- {NULL, tf_polaris10_thermal_initialize},
- {NULL, tf_polaris10_thermal_set_temperature_range},
- {NULL, tf_polaris10_thermal_enable_alert},
- {NULL, tf_polaris10_thermal_avfs_enable},
-/* We should restrict performance levels to low before we halt the SMC.
- * On the other hand we are still in boot state when we do this
- * so it would be pointless.
- * If this assumption changes we have to revisit this table.
- */
- {NULL, tf_polaris10_thermal_setup_fan_table},
- {NULL, tf_polaris10_thermal_start_smc_fan_control},
- {NULL, NULL}
-};
-
-static const struct phm_master_table_header
-polaris10_thermal_start_thermal_controller_master = {
- 0,
- PHM_MasterTableFlag_None,
- polaris10_thermal_start_thermal_controller_master_list
-};
-
-static const struct phm_master_table_item
-polaris10_thermal_set_temperature_range_master_list[] = {
- {NULL, tf_polaris10_thermal_disable_alert},
- {NULL, tf_polaris10_thermal_set_temperature_range},
- {NULL, tf_polaris10_thermal_enable_alert},
- {NULL, NULL}
-};
-
-static const struct phm_master_table_header
-polaris10_thermal_set_temperature_range_master = {
- 0,
- PHM_MasterTableFlag_None,
- polaris10_thermal_set_temperature_range_master_list
-};
-
-int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
-{
- if (!hwmgr->thermal_controller.fanInfo.bNoFan)
- polaris10_fan_ctrl_set_default_mode(hwmgr);
- return 0;
-}
-
-/**
-* Initializes the thermal controller related functions in the Hardware Manager structure.
-* @param hwmgr The address of the hardware manager.
-* @exception Any error code from the low-level communication.
-*/
-int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- result = phm_construct_table(hwmgr,
- &polaris10_thermal_set_temperature_range_master,
- &(hwmgr->set_temperature_range));
-
- if (!result) {
- result = phm_construct_table(hwmgr,
- &polaris10_thermal_start_thermal_controller_master,
- &(hwmgr->start_thermal_controller));
- if (result)
- phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
- }
-
- if (!result)
- hwmgr->fan_ctrl_is_in_default_mode = true;
- return result;
-}
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h
deleted file mode 100644
index 62f8cbc2d590..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _POLARIS10_THERMAL_H_
-#define _POLARIS10_THERMAL_H_
-
-#include "hwmgr.h"
-
-#define POLARIS10_THERMAL_HIGH_ALERT_MASK 0x1
-#define POLARIS10_THERMAL_LOW_ALERT_MASK 0x2
-
-#define POLARIS10_THERMAL_MINIMUM_TEMP_READING -256
-#define POLARIS10_THERMAL_MAXIMUM_TEMP_READING 255
-
-#define POLARIS10_THERMAL_MINIMUM_ALERT_TEMP 0
-#define POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP 255
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-
-extern int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-
-extern int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
-extern int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
-extern int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
-extern int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
-extern int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 26f3e30d0fef..1126bd4f74dc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -22,7 +22,6 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
#include "ppatomctrl.h"
#include "atombios.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
index f127198aafc4..1e870f58dd12 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
@@ -164,7 +164,7 @@ typedef struct _ATOM_Tonga_State {
typedef struct _ATOM_Tonga_State_Array {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Tonga_State states[1]; /* Dynamically allocate entries. */
+ ATOM_Tonga_State entries[1]; /* Dynamically allocate entries. */
} ATOM_Tonga_State_Array;
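
The entries[1] member above is the pre-C99 flexible-array idiom: the struct declares a single element and ucNumEntries records actually follow it in memory. The hunks below replace direct entries[i] indexing with GET_FLEXIBLE_ARRAY_MEMBER_ADDR, which computes the i-th record's address explicitly instead of indexing past the declared bound. The macro's definition is not shown in this diff; a plausible shape, consistent with its (type, member, table, index) call sites, demonstrated on a toy table:

#include <stdio.h>
#include <stdlib.h>

#define GET_FLEXIBLE_ARRAY_MEMBER_ADDR(type, member, ptr, n) \
	((type *)((char *)&(ptr)->member + sizeof(type) * (n)))

typedef struct {
	unsigned char ucRevId;
	unsigned char ucNumEntries;
	int entries[1];		/* really ucNumEntries ints */
} demo_table;

int main(void)
{
	demo_table *t = malloc(sizeof(*t) + 3 * sizeof(int));
	int i;

	t->ucRevId = 0;
	t->ucNumEntries = 4;
	for (i = 0; i < t->ucNumEntries; i++)
		*GET_FLEXIBLE_ARRAY_MEMBER_ADDR(int, entries, t, i) = i * 10;

	printf("%d\n", *GET_FLEXIBLE_ARRAY_MEMBER_ADDR(int, entries, t, 3));
	free(t);
	return 0;	/* prints 30 */
}
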
typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index cfb647f76cbe..7de701d8a450 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -22,15 +22,14 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
-#include "tonga_processpptables.h"
+#include "process_pptables_v1_0.h"
#include "ppatomctrl.h"
#include "atombios.h"
#include "pp_debug.h"
#include "hwmgr.h"
#include "cgs_common.h"
-#include "tonga_pptable.h"
+#include "pptable_v1_0.h"
/**
* Private Function used during initialization.
@@ -154,12 +153,14 @@ const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
static int get_vddc_lookup_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_voltage_lookup_table **lookup_table,
- const ATOM_Tonga_Voltage_Lookup_Table *vddc_lookup_pp_tables,
- uint32_t max_levels
+ const ATOM_Tonga_Voltage_Lookup_Table *vddc_lookup_pp_tables,
+ uint32_t max_levels
)
{
uint32_t table_size, i;
phm_ppt_v1_voltage_lookup_table *table;
+ phm_ppt_v1_voltage_lookup_record *record;
+ ATOM_Tonga_Voltage_Lookup_Record *atom_record;
PP_ASSERT_WITH_CODE((0 != vddc_lookup_pp_tables->ucNumEntries),
"Invalid CAC Leakage PowerPlay Table!", return 1);
@@ -177,15 +178,17 @@ static int get_vddc_lookup_table(
table->count = vddc_lookup_pp_tables->ucNumEntries;
for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) {
- table->entries[i].us_calculated = 0;
- table->entries[i].us_vdd =
- vddc_lookup_pp_tables->entries[i].usVdd;
- table->entries[i].us_cac_low =
- vddc_lookup_pp_tables->entries[i].usCACLow;
- table->entries[i].us_cac_mid =
- vddc_lookup_pp_tables->entries[i].usCACMid;
- table->entries[i].us_cac_high =
- vddc_lookup_pp_tables->entries[i].usCACHigh;
+ record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_voltage_lookup_record,
+ entries, table, i);
+ atom_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_Voltage_Lookup_Record,
+ entries, vddc_lookup_pp_tables, i);
+ record->us_calculated = 0;
+ record->us_vdd = atom_record->usVdd;
+ record->us_cac_low = atom_record->usCACLow;
+ record->us_cac_mid = atom_record->usCACMid;
+ record->us_cac_high = atom_record->usCACHigh;
}
*lookup_table = table;
@@ -314,11 +317,12 @@ static int init_dpm_2_parameters(
static int get_valid_clk(
struct pp_hwmgr *hwmgr,
struct phm_clock_array **clk_table,
- const phm_ppt_v1_clock_voltage_dependency_table * clk_volt_pp_table
+ phm_ppt_v1_clock_voltage_dependency_table const *clk_volt_pp_table
)
{
uint32_t table_size, i;
struct phm_clock_array *table;
+ phm_ppt_v1_clock_voltage_dependency_record *dep_record;
PP_ASSERT_WITH_CODE((0 != clk_volt_pp_table->count),
"Invalid PowerPlay Table!", return -1);
@@ -335,9 +339,12 @@ static int get_valid_clk(
table->count = (uint32_t)clk_volt_pp_table->count;
- for (i = 0; i < table->count; i++)
- table->values[i] = (uint32_t)clk_volt_pp_table->entries[i].clk;
-
+ for (i = 0; i < table->count; i++) {
+ dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_clock_voltage_dependency_record,
+ entries, clk_volt_pp_table, i);
+ table->values[i] = (uint32_t)dep_record->clk;
+ }
*clk_table = table;
return 0;
@@ -346,7 +353,7 @@ static int get_valid_clk(
static int get_hard_limits(
struct pp_hwmgr *hwmgr,
struct phm_clock_and_voltage_limits *limits,
- const ATOM_Tonga_Hard_Limit_Table * limitable
+ ATOM_Tonga_Hard_Limit_Table const *limitable
)
{
PP_ASSERT_WITH_CODE((0 != limitable->ucNumEntries), "Invalid PowerPlay Table!", return -1);
@@ -364,11 +371,13 @@ static int get_hard_limits(
static int get_mclk_voltage_dependency_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_mclk_dep_table,
- const ATOM_Tonga_MCLK_Dependency_Table * mclk_dep_table
+ ATOM_Tonga_MCLK_Dependency_Table const *mclk_dep_table
)
{
uint32_t table_size, i;
phm_ppt_v1_clock_voltage_dependency_table *mclk_table;
+ phm_ppt_v1_clock_voltage_dependency_record *mclk_table_record;
+ ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record;
PP_ASSERT_WITH_CODE((0 != mclk_dep_table->ucNumEntries),
"Invalid PowerPlay Table!", return -1);
@@ -386,16 +395,17 @@ static int get_mclk_voltage_dependency_table(
mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries;
for (i = 0; i < mclk_dep_table->ucNumEntries; i++) {
- mclk_table->entries[i].vddInd =
- mclk_dep_table->entries[i].ucVddcInd;
- mclk_table->entries[i].vdd_offset =
- mclk_dep_table->entries[i].usVddgfxOffset;
- mclk_table->entries[i].vddci =
- mclk_dep_table->entries[i].usVddci;
- mclk_table->entries[i].mvdd =
- mclk_dep_table->entries[i].usMvdd;
- mclk_table->entries[i].clk =
- mclk_dep_table->entries[i].ulMclk;
+ mclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_clock_voltage_dependency_record,
+ entries, mclk_table, i);
+ mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_MCLK_Dependency_Record,
+ entries, mclk_dep_table, i);
+ mclk_table_record->vddInd = mclk_dep_record->ucVddcInd;
+ mclk_table_record->vdd_offset = mclk_dep_record->usVddgfxOffset;
+ mclk_table_record->vddci = mclk_dep_record->usVddci;
+ mclk_table_record->mvdd = mclk_dep_record->usMvdd;
+ mclk_table_record->clk = mclk_dep_record->ulMclk;
}
*pp_tonga_mclk_dep_table = mclk_table;
@@ -406,15 +416,17 @@ static int get_mclk_voltage_dependency_table(
static int get_sclk_voltage_dependency_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
- const PPTable_Generic_SubTable_Header *sclk_dep_table
+ PPTable_Generic_SubTable_Header const *sclk_dep_table
)
{
uint32_t table_size, i;
phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
+ phm_ppt_v1_clock_voltage_dependency_record *sclk_table_record;
if (sclk_dep_table->ucRevId < 1) {
const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
(ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
+ ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
"Invalid PowerPlay Table!", return -1);
@@ -432,20 +444,23 @@ static int get_sclk_voltage_dependency_table(
sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
for (i = 0; i < tonga_table->ucNumEntries; i++) {
- sclk_table->entries[i].vddInd =
- tonga_table->entries[i].ucVddInd;
- sclk_table->entries[i].vdd_offset =
- tonga_table->entries[i].usVddcOffset;
- sclk_table->entries[i].clk =
- tonga_table->entries[i].ulSclk;
- sclk_table->entries[i].cks_enable =
- (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
- sclk_table->entries[i].cks_voffset =
- (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+ sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_SCLK_Dependency_Record,
+ entries, tonga_table, i);
+ sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_clock_voltage_dependency_record,
+ entries, sclk_table, i);
+ sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
+ sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
+ sclk_table_record->clk = sclk_dep_record->ulSclk;
+ sclk_table_record->cks_enable =
+ (((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+ sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
}
} else {
const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
(ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
+ ATOM_Polaris_SCLK_Dependency_Record *sclk_dep_record;
PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
"Invalid PowerPlay Table!", return -1);
@@ -463,17 +478,19 @@ static int get_sclk_voltage_dependency_table(
sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
for (i = 0; i < polaris_table->ucNumEntries; i++) {
- sclk_table->entries[i].vddInd =
- polaris_table->entries[i].ucVddInd;
- sclk_table->entries[i].vdd_offset =
- polaris_table->entries[i].usVddcOffset;
- sclk_table->entries[i].clk =
- polaris_table->entries[i].ulSclk;
- sclk_table->entries[i].cks_enable =
- (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
- sclk_table->entries[i].cks_voffset =
- (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
- sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
+ sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Polaris_SCLK_Dependency_Record,
+ entries, polaris_table, i);
+ sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_clock_voltage_dependency_record,
+ entries, sclk_table, i);
+ sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
+ sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
+ sclk_table_record->clk = sclk_dep_record->ulSclk;
+ sclk_table_record->cks_enable =
+ (((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+ sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
+ sclk_table_record->sclk_offset = sclk_dep_record->ulSclkOffset;
}
}
*pp_tonga_sclk_dep_table = sclk_table;
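
Both branches above unpack ucCKSVOffsetandDisable as a packed byte: bit 7 set disables clock stretching (hence cks_enable is 1 when the bit is clear) and bits 6:0 carry the voltage offset. A one-byte sketch of the same unpacking:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t packed = 0x85;	/* made-up record byte: disabled, offset 5 */
	int cks_enable = (((packed & 0x80) >> 7) == 0) ? 1 : 0;
	int cks_voffset = packed & 0x7F;

	printf("cks_enable=%d cks_voffset=%d\n", cks_enable, cks_voffset);
	return 0;	/* cks_enable=0 cks_voffset=5 */
}
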
@@ -484,16 +501,19 @@ static int get_sclk_voltage_dependency_table(
static int get_pcie_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_pcie_table **pp_tonga_pcie_table,
- const PPTable_Generic_SubTable_Header * pTable
+ PPTable_Generic_SubTable_Header const *ptable
)
{
uint32_t table_size, i, pcie_count;
phm_ppt_v1_pcie_table *pcie_table;
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_pcie_record *pcie_record;
+
+ if (ptable->ucRevId < 1) {
+ const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)ptable;
+ ATOM_Tonga_PCIE_Record *atom_pcie_record;
- if (pTable->ucRevId < 1) {
- const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)pTable;
PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
"Invalid PowerPlay Table!", return -1);
@@ -519,18 +539,23 @@ static int get_pcie_table(
Disregarding the excess entries... \n");
pcie_table->count = pcie_count;
-
for (i = 0; i < pcie_count; i++) {
- pcie_table->entries[i].gen_speed =
- atom_pcie_table->entries[i].ucPCIEGenSpeed;
- pcie_table->entries[i].lane_width =
- atom_pcie_table->entries[i].usPCIELaneWidth;
+ pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_pcie_record,
+ entries, pcie_table, i);
+ atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_PCIE_Record,
+ entries, atom_pcie_table, i);
+ pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
+ pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
}
*pp_tonga_pcie_table = pcie_table;
} else {
/* Polaris10/Polaris11 and newer. */
- const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)pTable;
+ const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)ptable;
+ ATOM_Polaris10_PCIE_Record *atom_pcie_record;
+
PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
"Invalid PowerPlay Table!", return -1);
@@ -558,12 +583,15 @@ static int get_pcie_table(
pcie_table->count = pcie_count;
for (i = 0; i < pcie_count; i++) {
- pcie_table->entries[i].gen_speed =
- atom_pcie_table->entries[i].ucPCIEGenSpeed;
- pcie_table->entries[i].lane_width =
- atom_pcie_table->entries[i].usPCIELaneWidth;
- pcie_table->entries[i].pcie_sclk =
- atom_pcie_table->entries[i].ulPCIE_Sclk;
+ pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_pcie_record,
+ entries, pcie_table, i);
+ atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Polaris10_PCIE_Record,
+ entries, atom_pcie_table, i);
+ pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
+ pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
+ pcie_record->pcie_sclk = atom_pcie_record->ulPCIE_Sclk;
}
*pp_tonga_pcie_table = pcie_table;
@@ -685,6 +713,7 @@ static int get_mm_clock_voltage_table(
uint32_t table_size, i;
const ATOM_Tonga_MM_Dependency_Record *mm_dependency_record;
phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table;
+ phm_ppt_v1_mm_clock_voltage_dependency_record *mm_table_record;
PP_ASSERT_WITH_CODE((0 != mm_dependency_table->ucNumEntries),
"Invalid PowerPlay Table!", return -1);
@@ -701,14 +730,19 @@ static int get_mm_clock_voltage_table(
mm_table->count = mm_dependency_table->ucNumEntries;
for (i = 0; i < mm_dependency_table->ucNumEntries; i++) {
- mm_dependency_record = &mm_dependency_table->entries[i];
- mm_table->entries[i].vddcInd = mm_dependency_record->ucVddcInd;
- mm_table->entries[i].vddgfx_offset = mm_dependency_record->usVddgfxOffset;
- mm_table->entries[i].aclk = mm_dependency_record->ulAClk;
- mm_table->entries[i].samclock = mm_dependency_record->ulSAMUClk;
- mm_table->entries[i].eclk = mm_dependency_record->ulEClk;
- mm_table->entries[i].vclk = mm_dependency_record->ulVClk;
- mm_table->entries[i].dclk = mm_dependency_record->ulDClk;
+ mm_dependency_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_MM_Dependency_Record,
+ entries, mm_dependency_table, i);
+ mm_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_mm_clock_voltage_dependency_record,
+ entries, mm_table, i);
+ mm_table_record->vddcInd = mm_dependency_record->ucVddcInd;
+ mm_table_record->vddgfx_offset = mm_dependency_record->usVddgfxOffset;
+ mm_table_record->aclk = mm_dependency_record->ulAClk;
+ mm_table_record->samclock = mm_dependency_record->ulSAMUClk;
+ mm_table_record->eclk = mm_dependency_record->ulEClk;
+ mm_table_record->vclk = mm_dependency_record->ulVClk;
+ mm_table_record->dclk = mm_dependency_record->ulDClk;
}
*tonga_mm_table = mm_table;
@@ -1015,7 +1049,7 @@ static int check_powerplay_tables(
return 0;
}
-int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr)
+int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
{
int result = 0;
const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
@@ -1066,7 +1100,7 @@ int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr)
return result;
}
-int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
+int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1110,14 +1144,14 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
return 0;
}
-const struct pp_table_func tonga_pptable_funcs = {
- .pptable_init = tonga_pp_tables_initialize,
- .pptable_fini = tonga_pp_tables_uninitialize,
+const struct pp_table_func pptable_v1_0_funcs = {
+ .pptable_init = pp_tables_v1_0_initialize,
+ .pptable_fini = pp_tables_v1_0_uninitialize,
};
-int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
+int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr)
{
- const ATOM_Tonga_State_Array * state_arrays;
+ ATOM_Tonga_State_Array const *state_arrays;
const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
PP_ASSERT_WITH_CODE((NULL != pp_table),
@@ -1164,6 +1198,71 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
return result;
}
+static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
+{
+ const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
+ const ATOM_Tonga_VCE_State_Table *vce_state_table =
+ (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pp_table) + le16_to_cpu(pp_table->usVCEStateTableOffset));
+
+ if (vce_state_table == NULL)
+ return 0;
+
+ return vce_state_table->ucNumEntries;
+}
+
+static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i,
+ struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag)
+{
+ const ATOM_Tonga_VCE_State_Record *vce_state_record;
+ ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
+ ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record;
+ ATOM_Tonga_MM_Dependency_Record *mm_dep_record;
+ const ATOM_Tonga_POWERPLAYTABLE *pptable = get_powerplay_table(hwmgr);
+ const ATOM_Tonga_VCE_State_Table *vce_state_table = (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pptable)
+ + le16_to_cpu(pptable->usVCEStateTableOffset));
+ const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = (ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long)pptable)
+ + le16_to_cpu(pptable->usSclkDependencyTableOffset));
+ const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = (ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long)pptable)
+ + le16_to_cpu(pptable->usMclkDependencyTableOffset));
+ const ATOM_Tonga_MM_Dependency_Table *mm_dep_table = (ATOM_Tonga_MM_Dependency_Table *)(((unsigned long)pptable)
+ + le16_to_cpu(pptable->usMMDependencyTableOffset));
+
+ PP_ASSERT_WITH_CODE((i < vce_state_table->ucNumEntries),
+ "Requested state entry ID is out of range!",
+ return -EINVAL);
+
+ vce_state_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_VCE_State_Record,
+ entries, vce_state_table, i);
+ sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_SCLK_Dependency_Record,
+ entries, sclk_dep_table,
+ vce_state_record->ucSCLKIndex);
+ mm_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_MM_Dependency_Record,
+ entries, mm_dep_table,
+ vce_state_record->ucVCEClockIndex);
+ *flag = vce_state_record->ucFlag;
+
+ vce_state->evclk = mm_dep_record->ulEClk;
+ vce_state->ecclk = mm_dep_record->ulEClk;
+ vce_state->sclk = sclk_dep_record->ulSclk;
+
+ if (vce_state_record->ucMCLKIndex >= mclk_dep_table->ucNumEntries)
+ mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_MCLK_Dependency_Record,
+ entries, mclk_dep_table,
+ mclk_dep_table->ucNumEntries - 1);
+ else
+ mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_MCLK_Dependency_Record,
+ entries, mclk_dep_table,
+ vce_state_record->ucMCLKIndex);
+
+ vce_state->mclk = mclk_dep_record->ulMclk;
+ return 0;
+}
+
/**
* Create a Power State out of an entry in the PowerPlay table.
* This function is called by the hardware back-end.
@@ -1172,15 +1271,17 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
* @param power_state The address of the PowerState instance being created.
* @return -1 if the entry cannot be retrieved.
*/
-int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
+int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
uint32_t entry_index, struct pp_power_state *power_state,
int (*call_back_func)(struct pp_hwmgr *, void *,
struct pp_power_state *, void *, uint32_t))
{
int result = 0;
- const ATOM_Tonga_State_Array * state_arrays;
+ const ATOM_Tonga_State_Array *state_arrays;
const ATOM_Tonga_State *state_entry;
const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
+ int i, j;
+ uint32_t flags = 0;
PP_ASSERT_WITH_CODE((NULL != pp_table), "Missing PowerPlay Table!", return -1;);
power_state->classification.bios_index = entry_index;
@@ -1197,7 +1298,9 @@ int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE((entry_index <= state_arrays->ucNumEntries),
"Invalid PowerPlay Table State Array Entry.", return -1);
- state_entry = &(state_arrays->states[entry_index]);
+ state_entry = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_State, entries,
+ state_arrays, entry_index);
result = call_back_func(hwmgr, (void *)state_entry, power_state,
(void *)pp_table,
@@ -1210,5 +1313,13 @@ int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
PP_StateClassificationFlag_Boot))
result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware));
+ hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr);
+
+ if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) {
+ for (j = 0; j < i; j++)
+ ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags);
+ }
+
return result;
}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h
index d24b8887f466..b9710abdff01 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h
@@ -20,14 +20,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#ifndef TONGA_PROCESSPPTABLES_H
-#define TONGA_PROCESSPPTABLES_H
+#ifndef _PROCESSPPTABLES_V1_0_H
+#define _PROCESSPPTABLES_V1_0_H
#include "hwmgr.h"
-extern const struct pp_table_func tonga_pptable_funcs;
-extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
-extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
+extern const struct pp_table_func pptable_v1_0_funcs;
+extern int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr);
+extern int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t entry_index,
struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
struct pp_power_state *, void *, uint32_t));
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 6c321b0d8a1e..ccf7ebeaf892 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -1523,7 +1523,7 @@ int get_number_of_vce_state_table_entries(
int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
unsigned long i,
- struct PP_VCEState *vce_state,
+ struct pp_vce_state *vce_state,
void **clock_info,
unsigned long *flag)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index b5edb5105986..6eb6db199250 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -21,9 +21,53 @@
*
*/
-#include "polaris10_clockpowergating.h"
+#include "smu7_hwmgr.h"
+#include "smu7_clockpowergating.h"
+#include "smu7_common.h"
-int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
+static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ PPSMC_MSG_UVDDPM_Enable :
+ PPSMC_MSG_UVDDPM_Disable);
+}
+
+static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ PPSMC_MSG_VCEDPM_Enable :
+ PPSMC_MSG_VCEDPM_Disable);
+}
+
+static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ PPSMC_MSG_SAMUDPM_Enable :
+ PPSMC_MSG_SAMUDPM_Disable);
+}
+
+static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ if (!bgate)
+ smum_update_smc_table(hwmgr, SMU_UVD_TABLE);
+ return smu7_enable_disable_uvd_dpm(hwmgr, !bgate);
+}
+
+static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ if (!bgate)
+ smum_update_smc_table(hwmgr, SMU_VCE_TABLE);
+ return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
+}
+
+static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ if (!bgate)
+ smum_update_smc_table(hwmgr, SMU_SAMU_TABLE);
+ return smu7_enable_disable_samu_dpm(hwmgr, !bgate);
+}
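+
+/*
+ * Note the ordering above: when gating (bgate == true), DPM for the
+ * block is simply disabled; when un-gating, the relevant SMC table is
+ * refreshed first and only then is DPM re-enabled.
+ */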
+
+int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -31,7 +75,7 @@ int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
+int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -47,7 +91,7 @@ int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
+int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -55,7 +99,7 @@ int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
+int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -63,7 +107,7 @@ int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
+int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -72,7 +116,7 @@ int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
return 0;
}
-int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
+int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -81,27 +125,24 @@ int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
return 0;
}
-int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
+int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = false;
data->vce_power_gated = false;
data->samu_power_gated = false;
- polaris10_phm_powerup_uvd(hwmgr);
- polaris10_phm_powerup_vce(hwmgr);
- polaris10_phm_powerup_samu(hwmgr);
+ smu7_powerup_uvd(hwmgr);
+ smu7_powerup_vce(hwmgr);
+ smu7_powerup_samu(hwmgr);
return 0;
}
-int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (data->uvd_power_gated == bgate)
- return 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = bgate;
@@ -109,11 +150,11 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
- polaris10_update_uvd_dpm(hwmgr, true);
- polaris10_phm_powerdown_uvd(hwmgr);
+ smu7_update_uvd_dpm(hwmgr, true);
+ smu7_powerdown_uvd(hwmgr);
} else {
- polaris10_phm_powerup_uvd(hwmgr);
- polaris10_update_uvd_dpm(hwmgr, false);
+ smu7_powerup_uvd(hwmgr);
+ smu7_update_uvd_dpm(hwmgr, false);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
@@ -122,9 +163,9 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
return 0;
}
-int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->vce_power_gated == bgate)
return 0;
@@ -135,11 +176,11 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
- polaris10_update_vce_dpm(hwmgr, true);
- polaris10_phm_powerdown_vce(hwmgr);
+ smu7_update_vce_dpm(hwmgr, true);
+ smu7_powerdown_vce(hwmgr);
} else {
- polaris10_phm_powerup_vce(hwmgr);
- polaris10_update_vce_dpm(hwmgr, false);
+ smu7_powerup_vce(hwmgr);
+ smu7_update_vce_dpm(hwmgr, false);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
@@ -147,9 +188,9 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
return 0;
}
-int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
+int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->samu_power_gated == bgate)
return 0;
@@ -157,22 +198,25 @@ int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
data->samu_power_gated = bgate;
if (bgate) {
- polaris10_update_samu_dpm(hwmgr, true);
- polaris10_phm_powerdown_samu(hwmgr);
+ smu7_update_samu_dpm(hwmgr, true);
+ smu7_powerdown_samu(hwmgr);
} else {
- polaris10_phm_powerup_samu(hwmgr);
- polaris10_update_samu_dpm(hwmgr, false);
+ smu7_powerup_samu(hwmgr);
+ smu7_update_samu_dpm(hwmgr, false);
}
return 0;
}
-int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
+int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
const uint32_t *msg_id)
{
PPSMC_Msg msg;
uint32_t value;
+ if (!(hwmgr->feature_mask & PP_ENABLE_GFX_CG_THRU_SMU))
+ return 0;
+
switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
case PP_GROUP_GFX:
switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
@@ -185,7 +229,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
@@ -195,7 +239,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -208,7 +252,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -219,7 +263,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -232,7 +276,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -245,7 +289,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -259,12 +303,12 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
default:
- return -1;
+ return -EINVAL;
}
break;
@@ -279,7 +323,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
@@ -289,7 +333,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -302,7 +346,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -313,7 +357,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -326,7 +370,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
@@ -336,7 +380,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -349,7 +393,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -360,7 +404,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -373,7 +417,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -384,7 +428,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -397,18 +441,18 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
default:
- return -1;
+ return -EINVAL;
}
break;
default:
- return -1;
+ return -EINVAL;
}
@@ -419,7 +463,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
* Powerplay will only control the static per CU Power Gating.
* Dynamic per CU Power Gating will be done in gfx.
*/
-int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
+int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
{
struct cgs_system_info sys_info = {0};
uint32_t active_cus;
@@ -432,8 +476,8 @@ int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable
if (result)
return -EINVAL;
- else
- active_cus = sys_info.value;
+
+ active_cus = sys_info.value;
if (enable)
return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index 33af5f511ab8..d52a28c343e3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,15 +21,20 @@
*
*/
-#ifndef _FIJI_CLOCK_POWER_GATING_H_
-#define _FIJI_CLOCK_POWER_GATING_H_
+#ifndef _SMU7_CLOCK_POWER_GATING_H_
+#define _SMU7_CLOCK_POWER_GATING_H_
-#include "fiji_hwmgr.h"
+#include "smu7_hwmgr.h"
#include "pp_asicblocks.h"
-extern int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
-#endif /* _TONGA_CLOCK_POWER_GATING_H_ */
+int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
+int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
+int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
+ const uint32_t *msg_id);
+int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
new file mode 100644
index 000000000000..f967613191cf
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SMU7_DYN_DEFAULTS_H
+#define _SMU7_DYN_DEFAULTS_H
+
+
+/* Default dynamic power-management parameter values for SMU7-based ASICs. */
+
+
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000
+
+
+#define SMU7_THERMALPROTECTCOUNTER_DFLT 0x200
+#define SMU7_STATICSCREENTHRESHOLDUNIT_DFLT 0
+#define SMU7_STATICSCREENTHRESHOLD_DFLT 0x00C8
+#define SMU7_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200
+#define SMU7_REFERENCEDIVIDER_DFLT 4
+
+#define SMU7_ULVVOLTAGECHANGEDELAY_DFLT 1687
+
+#define SMU7_CGULVPARAMETER_DFLT 0x00040035
+#define SMU7_CGULVCONTROL_DFLT 0x00007450
+#define SMU7_TARGETACTIVITY_DFLT 50
+#define SMU7_MCLK_TARGETACTIVITY_DFLT 10
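+
+/*
+ * Usage sketch: the SMU7_VOTINGRIGHTSCLIENTS_DFLTn values above are
+ * intended to seed the per-client voting-rights settings; see
+ * smu7_program_voting_clients() in smu7_hwmgr.c, which writes them
+ * out to the CG_FREQ_TRAN_VOTING_0..7 registers.
+ */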
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
new file mode 100644
index 000000000000..508245d49d33
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -0,0 +1,4359 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <asm/div64.h>
+#include "linux/delay.h"
+#include "pp_acpi.h"
+#include "pp_debug.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pptable_v1_0.h"
+#include "pppcielanes.h"
+#include "amd_pcie_helpers.h"
+#include "hardwaremanager.h"
+#include "process_pptables_v1_0.h"
+#include "cgs_common.h"
+
+#include "smu7_common.h"
+
+#include "hwmgr.h"
+#include "smu7_hwmgr.h"
+#include "smu7_powertune.h"
+#include "smu7_dyn_defaults.h"
+#include "smu7_thermal.h"
+#include "smu7_clockpowergating.h"
+#include "processpptables.h"
+
+#define MC_CG_ARB_FREQ_F0 0x0a
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define MC_CG_ARB_FREQ_F2 0x0c
+#define MC_CG_ARB_FREQ_F3 0x0d
+
+#define MC_CG_SEQ_DRAMCONF_S0 0x05
+#define MC_CG_SEQ_DRAMCONF_S1 0x06
+#define MC_CG_SEQ_YCLK_SUSPEND 0x04
+#define MC_CG_SEQ_YCLK_RESUME 0x0a
+
+#define SMC_CG_IND_START 0xc0030000
+#define SMC_CG_IND_END 0xc0040000
+
+#define VOLTAGE_SCALE 4
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
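+
+/*
+ * Illustrative VID math, assuming the usual SVI2 convention used by
+ * powerplay (convert_to_vid() in hwmgr.h): one VID step is
+ * VOLTAGE_VID_OFFSET_SCALE1 / VOLTAGE_VID_OFFSET_SCALE2 = 625/100
+ * = 6.25 mV, so e.g. a VID of 40 corresponds to
+ * 1550 - 40 * 6.25 = 1300 mV.
+ */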
+
+#define MEM_FREQ_LOW_LATENCY 25000
+#define MEM_FREQ_HIGH_LATENCY 80000
+
+#define MEM_LATENCY_HIGH 45
+#define MEM_LATENCY_LOW 35
+#define MEM_LATENCY_ERR 0xFFFF
+
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+
+
+/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
+enum DPM_EVENT_SRC {
+ DPM_EVENT_SRC_ANALOG = 0,
+ DPM_EVENT_SRC_EXTERNAL = 1,
+ DPM_EVENT_SRC_DIGITAL = 2,
+ DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+ DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
+};
+
+static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
+
+struct smu7_power_state *cast_phw_smu7_power_state(
+ struct pp_hw_power_state *hw_ps)
+{
+ PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
+ "Invalid Powerstate Type!",
+ return NULL);
+
+ return (struct smu7_power_state *)hw_ps;
+}
+
+const struct smu7_power_state *cast_const_phw_smu7_power_state(
+ const struct pp_hw_power_state *hw_ps)
+{
+ PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
+ "Invalid Powerstate Type!",
+ return NULL);
+
+ return (const struct smu7_power_state *)hw_ps;
+}
+
+/**
+ * Find the MC microcode version and store it in the HwMgr struct
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0
+ */
+int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
+{
+ cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
+
+ hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
+
+ return 0;
+}
+
+uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+{
+ uint32_t speedCntl = 0;
+
+	/* mmPCIE_PORT_INDEX was renamed to mmPCIE_INDEX */
+ speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
+ ixPCIE_LC_SPEED_CNTL);
+	return (uint16_t)PHM_GET_FIELD(speedCntl,
+			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE);
+}
+
+int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+{
+ uint32_t link_width;
+
+	/* mmPCIE_PORT_INDEX was renamed to mmPCIE_INDEX */
+ link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
+ PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
+
+ PP_ASSERT_WITH_CODE((7 >= link_width),
+ "Invalid PCIe lane width!", return 0);
+
+ return decode_pcie_lane_width(link_width);
+}
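+
+/*
+ * Illustrative note: LC_LINK_WIDTH_RD is a small encoded width which
+ * decode_pcie_lane_width() (pppcielanes.c) expands to a lane count;
+ * the conventional mapping is encodings 0..6 -> 0, 1, 2, 4, 8, 12, 16
+ * lanes, hence the (7 >= link_width) sanity check above.
+ */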
+
+/**
+* Enable the SMC voltage controller
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);
+
+ return 0;
+}
+
+/**
+* Checks if we want to support voltage control
+*
+* @param hwmgr the address of the powerplay hardware manager.
+*/
+static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
+{
+ const struct smu7_hwmgr *data =
+ (const struct smu7_hwmgr *)(hwmgr->backend);
+
+ return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
+}
+
+/**
+* Enable voltage control
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
+{
+ /* enable voltage control */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
+
+ return 0;
+}
+
+static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
+ struct phm_clock_voltage_dependency_table *voltage_dependency_table
+ )
+{
+ uint32_t i;
+
+ PP_ASSERT_WITH_CODE((NULL != voltage_table),
+ "Voltage Dependency Table empty.", return -EINVAL;);
+
+ voltage_table->mask_low = 0;
+ voltage_table->phase_delay = 0;
+ voltage_table->count = voltage_dependency_table->count;
+
+ for (i = 0; i < voltage_dependency_table->count; i++) {
+ voltage_table->entries[i].value =
+ voltage_dependency_table->entries[i].v;
+ voltage_table->entries[i].smio_low = 0;
+ }
+
+ return 0;
+}
+
+
+/**
+* Create Voltage Tables.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ int result = 0;
+ uint32_t tmp;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ result = atomctrl_get_voltage_table_v3(hwmgr,
+ VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
+ &(data->mvdd_voltage_table));
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve MVDD table.",
+ return result);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+ if (hwmgr->pp_table_version == PP_TABLE_V1)
+ result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
+ table_info->vdd_dep_on_mclk);
+ else if (hwmgr->pp_table_version == PP_TABLE_V0)
+ result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
+ hwmgr->dyn_state.mvdd_dependency_on_mclk);
+
+ PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve SVI2 MVDD table from dependency table.",
+ return result;);
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ result = atomctrl_get_voltage_table_v3(hwmgr,
+ VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
+ &(data->vddci_voltage_table));
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve VDDCI table.",
+ return result);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ if (hwmgr->pp_table_version == PP_TABLE_V1)
+ result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
+ table_info->vdd_dep_on_mclk);
+ else if (hwmgr->pp_table_version == PP_TABLE_V0)
+ result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
+ hwmgr->dyn_state.vddci_dependency_on_mclk);
+ PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve SVI2 VDDCI table from dependency table.",
+ return result);
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+ /* VDDGFX has only SVI2 voltage control */
+ result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
+ table_info->vddgfx_lookup_table);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
+ }
+
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
+ result = atomctrl_get_voltage_table_v3(hwmgr,
+ VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
+ &data->vddc_voltage_table);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve VDDC table.", return result;);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+
+ if (hwmgr->pp_table_version == PP_TABLE_V0)
+ result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
+ hwmgr->dyn_state.vddc_dependency_on_mclk);
+ else if (hwmgr->pp_table_version == PP_TABLE_V1)
+ result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
+ table_info->vddc_lookup_table);
+
+ PP_ASSERT_WITH_CODE((0 == result),
+			"Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
+ }
+
+ tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC);
+ PP_ASSERT_WITH_CODE(
+ (data->vddc_voltage_table.count <= tmp),
+ "Too many voltage values for VDDC. Trimming to fit state table.",
+ phm_trim_voltage_table_to_fit_state_table(tmp,
+ &(data->vddc_voltage_table)));
+
+ tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
+ PP_ASSERT_WITH_CODE(
+ (data->vddgfx_voltage_table.count <= tmp),
+			"Too many voltage values for VDDGFX. Trimming to fit state table.",
+ phm_trim_voltage_table_to_fit_state_table(tmp,
+ &(data->vddgfx_voltage_table)));
+
+ tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI);
+ PP_ASSERT_WITH_CODE(
+ (data->vddci_voltage_table.count <= tmp),
+ "Too many voltage values for VDDCI. Trimming to fit state table.",
+ phm_trim_voltage_table_to_fit_state_table(tmp,
+ &(data->vddci_voltage_table)));
+
+ tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD);
+ PP_ASSERT_WITH_CODE(
+ (data->mvdd_voltage_table.count <= tmp),
+ "Too many voltage values for MVDD. Trimming to fit state table.",
+ phm_trim_voltage_table_to_fit_state_table(tmp,
+ &(data->mvdd_voltage_table)));
+
+ return 0;
+}
+
+/**
+* Programs static screen detection parameters
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int smu7_program_static_screen_threshold_parameters(
+ struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ /* Set static screen threshold unit */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
+ data->static_screen_threshold_unit);
+ /* Set static screen threshold */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
+ data->static_screen_threshold);
+
+ return 0;
+}
+
+/**
+* Setup display gap for glitch free memory clock switching.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
+{
+ uint32_t display_gap =
+ cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_DISPLAY_GAP_CNTL);
+
+ display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
+ DISP_GAP, DISPLAY_GAP_IGNORE);
+
+ display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
+ DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+ return 0;
+}
+
+/**
+* Programs activity state transition voting clients
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ /* Clear reset for voting clients before enabling DPM */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
+
+ return 0;
+}
+
+static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+ /* Reset voting clients before disabling DPM */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_1, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_2, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_3, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_4, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_5, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_6, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_7, 0);
+
+ return 0;
+}
+
+/* Copy one arb setting to another and then switch the active set.
+ * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
+ */
+static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
+ uint32_t arb_src, uint32_t arb_dest)
+{
+ uint32_t mc_arb_dram_timing;
+ uint32_t mc_arb_dram_timing2;
+ uint32_t burst_time;
+ uint32_t mc_cg_config;
+
+ switch (arb_src) {
+ case MC_CG_ARB_FREQ_F0:
+ mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+ break;
+ case MC_CG_ARB_FREQ_F1:
+ mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
+ mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
+ burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (arb_dest) {
+ case MC_CG_ARB_FREQ_F0:
+ cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+ cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+ PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
+ break;
+ case MC_CG_ARB_FREQ_F1:
+ cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+ cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+ PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
+ mc_cg_config |= 0x0000000F;
+ cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
+ PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
+
+ return 0;
+}
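+
+/*
+ * Usage sketch: DPM bring-up copies the boot-time F0 arb set into F1
+ * and makes F1 active (smu7_initial_switch_from_arbf0_to_f1() below);
+ * teardown reads the active set back from SMC_SCRATCH9 and switches
+ * to F0 (smu7_force_switch_to_arbf0()).
+ */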
+
+static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+}
+
+/**
+* Initial switch from ARB F0->F1
+*
+* This function is to be called from the SetPowerState table.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
+{
+ return smu7_copy_and_switch_arb_sets(hwmgr,
+ MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+{
+ uint32_t tmp;
+
+ tmp = (cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
+ 0x0000ff00) >> 8;
+
+ if (tmp == MC_CG_ARB_FREQ_F0)
+ return 0;
+
+ return smu7_copy_and_switch_arb_sets(hwmgr,
+ tmp, MC_CG_ARB_FREQ_F0);
+}
+
+static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = NULL;
+
+ uint32_t i, max_entry;
+ uint32_t tmp;
+
+ PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
+ data->use_pcie_power_saving_levels), "No pcie performance levels!",
+ return -EINVAL);
+
+ if (table_info != NULL)
+ pcie_table = table_info->pcie_table;
+
+ if (data->use_pcie_performance_levels &&
+ !data->use_pcie_power_saving_levels) {
+ data->pcie_gen_power_saving = data->pcie_gen_performance;
+ data->pcie_lane_power_saving = data->pcie_lane_performance;
+ } else if (!data->use_pcie_performance_levels &&
+ data->use_pcie_power_saving_levels) {
+ data->pcie_gen_performance = data->pcie_gen_power_saving;
+ data->pcie_lane_performance = data->pcie_lane_power_saving;
+ }
+ tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK);
+ phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
+ tmp,
+ MAX_REGULAR_DPM_NUMBER);
+
+ if (pcie_table != NULL) {
+ /* max_entry is used to make sure we reserve one PCIE level
+ * for boot level (fix for A+A PSPP issue).
+		 * If the PCIE table from the PPTable has a ULV entry plus
+		 * 8 entries, ignore the last entry. */
+ max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
+ for (i = 1; i < max_entry; i++) {
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ pcie_table->entries[i].gen_speed),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ pcie_table->entries[i].lane_width));
+ }
+ data->dpm_table.pcie_speed_table.count = max_entry - 1;
+ smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
+ } else {
+ /* Hardcode Pcie Table */
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Min_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Min_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Max_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Max_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Max_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Max_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+
+ data->dpm_table.pcie_speed_table.count = 6;
+ }
+ /* Populate last level for boot PCIE level, but do not increment count. */
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
+ data->dpm_table.pcie_speed_table.count,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Min_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+
+ return 0;
+}
+
+static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
+
+ phm_reset_single_dpm_table(
+ &data->dpm_table.sclk_table,
+ smum_get_mac_definition(hwmgr->smumgr,
+ SMU_MAX_LEVELS_GRAPHICS),
+ MAX_REGULAR_DPM_NUMBER);
+ phm_reset_single_dpm_table(
+ &data->dpm_table.mclk_table,
+ smum_get_mac_definition(hwmgr->smumgr,
+ SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
+
+ phm_reset_single_dpm_table(
+ &data->dpm_table.vddc_table,
+ smum_get_mac_definition(hwmgr->smumgr,
+ SMU_MAX_LEVELS_VDDC),
+ MAX_REGULAR_DPM_NUMBER);
+ phm_reset_single_dpm_table(
+ &data->dpm_table.vddci_table,
+ smum_get_mac_definition(hwmgr->smumgr,
+ SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
+
+ phm_reset_single_dpm_table(
+ &data->dpm_table.mvdd_table,
+ smum_get_mac_definition(hwmgr->smumgr,
+ SMU_MAX_LEVELS_MVDD),
+ MAX_REGULAR_DPM_NUMBER);
+ return 0;
+}
+
+/*
+ * Initialize all DPM state tables for SMU7 based on the dependency
+ * tables. The dynamic-state patching functions will then trim these
+ * tables to the range allowed by the power policy or by external
+ * client requests, such as a UVD request.
+ */
+
+static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+ struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
+ hwmgr->dyn_state.vddc_dependency_on_mclk;
+ struct phm_cac_leakage_table *std_voltage_table =
+ hwmgr->dyn_state.cac_leakage_table;
+ uint32_t i;
+
+ PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
+ "SCLK dependency table is missing. This table is mandatory", return -EINVAL);
+ PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
+		"SCLK dependency table has to have at least one entry. This table is mandatory", return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
+ "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
+ PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
+		"MCLK dependency table has to have at least one entry. This table is mandatory", return -EINVAL);
+
+
+ /* Initialize Sclk DPM table based on allow Sclk values*/
+ data->dpm_table.sclk_table.count = 0;
+
+ for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
+ if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
+ allowed_vdd_sclk_table->entries[i].clk) {
+ data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
+ allowed_vdd_sclk_table->entries[i].clk;
+ data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
+ data->dpm_table.sclk_table.count++;
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
+ "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
+ /* Initialize Mclk DPM table based on allow Mclk values */
+ data->dpm_table.mclk_table.count = 0;
+ for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+ if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
+ allowed_vdd_mclk_table->entries[i].clk) {
+ data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
+ allowed_vdd_mclk_table->entries[i].clk;
+ data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
+ data->dpm_table.mclk_table.count++;
+ }
+ }
+
+ /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */
+ for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
+ data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
+ data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
+ /* param1 is for corresponding std voltage */
+ data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
+ }
+
+ data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
+ allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+ if (NULL != allowed_vdd_mclk_table) {
+ /* Initialize Vddci DPM table based on allow Mclk values */
+ for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+ data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
+ data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
+ }
+ data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
+ }
+
+ allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
+
+ if (NULL != allowed_vdd_mclk_table) {
+ /*
+ * Initialize MVDD DPM table based on allow Mclk
+ * values
+ */
+ for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+ data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
+ data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
+ }
+ data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
+ }
+
+ return 0;
+}
+
+static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i;
+
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
+
+ if (table_info == NULL)
+ return -EINVAL;
+
+ dep_sclk_table = table_info->vdd_dep_on_sclk;
+ dep_mclk_table = table_info->vdd_dep_on_mclk;
+
+ PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
+ "SCLK dependency table is missing.",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
+ "SCLK dependency table count is 0.",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
+ "MCLK dependency table is missing.",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
+ "MCLK dependency table count is 0",
+ return -EINVAL);
+
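+	/* Worked example of the de-duplication below (illustrative
+	 * values): dependency clocks of { 300, 600, 600, 900 } collapse
+	 * into DPM levels { 300, 600, 900 }, with only level 0 enabled
+	 * by default. */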
+ /* Initialize Sclk DPM table based on allow Sclk values */
+ data->dpm_table.sclk_table.count = 0;
+ for (i = 0; i < dep_sclk_table->count; i++) {
+ if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
+ dep_sclk_table->entries[i].clk) {
+
+ data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
+ dep_sclk_table->entries[i].clk;
+
+ data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
+					(i == 0);
+ data->dpm_table.sclk_table.count++;
+ }
+ }
+
+ /* Initialize Mclk DPM table based on allow Mclk values */
+ data->dpm_table.mclk_table.count = 0;
+ for (i = 0; i < dep_mclk_table->count; i++) {
+ if (i == 0 || data->dpm_table.mclk_table.dpm_levels
+ [data->dpm_table.mclk_table.count - 1].value !=
+ dep_mclk_table->entries[i].clk) {
+ data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
+ dep_mclk_table->entries[i].clk;
+ data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
+					(i == 0);
+ data->dpm_table.mclk_table.count++;
+ }
+ }
+
+ return 0;
+}
+
+int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ smu7_reset_dpm_tables(hwmgr);
+
+ if (hwmgr->pp_table_version == PP_TABLE_V1)
+ smu7_setup_dpm_tables_v1(hwmgr);
+ else if (hwmgr->pp_table_version == PP_TABLE_V0)
+ smu7_setup_dpm_tables_v0(hwmgr);
+
+ smu7_setup_default_pcie_table(hwmgr);
+
+ /* save a copy of the default DPM table */
+ memcpy(&(data->golden_dpm_table), &(data->dpm_table),
+ sizeof(struct smu7_dpm_table));
+ return 0;
+}
+
+uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reference_clock, tmp;
+ struct cgs_display_info info = {0};
+ struct cgs_mode_info mode_info;
+
+ info.mode_info = &mode_info;
+
+ tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
+
+ if (tmp)
+ return TCLK;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ reference_clock = mode_info.ref_clock;
+
+ tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
+
+ if (0 != tmp)
+ return reference_clock / 4;
+
+ return reference_clock;
+}
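+
+/*
+ * Illustrative numbers, assuming the 10 kHz clock units used throughout
+ * powerplay: PCIE_BUS_CLK = 10000 is a 100 MHz bus clock, so
+ * TCLK = PCIE_BUS_CLK / 10 = 1000 (10 MHz) is returned when TCLK is
+ * muxed onto XCLK; otherwise the display reference clock is used,
+ * divided by 4 when XTALIN_DIVIDE is set.
+ */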
+
+static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
+{
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot))
+ return smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_EnableVRHotGPIOInterrupt);
+
+ return 0;
+}
+
+static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
+{
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+ SCLK_PWRMGT_OFF, 0);
+ return 0;
+}
+
+static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->ulv_supported)
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
+
+ return 0;
+}
+
+static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->ulv_supported)
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+
+ return 0;
+}
+
+static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep)) {
+ if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to enable Master Deep Sleep switch failed!",
+ return -EINVAL);
+ } else {
+ if (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to disable Master Deep Sleep switch failed!",
+ return -EINVAL);
+ }
+ }
+
+ return 0;
+}
+
+static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep)) {
+ if (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to disable Master Deep Sleep switch failed!",
+ return -EINVAL);
+ }
+ }
+
+ return 0;
+}
+
+static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t soft_register_value = 0;
+ uint32_t handshake_disables_offset = data->soft_regs_start
+ + smum_get_offsetof(hwmgr->smumgr,
+ SMU_SoftRegisters, HandshakeDisables);
+
+ soft_register_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, handshake_disables_offset);
+ soft_register_value |= smum_get_mac_definition(hwmgr->smumgr,
+ SMU_UVD_MCLK_HANDSHAKE_DISABLE);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ handshake_disables_offset, soft_register_value);
+ return 0;
+}
+
+static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ /* enable SCLK dpm */
+ if (!data->sclk_dpm_key_disabled)
+ PP_ASSERT_WITH_CODE(
+ (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
+ "Failed to enable SCLK DPM during DPM Start Function!",
+ return -EINVAL);
+
+ /* enable MCLK dpm */
+ if (0 == data->mclk_dpm_key_disabled) {
+ if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
+ smu7_disable_handshake_uvd(hwmgr);
+ PP_ASSERT_WITH_CODE(
+ (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_Enable)),
+ "Failed to enable MCLK DPM during DPM Start Function!",
+ return -EINVAL);
+
+ PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
+
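+		/* Program the local CAC (LCAC) controllers for MC0/MC1/CPL;
+		 * the magic values follow AMD's bring-up sequence and are
+		 * not documented further here. */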
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
+ udelay(10);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
+ }
+
+ return 0;
+}
+
+static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	/* enable general power management */
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ GLOBAL_PWRMGT_EN, 1);
+
+ /* enable sclk deep sleep */
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+ DYNAMIC_PM_EN, 1);
+
+ /* prepare for PCIE DPM */
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters,
+ VoltageChangeTimeout), 0x1000);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
+ SWRST_COMMAND_1, RESETLC, 0x0);
+
+ PP_ASSERT_WITH_CODE(
+ (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_Voltage_Cntl_Enable)),
+ "Failed to enable voltage DPM during DPM Start Function!",
+ return -EINVAL);
+
+
+ if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
+		printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!\n");
+ return -EINVAL;
+ }
+
+ /* enable PCIE dpm */
+ if (0 == data->pcie_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_Enable)),
+ "Failed to enable pcie DPM during DPM Start Function!",
+ return -EINVAL);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_Falcon_QuickTransition)) {
+ PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_EnableACDCGPIOInterrupt)),
+ "Failed to enable AC DC GPIO Interrupt!",
+ );
+ }
+
+ return 0;
+}
+
+static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ /* disable SCLK dpm */
+ if (!data->sclk_dpm_key_disabled)
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_DPM_Disable) == 0),
+ "Failed to disable SCLK DPM!",
+ return -EINVAL);
+
+ /* disable MCLK dpm */
+ if (!data->mclk_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_Disable) == 0),
+ "Failed to disable MCLK DPM!",
+ return -EINVAL);
+ }
+
+ return 0;
+}
+
+static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ /* disable general power management */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ GLOBAL_PWRMGT_EN, 0);
+ /* disable sclk deep sleep */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+ DYNAMIC_PM_EN, 0);
+
+ /* disable PCIE dpm */
+ if (!data->pcie_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_Disable) == 0),
+ "Failed to disable pcie DPM during DPM Stop Function!",
+ return -EINVAL);
+ }
+
+ if (smu7_disable_sclk_mclk_dpm(hwmgr)) {
+		printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
+{
+ bool protection;
+ enum DPM_EVENT_SRC src;
+
+ switch (sources) {
+ default:
+		printk(KERN_ERR "Unknown throttling event sources.\n");
+ /* fall through */
+ case 0:
+ protection = false;
+ /* src is unused */
+ break;
+ case (1 << PHM_AutoThrottleSource_Thermal):
+ protection = true;
+ src = DPM_EVENT_SRC_DIGITAL;
+ break;
+ case (1 << PHM_AutoThrottleSource_External):
+ protection = true;
+ src = DPM_EVENT_SRC_EXTERNAL;
+ break;
+ case (1 << PHM_AutoThrottleSource_External) |
+ (1 << PHM_AutoThrottleSource_Thermal):
+ protection = true;
+ src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
+ break;
+ }
+ /* Order matters - don't enable thermal protection for the wrong source. */
+ if (protection) {
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
+ DPM_EVENT_SRC, src);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ THERMAL_PROTECTION_DIS,
+ !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController));
+ } else
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ THERMAL_PROTECTION_DIS, 1);
+}
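+
+/*
+ * Illustrative mapping for the switch above: a thermal-only source
+ * mask selects DPM_EVENT_SRC_DIGITAL, external-only selects
+ * DPM_EVENT_SRC_EXTERNAL, both together select
+ * DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL, and any other combination
+ * disables thermal protection entirely.
+ */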
+
+static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+ PHM_AutoThrottleSource source)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (!(data->active_auto_throttle_sources & (1 << source))) {
+ data->active_auto_throttle_sources |= 1 << source;
+ smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+ }
+ return 0;
+}
+
+static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+ return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
+static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+ PHM_AutoThrottleSource source)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->active_auto_throttle_sources & (1 << source)) {
+ data->active_auto_throttle_sources &= ~(1 << source);
+ smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+ }
+ return 0;
+}
+
+static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+ return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
+int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ data->pcie_performance_request = true;
+
+ return 0;
+}
+
+int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ int tmp_result = 0;
+ int result = 0;
+
+ tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
+ PP_ASSERT_WITH_CODE(tmp_result == 0,
+ "DPM is already running right now, no need to enable DPM!",
+ return 0);
+
+ if (smu7_voltage_control(hwmgr)) {
+ tmp_result = smu7_enable_voltage_control(hwmgr);
+ PP_ASSERT_WITH_CODE(tmp_result == 0,
+ "Failed to enable voltage control!",
+ result = tmp_result);
+
+ tmp_result = smu7_construct_voltage_tables(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to construct voltage tables!",
+ result = tmp_result);
+ }
+ smum_initialize_mc_reg_table(hwmgr);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport))
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController))
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
+
+ tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to program static screen threshold parameters!",
+ result = tmp_result);
+
+ tmp_result = smu7_enable_display_gap(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable display gap!", result = tmp_result);
+
+ tmp_result = smu7_program_voting_clients(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to program voting clients!", result = tmp_result);
+
+ tmp_result = smum_process_firmware_header(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to process firmware header!", result = tmp_result);
+
+ tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to initialize switch from ArbF0 to F1!",
+ result = tmp_result);
+
+ result = smu7_setup_default_dpm_tables(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to setup default DPM tables!", return result);
+
+ tmp_result = smum_init_smc_table(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to initialize SMC table!", result = tmp_result);
+
+ tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
+
+ smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+
+ tmp_result = smu7_enable_sclk_control(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable SCLK control!", result = tmp_result);
+
+ tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable voltage control!", result = tmp_result);
+
+ tmp_result = smu7_enable_ulv(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable ULV!", result = tmp_result);
+
+ tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable deep sleep master switch!", result = tmp_result);
+
+ tmp_result = smu7_enable_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to enable DIDT config!", result = tmp_result);
+
+ tmp_result = smu7_start_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to start DPM!", result = tmp_result);
+
+ tmp_result = smu7_enable_smc_cac(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable SMC CAC!", result = tmp_result);
+
+ tmp_result = smu7_enable_power_containment(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable power containment!", result = tmp_result);
+
+ tmp_result = smu7_power_control_set_level(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to set power control level!", result = tmp_result);
+
+ tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable thermal auto throttle!", result = tmp_result);
+
+ tmp_result = smu7_pcie_performance_request(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"PCIe performance request failed!", result = tmp_result);
+
+ return 0;
+}
+
+int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ int tmp_result, result = 0;
+
+ tmp_result = (smum_is_dpm_running(hwmgr)) ? 0 : -1;
+ PP_ASSERT_WITH_CODE(tmp_result == 0,
+			"DPM is not running, no need to disable it!",
+ return 0);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController))
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
+
+ tmp_result = smu7_disable_power_containment(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable power containment!", result = tmp_result);
+
+ tmp_result = smu7_disable_smc_cac(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable SMC CAC!", result = tmp_result);
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
+
+ tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable thermal auto throttle!", result = tmp_result);
+
+ tmp_result = smu7_stop_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to stop DPM!", result = tmp_result);
+
+ tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable deep sleep master switch!", result = tmp_result);
+
+ tmp_result = smu7_disable_ulv(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable ULV!", result = tmp_result);
+
+ tmp_result = smu7_clear_voting_clients(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to clear voting clients!", result = tmp_result);
+
+ tmp_result = smu7_reset_to_default(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to reset to default!", result = tmp_result);
+
+ tmp_result = smu7_force_switch_to_arbf0(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to force to switch arbf0!", result = tmp_result);
+
+ return result;
+}
+
+int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr)
+{
+	return 0;
+}
+
+static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ data->dll_default_on = false;
+ data->mclk_dpm0_activity_target = 0xa;
+ data->mclk_activity_target = SMU7_MCLK_TARGETACTIVITY_DFLT;
+ data->vddc_vddgfx_delta = 300;
+ data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
+ data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
+ data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
+ data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
+ data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
+ data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
+ data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
+ data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
+ data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
+ data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
+
+ data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
+ data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
+ data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
+ /* need to set voltage control types before EVV patching */
+ data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
+ data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
+ data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
+ data->enable_tdc_limit_feature = true;
+ data->enable_pkg_pwr_tracking_feature = true;
+ data->force_pcie_gen = PP_PCIEGenInvalid;
+ data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
+
+ data->fast_watermark_threshold = 100;
+ if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
+ data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ControlVDDGFX)) {
+ if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
+ data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+ }
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EnableMVDDControl)) {
+ if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
+ data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
+ else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
+ data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ControlVDDGFX);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ControlVDDCI)) {
+ if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
+ data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
+ else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
+ data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+ }
+
+ if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EnableMVDDControl);
+
+ if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ControlVDDCI);
+
+ if ((hwmgr->pp_table_version != PP_TABLE_V0)
+ && (table_info->cac_dtp_table->usClockStretchAmount != 0))
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+
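+	/* max and min are deliberately initialized inverted (max to Gen1 and
+	 * 0 lanes, min to Gen3 and 16 lanes) so that the first comparisons
+	 * made while parsing the power states overwrite them.
+	 */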
+ data->pcie_gen_performance.max = PP_PCIEGen1;
+ data->pcie_gen_performance.min = PP_PCIEGen3;
+ data->pcie_gen_power_saving.max = PP_PCIEGen1;
+ data->pcie_gen_power_saving.min = PP_PCIEGen3;
+ data->pcie_lane_performance.max = 0;
+ data->pcie_lane_performance.min = 16;
+ data->pcie_lane_power_saving.max = 0;
+ data->pcie_lane_power_saving.min = 16;
+}
+
+/**
+ * Get leakage VDDC based on leakage ID.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0
+ */
+static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint16_t vv_id;
+ uint16_t vddc = 0;
+ uint16_t vddgfx = 0;
+ uint16_t i, j;
+ uint32_t sclk = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
+
+ if (table_info != NULL)
+ sclk_table = table_info->vdd_dep_on_sclk;
+
+ for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
+ vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+
+ if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ if (0 == phm_get_sclk_for_voltage_evv(hwmgr,
+ table_info->vddgfx_lookup_table, vv_id, &sclk)) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
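+					/* If the matching entry has clock
+					 * stretching disabled, bump sclk by
+					 * 5000 (in 10 kHz units, i.e. 50 MHz)
+					 * before the EVV lookup.
+					 */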
+ for (j = 1; j < sclk_table->count; j++) {
+ if (sclk_table->entries[j].clk == sclk &&
+ sclk_table->entries[j].cks_enable == 0) {
+ sclk += 5000;
+ break;
+ }
+ }
+ }
+ if (0 == atomctrl_get_voltage_evv_on_sclk
+ (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
+ vv_id, &vddgfx)) {
+ /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
+ PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
+
+ /* the voltage should not be zero nor equal to leakage ID */
+ if (vddgfx != 0 && vddgfx != vv_id) {
+ data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
+ data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
+ data->vddcgfx_leakage.count++;
+ }
+ } else {
+				printk(KERN_ERR "Error retrieving EVV voltage value!\n");
+ }
+ }
+ } else {
+ if ((hwmgr->pp_table_version == PP_TABLE_V0)
+ || !phm_get_sclk_for_voltage_evv(hwmgr,
+ table_info->vddc_lookup_table, vv_id, &sclk)) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ for (j = 1; j < sclk_table->count; j++) {
+ if (sclk_table->entries[j].clk == sclk &&
+ sclk_table->entries[j].cks_enable == 0) {
+ sclk += 5000;
+ break;
+ }
+ }
+ }
+
+ if (phm_get_voltage_evv_on_sclk(hwmgr,
+ VOLTAGE_TYPE_VDDC,
+ sclk, vv_id, &vddc) == 0) {
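+				/* As with VDDGFX above, reject voltages of 2V
+				 * or more (or zero) to protect the ASIC.
+				 */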
+ if (vddc >= 2000 || vddc == 0)
+ return -EINVAL;
+ } else {
+					printk(KERN_WARNING "Failed to retrieve EVV voltage!\n");
+ continue;
+ }
+
+ /* the voltage should not be zero nor equal to leakage ID */
+ if (vddc != 0 && vddc != vv_id) {
+ data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
+ data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
+ data->vddc_leakage.count++;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Change virtual leakage voltage to actual value.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param voltage pointer to the voltage value to patch
+ * @param leakage_table pointer to the leakage table
+ */
+static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
+ uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
+{
+ uint32_t index;
+
+ /* search for leakage voltage ID 0xff01 ~ 0xff08 */
+ for (index = 0; index < leakage_table->count; index++) {
+ /* if this voltage matches a leakage voltage ID */
+ /* patch with actual leakage voltage */
+ if (leakage_table->leakage_id[index] == *voltage) {
+ *voltage = leakage_table->actual_voltage[index];
+ break;
+ }
+ }
+
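+	/* Still being in the virtual-ID range after the search means no match
+	 * was found in the leakage table, so flag the unpatched value.
+	 */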
+ if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
+		printk(KERN_ERR "Voltage value looks like a leakage ID but it was not patched\n");
+}
+
+/**
+ * Patch voltage lookup table by EVV leakages.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param lookup_table pointer to the voltage lookup table
+ * @param leakage_table pointer to the leakage table
+ * @return always 0
+ */
+static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
+ phm_ppt_v1_voltage_lookup_table *lookup_table,
+ struct smu7_leakage_voltage *leakage_table)
+{
+ uint32_t i;
+
+ for (i = 0; i < lookup_table->count; i++)
+ smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
+ &lookup_table->entries[i].us_vdd, leakage_table);
+
+ return 0;
+}
+
+static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
+ struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
+ uint16_t *vddc)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
+ hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
+ table_info->max_clock_voltage_on_dc.vddc;
+ return 0;
+}
+
+static int smu7_patch_voltage_dependency_tables_with_lookup_table(
+ struct pp_hwmgr *hwmgr)
+{
+ uint8_t entry_id;
+ uint8_t voltage_id;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+ struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
+ table_info->vdd_dep_on_mclk;
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
+ voltage_id = sclk_table->entries[entry_id].vddInd;
+ sclk_table->entries[entry_id].vddgfx =
+ table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
+ }
+ } else {
+ for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
+ voltage_id = sclk_table->entries[entry_id].vddInd;
+ sclk_table->entries[entry_id].vddc =
+ table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+ }
+ }
+
+ for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
+ voltage_id = mclk_table->entries[entry_id].vddInd;
+ mclk_table->entries[entry_id].vddc =
+ table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+ }
+
+ for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
+ voltage_id = mm_table->entries[entry_id].vddcInd;
+ mm_table->entries[entry_id].vddc =
+ table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+ }
+
+	return 0;
+}
+
+static int phm_add_voltage(struct pp_hwmgr *hwmgr,
+ phm_ppt_v1_voltage_lookup_table *look_up_table,
+ phm_ppt_v1_voltage_lookup_record *record)
+{
+ uint32_t i;
+
+ PP_ASSERT_WITH_CODE((NULL != look_up_table),
+ "Lookup Table empty.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((0 != look_up_table->count),
+ "Lookup Table empty.", return -EINVAL);
+
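+	/* i temporarily holds the table capacity (SMU_MAX_LEVELS_VDDGFX); the
+	 * assert below requires it to be at least the current entry count. i
+	 * is then reused as the scan index.
+	 */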
+ i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
+ PP_ASSERT_WITH_CODE((i >= look_up_table->count),
+ "Lookup Table is full.", return -EINVAL);
+
+ /* This is to avoid entering duplicate calculated records. */
+ for (i = 0; i < look_up_table->count; i++) {
+ if (look_up_table->entries[i].us_vdd == record->us_vdd) {
+ if (look_up_table->entries[i].us_calculated == 1)
+ return 0;
+ break;
+ }
+ }
+
+ look_up_table->entries[i].us_calculated = 1;
+ look_up_table->entries[i].us_vdd = record->us_vdd;
+ look_up_table->entries[i].us_cac_low = record->us_cac_low;
+ look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
+ look_up_table->entries[i].us_cac_high = record->us_cac_high;
+ /* Only increment the count when we're appending, not replacing duplicate entry. */
+ if (i == look_up_table->count)
+ look_up_table->count++;
+
+ return 0;
+}
+
+static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
+{
+ uint8_t entry_id;
+ struct phm_ppt_v1_voltage_lookup_record v_record;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
+ phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
+
+ if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
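+			/* vdd_offset is a 16-bit value with bit 15 as a sign
+			 * flag: when it is set, the offset is negative and
+			 * 0xFFFF is subtracted after the add.
+			 */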
+ if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
+ v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
+ sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
+ else
+ v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
+ sclk_table->entries[entry_id].vdd_offset;
+
+ sclk_table->entries[entry_id].vddc =
+ v_record.us_cac_low = v_record.us_cac_mid =
+ v_record.us_cac_high = v_record.us_vdd;
+
+ phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
+ }
+
+ for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
+ if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
+ v_record.us_vdd = mclk_table->entries[entry_id].vddc +
+ mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
+ else
+ v_record.us_vdd = mclk_table->entries[entry_id].vddc +
+ mclk_table->entries[entry_id].vdd_offset;
+
+ mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
+ v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
+ phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
+ }
+ }
+ return 0;
+}
+
+static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
+{
+ uint8_t entry_id;
+ struct phm_ppt_v1_voltage_lookup_record v_record;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
+
+ if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
+ if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
+ v_record.us_vdd = mm_table->entries[entry_id].vddc +
+ mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
+ else
+ v_record.us_vdd = mm_table->entries[entry_id].vddc +
+ mm_table->entries[entry_id].vddgfx_offset;
+
+ /* Add the calculated VDDGFX to the VDDGFX lookup table */
+ mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
+ v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
+ phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
+ }
+ }
+ return 0;
+}
+
+static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table)
+{
+ uint32_t table_size, i, j;
+	struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
+
+	table_size = lookup_table->count;
+
+ PP_ASSERT_WITH_CODE(0 != lookup_table->count,
+ "Lookup table is empty", return -EINVAL);
+
+	/* Insertion-style sort of the entries by ascending us_vdd. */
+ for (i = 0; i < table_size - 1; i++) {
+ for (j = i + 1; j > 0; j--) {
+ if (lookup_table->entries[j].us_vdd <
+ lookup_table->entries[j - 1].us_vdd) {
+ tmp_voltage_lookup_record = lookup_table->entries[j - 1];
+ lookup_table->entries[j - 1] = lookup_table->entries[j];
+ lookup_table->entries[j] = tmp_voltage_lookup_record;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+ int tmp_result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
+ table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
+ if (tmp_result != 0)
+ result = tmp_result;
+
+ smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
+ &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
+ } else {
+ tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
+ table_info->vddc_lookup_table, &(data->vddc_leakage));
+ if (tmp_result)
+ result = tmp_result;
+
+ tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
+ &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
+ if (tmp_result)
+ result = tmp_result;
+ }
+
+ tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
+ if (tmp_result)
+ result = tmp_result;
+
+ tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
+ if (tmp_result)
+ result = tmp_result;
+
+ tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
+ if (tmp_result)
+ result = tmp_result;
+
+ tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
+ if (tmp_result)
+ result = tmp_result;
+
+ tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
+ if (tmp_result)
+ result = tmp_result;
+
+ return result;
+}
+
+static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
+ table_info->vdd_dep_on_sclk;
+ struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
+ table_info->vdd_dep_on_mclk;
+
+ PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
+ "VDD dependency on SCLK table is missing.",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
+			"VDD dependency on SCLK table is empty.",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
+			"VDD dependency on MCLK table is missing.",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
+			"VDD dependency on MCLK table is empty.",
+ return -EINVAL);
+
+ table_info->max_clock_voltage_on_ac.sclk =
+ allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
+ table_info->max_clock_voltage_on_ac.mclk =
+ allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
+ table_info->max_clock_voltage_on_ac.vddc =
+ allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
+ table_info->max_clock_voltage_on_ac.vddci =
+ allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
+
+ hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
+ hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
+ hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
+ hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
+
+ return 0;
+}
+
+int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table;
+ uint32_t i;
+ uint32_t hw_revision, sub_vendor_id, sub_sys_id;
+ struct cgs_system_info sys_info = {0};
+
+ if (table_info != NULL) {
+ dep_mclk_table = table_info->vdd_dep_on_mclk;
+ lookup_table = table_info->vddc_lookup_table;
+	} else {
+		return 0;
+	}
+
+ sys_info.size = sizeof(struct cgs_system_info);
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ hw_revision = (uint32_t)sys_info.value;
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ sub_sys_id = (uint32_t)sys_info.value;
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ sub_vendor_id = (uint32_t)sys_info.value;
+
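+	/* Workaround for specific Polaris10 boards (hw revision 0xC7): make
+	 * sure the highest MCLK level runs at a VDDC of at least 1.0 V.
+	 */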
+ if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
+ ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
+ (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
+ (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
+ if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
+ return 0;
+
+ for (i = 0; i < lookup_table->count; i++) {
+ if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
+ dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
+ return 0;
+ }
+ }
+ }
+ return 0;
+}
+
+static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
+{
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
+ uint32_t temp_reg;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
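+	/* Route the VDDC PCC GPIO to the matching CNB_PWRMGT_CNTL control
+	 * field, based on which pin the VBIOS assigned.
+	 */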
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
+ temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
+ switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
+ case 0:
+ temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
+ break;
+ case 1:
+ temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
+ break;
+ case 2:
+ temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
+ break;
+ case 3:
+ temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
+ break;
+ case 4:
+ temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
+ break;
+ default:
+ PP_ASSERT_WITH_CODE(0,
+ "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
+ );
+ break;
+ }
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
+ }
+
+ if (table_info == NULL)
+ return 0;
+
+ if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
+ hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
+ (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
+ (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
+ (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
+
+ table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
+ (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
+
+ table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
+ table_info->cac_dtp_table->usOperatingTempStep = 1;
+ table_info->cac_dtp_table->usOperatingTempHyst = 1;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
+
+ hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
+ table_info->cac_dtp_table->usOperatingTempMinLimit;
+
+ hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
+ table_info->cac_dtp_table->usOperatingTempMaxLimit;
+
+ hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
+ table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
+
+ hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
+ table_info->cac_dtp_table->usOperatingTempStep;
+
+ hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
+ table_info->cac_dtp_table->usTargetOperatingTemp;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ODFuzzyFanControlSupport);
+ }
+
+ return 0;
+}
+
+/**
+ * Change virtual leakage voltage to actual value.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param voltage pointer to the voltage value to patch
+ * @param leakage_table pointer to the leakage table
+ */
+static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
+ uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
+{
+ uint32_t index;
+
+ /* search for leakage voltage ID 0xff01 ~ 0xff08 */
+ for (index = 0; index < leakage_table->count; index++) {
+ /* if this voltage matches a leakage voltage ID */
+ /* patch with actual leakage voltage */
+ if (leakage_table->leakage_id[index] == *voltage) {
+ *voltage = leakage_table->actual_voltage[index];
+ break;
+ }
+ }
+
+ if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
+		printk(KERN_ERR "Voltage value looks like a leakage ID but it was not patched\n");
+}
+
+static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
+ struct phm_clock_voltage_dependency_table *tab)
+{
+ uint16_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab)
+ for (i = 0; i < tab->count; i++)
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+ &data->vddc_leakage);
+
+ return 0;
+}
+
+static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
+ struct phm_clock_voltage_dependency_table *tab)
+{
+ uint16_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab)
+ for (i = 0; i < tab->count; i++)
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+ &data->vddci_leakage);
+
+ return 0;
+}
+
+static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
+ struct phm_vce_clock_voltage_dependency_table *tab)
+{
+ uint16_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab)
+ for (i = 0; i < tab->count; i++)
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+ &data->vddc_leakage);
+
+ return 0;
+}
+
+static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
+ struct phm_uvd_clock_voltage_dependency_table *tab)
+{
+ uint16_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab)
+ for (i = 0; i < tab->count; i++)
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+ &data->vddc_leakage);
+
+ return 0;
+}
+
+static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
+ struct phm_phase_shedding_limits_table *tab)
+{
+ uint16_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab)
+ for (i = 0; i < tab->count; i++)
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
+ &data->vddc_leakage);
+
+ return 0;
+}
+
+static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
+ struct phm_samu_clock_voltage_dependency_table *tab)
+{
+ uint16_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab)
+ for (i = 0; i < tab->count; i++)
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+ &data->vddc_leakage);
+
+ return 0;
+}
+
+static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
+ struct phm_acp_clock_voltage_dependency_table *tab)
+{
+ uint16_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab)
+ for (i = 0; i < tab->count; i++)
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+ &data->vddc_leakage);
+
+ return 0;
+}
+
+static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
+ struct phm_clock_and_voltage_limits *tab)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab) {
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc,
+ &data->vddc_leakage);
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci,
+ &data->vddci_leakage);
+ }
+
+ return 0;
+}
+
+static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
+{
+ uint32_t i;
+ uint32_t vddc;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab) {
+ for (i = 0; i < tab->count; i++) {
+ vddc = (uint32_t)(tab->entries[i].Vddc);
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
+ tab->entries[i].Vddc = (uint16_t)vddc;
+ }
+ }
+
+ return 0;
+}
+
+static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
+{
+ int tmp;
+
+ tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
+ if (tmp)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+ struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
+ struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+ PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
+ "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL);
+ PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
+		"VDDC dependency on SCLK table is empty. This table is mandatory\n", return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
+ "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL);
+ PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
+		"VDDC dependency on MCLK table is empty. This table is mandatory\n", return -EINVAL);
+
+ data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
+ data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+ hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
+ hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
+ allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
+ hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+ if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
+ data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
+ data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+ }
+
+ if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
+ hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
+
+ return 0;
+}
+
+int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data;
+ int result;
+
+ data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
+
+ smu7_patch_voltage_workaround(hwmgr);
+ smu7_init_dpm_defaults(hwmgr);
+
+ /* Get leakage voltage based on leakage ID. */
+ result = smu7_get_evv_voltages(hwmgr);
+
+ if (result) {
+		printk(KERN_ERR "Failed to get EVV voltages. Aborting driver load!\n");
+ return -EINVAL;
+ }
+
+ if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ smu7_complete_dependency_tables(hwmgr);
+ smu7_set_private_data_based_on_pptable_v1(hwmgr);
+ } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ smu7_patch_dependency_tables_with_leakage(hwmgr);
+ smu7_set_private_data_based_on_pptable_v0(hwmgr);
+ }
+
+	/* Initialize Dynamic State Adjustment Rule Settings */
+ result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
+
+ if (0 == result) {
+ struct cgs_system_info sys_info = {0};
+
+ data->is_tlu_enabled = false;
+
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+ SMU7_MAX_HARDWARE_POWERLEVELS;
+ hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
+ hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+ if (result)
+ data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+ else
+ data->pcie_gen_cap = (uint32_t)sys_info.value;
+ if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ data->pcie_spc_cap = 20;
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+ if (result)
+ data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
+ else
+ data->pcie_lane_cap = (uint32_t)sys_info.value;
+
+ hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
+		/* The true clock step depends on the frequency, typically
+		 * 4.5 or 9 MHz. Here we use 5 MHz (500 in 10 kHz units).
+		 */
+ hwmgr->platform_descriptor.clockStep.engineClock = 500;
+ hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+ smu7_thermal_parameter_init(hwmgr);
+ } else {
+ /* Ignore return value in here, we are cleaning up a mess. */
+ phm_hwmgr_backend_fini(hwmgr);
+ }
+
+ return 0;
+}
+
+static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t level, tmp;
+
+ if (!data->pcie_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ level = 0;
+ tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
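+			/* level = index of the highest set bit in the mask */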
+ while (tmp >>= 1)
+ level++;
+
+ if (level)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_ForceLevel, level);
+ }
+ }
+
+ if (!data->sclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ level = 0;
+ tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
+ while (tmp >>= 1)
+ level++;
+
+ if (level)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ (1 << level));
+ }
+ }
+
+ if (!data->mclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ level = 0;
+ tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
+ while (tmp >>= 1)
+ level++;
+
+ if (level)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ (1 << level));
+ }
+ }
+
+ return 0;
+}
+
+static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (hwmgr->pp_table_version == PP_TABLE_V1)
+ phm_apply_dal_min_voltage_request(hwmgr);
+	/* TODO for v0 (Iceland and CI) */
+
+ if (!data->sclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ }
+
+ if (!data->mclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ }
+
+ return 0;
+}
+
+static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (!smum_is_dpm_running(hwmgr))
+ return -EINVAL;
+
+ if (!data->pcie_dpm_key_disabled) {
+ smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_UnForceLevel);
+ }
+
+ return smu7_upload_dpm_level_enable_mask(hwmgr);
+}
+
+static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data =
+ (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t level;
+
+	if (!data->sclk_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+			level = phm_get_lowest_enabled_level(hwmgr,
+					data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+							PPSMC_MSG_SCLKDPM_SetEnabledMask,
+							(1 << level));
+		}
+	}
+
+ if (!data->mclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ level = phm_get_lowest_enabled_level(hwmgr,
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ (1 << level));
+ }
+ }
+
+ if (!data->pcie_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ level = phm_get_lowest_enabled_level(hwmgr,
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask);
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_ForceLevel,
+							level);
+ }
+ }
+
+	return 0;
+}
+
+static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
+ enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu7_force_dpm_highest(hwmgr);
+ if (ret)
+ return ret;
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu7_force_dpm_lowest(hwmgr);
+ if (ret)
+ return ret;
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ ret = smu7_unforce_dpm_levels(hwmgr);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ hwmgr->dpm_level = level;
+
+ return ret;
+}
+
+static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
+{
+ return sizeof(struct smu7_power_state);
+}
+
+static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ struct pp_power_state *request_ps,
+ const struct pp_power_state *current_ps)
+{
+ struct smu7_power_state *smu7_ps =
+ cast_phw_smu7_power_state(&request_ps->hardware);
+ uint32_t sclk;
+ uint32_t mclk;
+ struct PP_Clocks minimum_clocks = {0};
+ bool disable_mclk_switching;
+ bool disable_mclk_switching_for_frame_lock;
+ struct cgs_display_info info = {0};
+ const struct phm_clock_and_voltage_limits *max_limits;
+ uint32_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int32_t count;
+ int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
+
+ data->battery_state = (PP_StateUILabel_Battery ==
+ request_ps->classification.ui_label);
+
+ PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
+ "VI should always have 2 performance levels",
+ );
+
+ max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+ &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
+ &(hwmgr->dyn_state.max_clock_voltage_on_dc);
+
+ /* Cap clock DPM tables at DC MAX if it is in DC. */
+ if (PP_PowerSource_DC == hwmgr->power_source) {
+ for (i = 0; i < smu7_ps->performance_level_count; i++) {
+ if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
+ smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
+ if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
+ smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
+ }
+ }
+
+ smu7_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
+ smu7_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+	/* TODO: result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort); */
+
+ minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
+ minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState)) {
+ max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
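+		/* Stable P-state targets 75% of the maximum AC sclk, snapped
+		 * to the highest DPM entry at or below that value (or the
+		 * lowest entry if none qualifies).
+		 */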
+ stable_pstate_sclk = (max_limits->sclk * 75) / 100;
+
+ for (count = table_info->vdd_dep_on_sclk->count - 1;
+ count >= 0; count--) {
+ if (stable_pstate_sclk >=
+ table_info->vdd_dep_on_sclk->entries[count].clk) {
+ stable_pstate_sclk =
+ table_info->vdd_dep_on_sclk->entries[count].clk;
+ break;
+ }
+ }
+
+ if (count < 0)
+ stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
+
+ stable_pstate_mclk = max_limits->mclk;
+
+ minimum_clocks.engineClock = stable_pstate_sclk;
+ minimum_clocks.memoryClock = stable_pstate_mclk;
+ }
+
+ if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
+ minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
+
+ if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
+ minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
+
+ smu7_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
+
+ if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
+ PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
+ hwmgr->platform_descriptor.overdriveLimit.engineClock),
+ "Overdrive sclk exceeds limit",
+ hwmgr->gfx_arbiter.sclk_over_drive =
+ hwmgr->platform_descriptor.overdriveLimit.engineClock);
+
+ if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
+ smu7_ps->performance_levels[1].engine_clock =
+ hwmgr->gfx_arbiter.sclk_over_drive;
+ }
+
+ if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
+ PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock),
+ "Overdrive mclk exceeds limit",
+ hwmgr->gfx_arbiter.mclk_over_drive =
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock);
+
+ if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
+ smu7_ps->performance_levels[1].memory_clock =
+ hwmgr->gfx_arbiter.mclk_over_drive;
+ }
+
+ disable_mclk_switching_for_frame_lock = phm_cap_enabled(
+ hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
+
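+	/* MCLK switching is disabled when more than one display is active or
+	 * when frame lock requires it.
+	 */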
+ disable_mclk_switching = (1 < info.display_count) ||
+ disable_mclk_switching_for_frame_lock;
+
+ sclk = smu7_ps->performance_levels[0].engine_clock;
+ mclk = smu7_ps->performance_levels[0].memory_clock;
+
+ if (disable_mclk_switching)
+ mclk = smu7_ps->performance_levels
+ [smu7_ps->performance_level_count - 1].memory_clock;
+
+ if (sclk < minimum_clocks.engineClock)
+ sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
+ max_limits->sclk : minimum_clocks.engineClock;
+
+ if (mclk < minimum_clocks.memoryClock)
+ mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
+ max_limits->mclk : minimum_clocks.memoryClock;
+
+ smu7_ps->performance_levels[0].engine_clock = sclk;
+ smu7_ps->performance_levels[0].memory_clock = mclk;
+
+ smu7_ps->performance_levels[1].engine_clock =
+ (smu7_ps->performance_levels[1].engine_clock >=
+ smu7_ps->performance_levels[0].engine_clock) ?
+ smu7_ps->performance_levels[1].engine_clock :
+ smu7_ps->performance_levels[0].engine_clock;
+
+ if (disable_mclk_switching) {
+ if (mclk < smu7_ps->performance_levels[1].memory_clock)
+ mclk = smu7_ps->performance_levels[1].memory_clock;
+
+ smu7_ps->performance_levels[0].memory_clock = mclk;
+ smu7_ps->performance_levels[1].memory_clock = mclk;
+ } else {
+ if (smu7_ps->performance_levels[1].memory_clock <
+ smu7_ps->performance_levels[0].memory_clock)
+ smu7_ps->performance_levels[1].memory_clock =
+ smu7_ps->performance_levels[0].memory_clock;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState)) {
+ for (i = 0; i < smu7_ps->performance_level_count; i++) {
+ smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
+ smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
+ smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
+			smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
+ }
+ }
+ return 0;
+}
+
+static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+{
+ struct pp_power_state *ps;
+ struct smu7_power_state *smu7_ps;
+
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
+ if (low)
+ return smu7_ps->performance_levels[0].memory_clock;
+ else
+ return smu7_ps->performance_levels
+ [smu7_ps->performance_level_count-1].memory_clock;
+}
+
+static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+{
+ struct pp_power_state *ps;
+ struct smu7_power_state *smu7_ps;
+
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
+ if (low)
+ return smu7_ps->performance_levels[0].engine_clock;
+ else
+ return smu7_ps->performance_levels
+ [smu7_ps->performance_level_count-1].engine_clock;
+}
+
+static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
+ struct pp_hw_power_state *hw_ps)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
+ ATOM_FIRMWARE_INFO_V2_2 *fw_info;
+ uint16_t size;
+ uint8_t frev, crev;
+ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+
+ /* First retrieve the Boot clocks and VDDC from the firmware info table.
+ * We assume here that fw_info is unchanged if this call fails.
+ */
+ fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
+ hwmgr->device, index,
+ &size, &frev, &crev);
+ if (!fw_info)
+ /* During a test, there is no firmware info table. */
+ return 0;
+
+ /* Patch the state. */
+ data->vbios_boot_state.sclk_bootup_value =
+ le32_to_cpu(fw_info->ulDefaultEngineClock);
+ data->vbios_boot_state.mclk_bootup_value =
+ le32_to_cpu(fw_info->ulDefaultMemoryClock);
+ data->vbios_boot_state.mvdd_bootup_value =
+ le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
+ data->vbios_boot_state.vddc_bootup_value =
+ le16_to_cpu(fw_info->usBootUpVDDCVoltage);
+ data->vbios_boot_state.vddci_bootup_value =
+ le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
+ data->vbios_boot_state.pcie_gen_bootup_value =
+ smu7_get_current_pcie_speed(hwmgr);
+
+ data->vbios_boot_state.pcie_lane_bootup_value =
+ (uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
+
+ /* set boot power state */
+ ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
+ ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
+ ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
+ ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
+
+ return 0;
+}
+
+static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ unsigned long ret = 0;
+
+ if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ result = pp_tables_get_num_of_entries(hwmgr, &ret);
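+		/* On a failed query, report zero table entries rather than
+		 * propagating an error code.
+		 */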
+ return result ? 0 : ret;
+ } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
+ return result;
+ }
+ return 0;
+}
+
+static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
+ void *state, struct pp_power_state *power_state,
+ void *pp_table, uint32_t classification_flag)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_power_state *smu7_power_state =
+ (struct smu7_power_state *)(&(power_state->hardware));
+ struct smu7_performance_level *performance_level;
+ ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
+ ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
+ (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
+ PPTable_Generic_SubTable_Header *sclk_dep_table =
+ (PPTable_Generic_SubTable_Header *)
+ (((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
+
+ ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
+ (ATOM_Tonga_MCLK_Dependency_Table *)
+ (((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
+
+	/* The following fields are not initialized here: id, orderedList, allStatesList */
+ power_state->classification.ui_label =
+ (le16_to_cpu(state_entry->usClassification) &
+ ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
+ ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
+ power_state->classification.flags = classification_flag;
+ /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
+
+ power_state->classification.temporary_state = false;
+ power_state->classification.to_be_deleted = false;
+
+ power_state->validation.disallowOnDC =
+ (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
+ ATOM_Tonga_DISALLOW_ON_DC));
+
+ power_state->pcie.lanes = 0;
+
+ power_state->display.disableFrameModulation = false;
+ power_state->display.limitRefreshrate = false;
+ power_state->display.enableVariBright =
+ (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
+ ATOM_Tonga_ENABLE_VARIBRIGHT));
+
+ power_state->validation.supportedPowerLevels = 0;
+ power_state->uvd_clocks.VCLK = 0;
+ power_state->uvd_clocks.DCLK = 0;
+ power_state->temperatures.min = 0;
+ power_state->temperatures.max = 0;
+
+ performance_level = &(smu7_power_state->performance_levels
+ [smu7_power_state->performance_level_count++]);
+
+ PP_ASSERT_WITH_CODE(
+ (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
+		"Performance level count exceeds the SMC limit!",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(
+ (smu7_power_state->performance_level_count <=
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
+		"Performance level count exceeds the driver limit!",
+ return -EINVAL);
+
+ /* Performance levels are arranged from low to high. */
+ performance_level->memory_clock = mclk_dep_table->entries
+ [state_entry->ucMemoryClockIndexLow].ulMclk;
+ if (sclk_dep_table->ucRevId == 0)
+ performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexLow].ulSclk;
+ else if (sclk_dep_table->ucRevId == 1)
+ performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexLow].ulSclk;
+ performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
+ state_entry->ucPCIEGenLow);
+ performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
+			state_entry->ucPCIELaneLow);
+
+ performance_level = &(smu7_power_state->performance_levels
+ [smu7_power_state->performance_level_count++]);
+ performance_level->memory_clock = mclk_dep_table->entries
+ [state_entry->ucMemoryClockIndexHigh].ulMclk;
+
+ if (sclk_dep_table->ucRevId == 0)
+ performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexHigh].ulSclk;
+ else if (sclk_dep_table->ucRevId == 1)
+ performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexHigh].ulSclk;
+
+ performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
+ state_entry->ucPCIEGenHigh);
+ performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
+ state_entry->ucPCIELaneHigh);
+
+ return 0;
+}
+
+static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
+ unsigned long entry_index, struct pp_power_state *state)
+{
+ int result;
+ struct smu7_power_state *ps;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
+ table_info->vdd_dep_on_mclk;
+
+ state->hardware.magic = PHM_VIslands_Magic;
+
+ ps = (struct smu7_power_state *)(&state->hardware);
+
+ result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
+ smu7_get_pp_table_entry_callback_func_v1);
+
+	/* This is the earliest point at which we have both the dependency
+	 * tables and the VBIOS boot state, since get_powerplay_table_entry_v1_0
+	 * retrieves the VBIOS boot state. If there is only one VDDCI/MCLK
+	 * level, check that it matches the VBIOS boot state.
+	 */
+ if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
+ if (dep_mclk_table->entries[0].clk !=
+ data->vbios_boot_state.mclk_bootup_value)
+ printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
+					"does not match VBIOS boot MCLK level\n");
+ if (dep_mclk_table->entries[0].vddci !=
+ data->vbios_boot_state.vddci_bootup_value)
+ printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
+					"does not match VBIOS boot VDDCI level\n");
+ }
+
+ /* set DC compatible flag if this state supports DC */
+ if (!state->validation.disallowOnDC)
+ ps->dc_compatible = true;
+
+ if (state->classification.flags & PP_StateClassificationFlag_ACPI)
+ data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
+
+ ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
+ ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
+
+ if (!result) {
+ uint32_t i;
+
+ switch (state->classification.ui_label) {
+ case PP_StateUILabel_Performance:
+ data->use_pcie_performance_levels = true;
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (data->pcie_gen_performance.max <
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_performance.max =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_gen_performance.min >
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_performance.min =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_lane_performance.max <
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_performance.max =
+ ps->performance_levels[i].pcie_lane;
+ if (data->pcie_lane_performance.min >
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_performance.min =
+ ps->performance_levels[i].pcie_lane;
+ }
+ break;
+ case PP_StateUILabel_Battery:
+ data->use_pcie_power_saving_levels = true;
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (data->pcie_gen_power_saving.max <
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_power_saving.max =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_gen_power_saving.min >
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_power_saving.min =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_lane_power_saving.max <
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_power_saving.max =
+ ps->performance_levels[i].pcie_lane;
+
+ if (data->pcie_lane_power_saving.min >
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_power_saving.min =
+ ps->performance_levels[i].pcie_lane;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
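
The Performance/Battery branches above widen a {min, max} pair until it covers every level's PCIe gen and lane. A minimal standalone sketch of that pattern, assuming the range starts out inverted (min high, max low) so the first level initializes both bounds; names here are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

struct perf_range { uint16_t min, max; };

/* Widen the range so it covers val; with min pre-set high and max
 * pre-set low, the first call simply initializes both bounds. */
static void widen_range(struct perf_range *r, uint16_t val)
{
	if (r->max < val)
		r->max = val;
	if (r->min > val)
		r->min = val;
}

int main(void)
{
	struct perf_range r = { UINT16_MAX, 0 };	/* inverted start */

	widen_range(&r, 3);
	widen_range(&r, 1);
	printf("min=%u max=%u\n", r.min, r.max);	/* min=1 max=3 */
	return 0;
}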
+
+static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
+ struct pp_hw_power_state *power_state,
+ unsigned int index, const void *clock_info)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state);
+ const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
+ struct smu7_performance_level *performance_level;
+ uint32_t engine_clock, memory_clock;
+ uint16_t pcie_gen_from_bios;
+
+ engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
+ memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
+
+ if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
+ data->highest_mclk = memory_clock;
+
+ performance_level = &(ps->performance_levels
+ [ps->performance_level_count++]);
+
+ PP_ASSERT_WITH_CODE(
+ (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
+		"Performance levels exceed SMC limit!",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(
+ (ps->performance_level_count <=
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
+		"Performance levels exceed Driver limit!",
+ return -EINVAL);
+
+ /* Performance levels are arranged from low to high. */
+ performance_level->memory_clock = memory_clock;
+ performance_level->engine_clock = engine_clock;
+
+ pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
+
+ performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
+ performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
+
+ return 0;
+}
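
The callback above reassembles each clock from a split VBIOS encoding: an 8-bit high byte shifted over a 16-bit low word, in 10 kHz units. A standalone sketch with an illustrative value:

#include <stdint.h>
#include <stdio.h>

/* Recombine the 8-bit high byte and 16-bit low word of a clock field. */
static uint32_t decode_clock(uint8_t high, uint16_t low)
{
	return ((uint32_t)high << 16) | low;
}

int main(void)
{
	/* 0x01 << 16 | 0x86A0 = 100000 (10 kHz units) = 1000 MHz */
	printf("%u\n", decode_clock(0x01, 0x86A0));
	return 0;
}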
+
+static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
+ unsigned long entry_index, struct pp_power_state *state)
+{
+ int result;
+ struct smu7_power_state *ps;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_clock_voltage_dependency_table *dep_mclk_table =
+ hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+ memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
+
+ state->hardware.magic = PHM_VIslands_Magic;
+
+ ps = (struct smu7_power_state *)(&state->hardware);
+
+ result = pp_tables_get_entry(hwmgr, entry_index, state,
+ smu7_get_pp_table_entry_callback_func_v0);
+
+	/*
+	 * The dependency tables and the VBIOS boot state both become
+	 * available here, since pp_tables_get_entry() retrieves the VBIOS
+	 * boot state. If there is only one VDDCI/MCLK level, check that
+	 * it matches the VBIOS boot state.
+	 */
+ if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
+ if (dep_mclk_table->entries[0].clk !=
+ data->vbios_boot_state.mclk_bootup_value)
+			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
+					"does not match VBIOS boot MCLK level\n");
+		if (dep_mclk_table->entries[0].v !=
+				data->vbios_boot_state.vddci_bootup_value)
+			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
+					"does not match VBIOS boot VDDCI level\n");
+ }
+
+ /* set DC compatible flag if this state supports DC */
+ if (!state->validation.disallowOnDC)
+ ps->dc_compatible = true;
+
+ if (state->classification.flags & PP_StateClassificationFlag_ACPI)
+ data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
+
+ ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
+ ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
+
+ if (!result) {
+ uint32_t i;
+
+ switch (state->classification.ui_label) {
+ case PP_StateUILabel_Performance:
+ data->use_pcie_performance_levels = true;
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (data->pcie_gen_performance.max <
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_performance.max =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_gen_performance.min >
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_performance.min =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_lane_performance.max <
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_performance.max =
+ ps->performance_levels[i].pcie_lane;
+
+ if (data->pcie_lane_performance.min >
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_performance.min =
+ ps->performance_levels[i].pcie_lane;
+ }
+ break;
+ case PP_StateUILabel_Battery:
+ data->use_pcie_power_saving_levels = true;
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (data->pcie_gen_power_saving.max <
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_power_saving.max =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_gen_power_saving.min >
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_power_saving.min =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_lane_power_saving.max <
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_power_saving.max =
+ ps->performance_levels[i].pcie_lane;
+
+ if (data->pcie_lane_power_saving.min >
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_power_saving.min =
+ ps->performance_levels[i].pcie_lane;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
+ unsigned long entry_index, struct pp_power_state *state)
+{
+ if (hwmgr->pp_table_version == PP_TABLE_V0)
+ return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
+ else if (hwmgr->pp_table_version == PP_TABLE_V1)
+ return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
+
+ return 0;
+}
+
+static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
+{
+ uint32_t sclk, mclk, activity_percent;
+ uint32_t offset;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ switch (idx) {
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+ sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ *value = sclk;
+ return 0;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+ mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ *value = mclk;
+ return 0;
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+ SMU_SoftRegisters,
+ AverageGraphicsActivity);
+
+ activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
+ activity_percent += 0x80;
+ activity_percent >>= 8;
+ *value = activity_percent > 100 ? 100 : activity_percent;
+ return 0;
+ case AMDGPU_PP_SENSOR_GPU_TEMP:
+ *value = smu7_thermal_get_temperature(hwmgr);
+ return 0;
+ case AMDGPU_PP_SENSOR_UVD_POWER:
+ *value = data->uvd_power_gated ? 0 : 1;
+ return 0;
+ case AMDGPU_PP_SENSOR_VCE_POWER:
+ *value = data->vce_power_gated ? 0 : 1;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
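
The GPU_LOAD case above treats the SMC's AverageGraphicsActivity register as Q8.8 fixed point: adding 0x80 (half an LSB) before the 8-bit shift rounds to the nearest integer, and the result is clamped to 100 percent. A self-contained sketch:

#include <stdint.h>
#include <stdio.h>

/* Round a Q8.8 fixed-point value to the nearest integer percent,
 * clamped to 100, mirroring the GPU_LOAD path above. */
static uint32_t q88_to_percent(uint32_t q88)
{
	uint32_t v = (q88 + 0x80) >> 8;

	return v > 100 ? 100 : v;
}

int main(void)
{
	printf("%u\n", q88_to_percent(0x3280));	/* 50.5 -> 51 */
	printf("%u\n", q88_to_percent(0x7FFF));	/* ~128 -> clamped to 100 */
	return 0;
}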
+
+static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
+{
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ const struct smu7_power_state *smu7_ps =
+ cast_const_phw_smu7_power_state(states->pnew_state);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ uint32_t sclk = smu7_ps->performance_levels
+ [smu7_ps->performance_level_count - 1].engine_clock;
+ struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ uint32_t mclk = smu7_ps->performance_levels
+ [smu7_ps->performance_level_count - 1].memory_clock;
+ struct PP_Clocks min_clocks = {0};
+ uint32_t i;
+ struct cgs_display_info info = {0};
+
+ data->need_update_smu7_dpm_table = 0;
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (sclk == sclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= sclk_table->count)
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ else {
+ /* TODO: Check SCLK in DAL's minimum clocks
+ * in case DeepSleep divider update is required.
+ */
+ if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
+ (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
+ data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
+ data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
+ }
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (mclk == mclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= mclk_table->count)
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (data->display_timing.num_existing_displays != info.display_count)
+ data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
+
+ return 0;
+}
+
+static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
+ const struct smu7_power_state *smu7_ps)
+{
+ uint32_t i;
+ uint32_t sclk, max_sclk = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+
+ for (i = 0; i < smu7_ps->performance_level_count; i++) {
+ sclk = smu7_ps->performance_levels[i].engine_clock;
+ if (max_sclk < sclk)
+ max_sclk = sclk;
+ }
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
+ return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
+ dpm_table->pcie_speed_table.dpm_levels
+ [dpm_table->pcie_speed_table.count - 1].value :
+ dpm_table->pcie_speed_table.dpm_levels[i].value);
+ }
+
+ return 0;
+}
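
The lookup above reuses the matching SCLK level index as a PCIe-table index, clamping to the last PCIe level when that table is shorter. A simplified sketch (types and names are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint16_t pcie_speed_for_index(const uint16_t *pcie_levels,
				     uint32_t pcie_count, uint32_t sclk_idx)
{
	/* Clamp to the top PCIe level when the SCLK index runs past it. */
	uint32_t idx = sclk_idx >= pcie_count ? pcie_count - 1 : sclk_idx;

	return pcie_levels[idx];
}

int main(void)
{
	uint16_t pcie[] = { 0, 1, 2 };	/* Gen1..Gen3 DPM levels */

	printf("%u\n", pcie_speed_for_index(pcie, 3, 5));	/* clamps to 2 */
	return 0;
}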
+
+static int smu7_request_link_speed_change_before_state_change(
+ struct pp_hwmgr *hwmgr, const void *input)
+{
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ const struct smu7_power_state *smu7_nps =
+ cast_const_phw_smu7_power_state(states->pnew_state);
+ const struct smu7_power_state *polaris10_cps =
+ cast_const_phw_smu7_power_state(states->pcurrent_state);
+
+ uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
+ uint16_t current_link_speed;
+
+ if (data->force_pcie_gen == PP_PCIEGenInvalid)
+ current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
+ else
+ current_link_speed = data->force_pcie_gen;
+
+ data->force_pcie_gen = PP_PCIEGenInvalid;
+ data->pspp_notify_required = false;
+
+ if (target_link_speed > current_link_speed) {
+ switch (target_link_speed) {
+ case PP_PCIEGen3:
+ if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
+ break;
+ data->force_pcie_gen = PP_PCIEGen2;
+ if (current_link_speed == PP_PCIEGen2)
+ break;
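+			/* fall through: try Gen2 if the Gen3 request was rejected */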
+ case PP_PCIEGen2:
+ if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
+ break;
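+			/* fall through: fall back to the current PCIe speed */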
+ default:
+ data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
+ break;
+ }
+ } else {
+ if (target_link_speed < current_link_speed)
+ data->pspp_notify_required = true;
+ }
+
+ return 0;
+}
+
+static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (0 == data->need_update_smu7_dpm_table)
+ return 0;
+
+ if ((0 == data->sclk_dpm_key_disabled) &&
+ (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to freeze SCLK DPM when DPM is disabled",
+ );
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_FreezeLevel),
+ "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
+ return -EINVAL);
+ }
+
+ if ((0 == data->mclk_dpm_key_disabled) &&
+ (data->need_update_smu7_dpm_table &
+ DPMTABLE_OD_UPDATE_MCLK)) {
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to freeze MCLK DPM when DPM is disabled",
+ );
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_FreezeLevel),
+ "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
+ return -EINVAL);
+ }
+
+ return 0;
+}
+
+static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
+ struct pp_hwmgr *hwmgr, const void *input)
+{
+ int result = 0;
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ const struct smu7_power_state *smu7_ps =
+ cast_const_phw_smu7_power_state(states->pnew_state);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t sclk = smu7_ps->performance_levels
+ [smu7_ps->performance_level_count - 1].engine_clock;
+ uint32_t mclk = smu7_ps->performance_levels
+ [smu7_ps->performance_level_count - 1].memory_clock;
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+
+ struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
+ uint32_t dpm_count, clock_percent;
+ uint32_t i;
+
+ if (0 == data->need_update_smu7_dpm_table)
+ return 0;
+
+ if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+ dpm_table->sclk_table.dpm_levels
+ [dpm_table->sclk_table.count - 1].value = sclk;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
+ /* Need to do calculation based on the golden DPM table
+ * as the Heatmap GPU Clock axis is also based on the default values
+ */
+ PP_ASSERT_WITH_CODE(
+ (golden_dpm_table->sclk_table.dpm_levels
+ [golden_dpm_table->sclk_table.count - 1].value != 0),
+ "Divide by 0!",
+ return -EINVAL);
+ dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
+
+ for (i = dpm_count; i > 1; i--) {
+ if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
+ clock_percent =
+ ((sclk
+ - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
+ ) * 100)
+ / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
+
+ dpm_table->sclk_table.dpm_levels[i].value =
+ golden_dpm_table->sclk_table.dpm_levels[i].value +
+ (golden_dpm_table->sclk_table.dpm_levels[i].value *
+ clock_percent)/100;
+
+ } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
+ clock_percent =
+ ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
+ - sclk) * 100)
+ / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
+
+ dpm_table->sclk_table.dpm_levels[i].value =
+ golden_dpm_table->sclk_table.dpm_levels[i].value -
+ (golden_dpm_table->sclk_table.dpm_levels[i].value *
+ clock_percent) / 100;
+ } else
+ dpm_table->sclk_table.dpm_levels[i].value =
+ golden_dpm_table->sclk_table.dpm_levels[i].value;
+ }
+ }
+ }
+
+ if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
+ dpm_table->mclk_table.dpm_levels
+ [dpm_table->mclk_table.count - 1].value = mclk;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
+
+ PP_ASSERT_WITH_CODE(
+ (golden_dpm_table->mclk_table.dpm_levels
+ [golden_dpm_table->mclk_table.count-1].value != 0),
+ "Divide by 0!",
+ return -EINVAL);
+ dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
+ for (i = dpm_count; i > 1; i--) {
+ if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
+ clock_percent = ((mclk -
+ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
+ / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
+
+ dpm_table->mclk_table.dpm_levels[i].value =
+ golden_dpm_table->mclk_table.dpm_levels[i].value +
+ (golden_dpm_table->mclk_table.dpm_levels[i].value *
+ clock_percent) / 100;
+
+ } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
+ clock_percent = (
+ (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
+ * 100)
+ / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
+
+ dpm_table->mclk_table.dpm_levels[i].value =
+ golden_dpm_table->mclk_table.dpm_levels[i].value -
+ (golden_dpm_table->mclk_table.dpm_levels[i].value *
+ clock_percent) / 100;
+ } else
+ dpm_table->mclk_table.dpm_levels[i].value =
+ golden_dpm_table->mclk_table.dpm_levels[i].value;
+ }
+ }
+ }
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
+ result = smum_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
+ return result);
+ }
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
+		/* populate MCLK dpm table to SMU7 */
+ result = smum_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
+ return result);
+ }
+
+ return result;
+}
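
A standalone sketch of the golden-table scaling above: when the top level is moved N percent away from the default (golden) value, every intermediate level is shifted by the same percentage so the DPM curve keeps its shape. The integer math mirrors the driver; the sample values are illustrative:

#include <stdint.h>
#include <stdio.h>

static uint32_t scale_level(uint32_t golden_level, uint32_t golden_top,
			    uint32_t new_top)
{
	uint32_t pct;

	if (new_top > golden_top) {
		pct = (new_top - golden_top) * 100 / golden_top;
		return golden_level + golden_level * pct / 100;
	}
	pct = (golden_top - new_top) * 100 / golden_top;
	return golden_level - golden_level * pct / 100;
}

int main(void)
{
	/* Top level raised 10% (100000 -> 110000, 10 kHz units):
	 * a mid level at 60000 scales to 66000. */
	printf("%u\n", scale_level(60000, 100000, 110000));
	return 0;
}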
+
+static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
+ struct smu7_single_dpm_table *dpm_table,
+ uint32_t low_limit, uint32_t high_limit)
+{
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->count; i++) {
+ if ((dpm_table->dpm_levels[i].value < low_limit)
+ || (dpm_table->dpm_levels[i].value > high_limit))
+ dpm_table->dpm_levels[i].enabled = false;
+ else
+ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ return 0;
+}
+
+static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
+ const struct smu7_power_state *smu7_ps)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t high_limit_count;
+
+ PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
+			"Power state does not have any performance levels!",
+ return -EINVAL);
+
+ high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
+
+ smu7_trim_single_dpm_states(hwmgr,
+ &(data->dpm_table.sclk_table),
+ smu7_ps->performance_levels[0].engine_clock,
+ smu7_ps->performance_levels[high_limit_count].engine_clock);
+
+ smu7_trim_single_dpm_states(hwmgr,
+ &(data->dpm_table.mclk_table),
+ smu7_ps->performance_levels[0].memory_clock,
+ smu7_ps->performance_levels[high_limit_count].memory_clock);
+
+ return 0;
+}
+
+static int smu7_generate_dpm_level_enable_mask(
+ struct pp_hwmgr *hwmgr, const void *input)
+{
+ int result;
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ const struct smu7_power_state *smu7_ps =
+ cast_const_phw_smu7_power_state(states->pnew_state);
+
+ result = smu7_trim_dpm_states(hwmgr, smu7_ps);
+ if (result)
+ return result;
+
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
+
+ return 0;
+}
+
+static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (0 == data->need_update_smu7_dpm_table)
+ return 0;
+
+ if ((0 == data->sclk_dpm_key_disabled) &&
+ (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+				"Trying to unfreeze SCLK DPM when DPM is disabled",
+ );
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+ "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
+ return -EINVAL);
+ }
+
+ if ((0 == data->mclk_dpm_key_disabled) &&
+ (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+				"Trying to unfreeze MCLK DPM when DPM is disabled",
+ );
+		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_MCLKDPM_UnfreezeLevel),
+ "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
+ return -EINVAL);
+ }
+
+ data->need_update_smu7_dpm_table = 0;
+
+ return 0;
+}
+
+static int smu7_notify_link_speed_change_after_state_change(
+ struct pp_hwmgr *hwmgr, const void *input)
+{
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ const struct smu7_power_state *smu7_ps =
+ cast_const_phw_smu7_power_state(states->pnew_state);
+ uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
+ uint8_t request;
+
+ if (data->pspp_notify_required) {
+ if (target_link_speed == PP_PCIEGen3)
+ request = PCIE_PERF_REQ_GEN3;
+ else if (target_link_speed == PP_PCIEGen2)
+ request = PCIE_PERF_REQ_GEN2;
+ else
+ request = PCIE_PERF_REQ_GEN1;
+
+ if (request == PCIE_PERF_REQ_GEN1 &&
+ smu7_get_current_pcie_speed(hwmgr) > 0)
+ return 0;
+
+ if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
+			if (PP_PCIEGen2 == target_link_speed)
+				printk(KERN_ERR "PSPP request to switch to Gen2 from Gen3 failed!\n");
+			else
+				printk(KERN_ERR "PSPP request to switch to Gen1 from Gen2 failed!\n");
+ }
+ }
+
+ return 0;
+}
+
+static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+ return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+}
+
+static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+ int tmp_result, result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to find DPM states clocks in DPM table!",
+ result = tmp_result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PCIEPerformanceRequest)) {
+ tmp_result =
+ smu7_request_link_speed_change_before_state_change(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to request link speed change before state change!",
+ result = tmp_result);
+ }
+
+ tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
+
+ tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to populate and upload SCLK MCLK DPM levels!",
+ result = tmp_result);
+
+ tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to generate DPM level enabled mask!",
+ result = tmp_result);
+
+ tmp_result = smum_update_sclk_threshold(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to update SCLK threshold!",
+ result = tmp_result);
+
+ tmp_result = smu7_notify_smc_display(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to notify smc display settings!",
+ result = tmp_result);
+
+ tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to unfreeze SCLK MCLK DPM!",
+ result = tmp_result);
+
+ tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to upload DPM level enabled mask!",
+ result = tmp_result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PCIEPerformanceRequest)) {
+ tmp_result =
+ smu7_notify_link_speed_change_after_state_change(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to notify link speed change after state change!",
+ result = tmp_result);
+ }
+ data->apply_optimized_settings = false;
+ return result;
+}
+
+static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
+{
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
+
+ if (phm_is_hw_access_blocked(hwmgr))
+ return 0;
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
+}
+
+int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+{
+ PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
+
+ return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
+}
+
+int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+{
+ uint32_t num_active_displays = 0;
+ struct cgs_display_info info = {0};
+
+ info.mode_info = NULL;
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ num_active_displays = info.display_count;
+
+	if (num_active_displays > 1 && !hwmgr->display_config.multi_monitor_in_sync)
+ smu7_notify_smc_display_change(hwmgr, false);
+
+ return 0;
+}
+
+/**
+* Programs the display gap
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always OK
+*/
+int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t num_active_displays = 0;
+ uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
+ uint32_t display_gap2;
+ uint32_t pre_vbi_time_in_us;
+ uint32_t frame_time_in_us;
+ uint32_t ref_clock;
+ uint32_t refresh_rate = 0;
+ struct cgs_display_info info = {0};
+ struct cgs_mode_info mode_info;
+
+ info.mode_info = &mode_info;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ num_active_displays = info.display_count;
+
+ display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+ ref_clock = mode_info.ref_clock;
+ refresh_rate = mode_info.refresh_rate;
+
+ if (0 == refresh_rate)
+ refresh_rate = 60;
+
+ frame_time_in_us = 1000000 / refresh_rate;
+
+ pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+ data->frame_time_x2 = frame_time_in_us * 2 / 100;
+
+ display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+ SMU_SoftRegisters,
+ PreVBlankGap), 0x64);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+ SMU_SoftRegisters,
+ VBlankTimeout),
+ (frame_time_in_us - pre_vbi_time_in_us));
+
+ return 0;
+}
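
Worked numbers for the timing math above, assuming a 60 Hz mode with 1500 us of vblank (illustrative values, not from any particular mode):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t refresh_rate = 60;
	uint32_t vblank_time_us = 1500;
	uint32_t frame_time_in_us = 1000000 / refresh_rate;	/* 16666 */
	uint32_t pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time_us;	/* 14966 */
	uint32_t frame_time_x2 = frame_time_in_us * 2 / 100;	/* 333 */

	printf("%u %u %u\n", frame_time_in_us, pre_vbi_time_in_us,
	       frame_time_x2);
	return 0;
}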
+
+int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+{
+ return smu7_program_display_gap(hwmgr);
+}
+
+/**
+* Set maximum target operating fan output RPM
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @param us_max_fan_rpm the maximum operating fan RPM value.
+* @return The response that came from the SMC.
+*/
+static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
+{
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
+
+ if (phm_is_hw_access_blocked(hwmgr))
+ return 0;
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
+}
+
+int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+ const void *thermal_interrupt_info)
+{
+ return 0;
+}
+
+bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ bool is_update_required = false;
+ struct cgs_display_info info = {0, 0, NULL};
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (data->display_timing.num_existing_displays != info.display_count)
+ is_update_required = true;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+ if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr &&
+ (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
+ hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
+ is_update_required = true;
+ }
+ return is_update_required;
+}
+
+static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
+ const struct smu7_performance_level *pl2)
+{
+ return ((pl1->memory_clock == pl2->memory_clock) &&
+ (pl1->engine_clock == pl2->engine_clock) &&
+ (pl1->pcie_gen == pl2->pcie_gen) &&
+ (pl1->pcie_lane == pl2->pcie_lane));
+}
+
+int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+{
+ const struct smu7_power_state *psa = cast_const_phw_smu7_power_state(pstate1);
+ const struct smu7_power_state *psb = cast_const_phw_smu7_power_state(pstate2);
+ int i;
+
+ if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
+ return -EINVAL;
+
+	/* If the two states don't even have the same number of performance levels, they cannot be the same state. */
+ if (psa->performance_level_count != psb->performance_level_count) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < psa->performance_level_count; i++) {
+ if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
+			/* If we have found even one performance level pair that is different, the states are different. */
+ *equal = false;
+ return 0;
+ }
+ }
+
+	/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+ *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
+ *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
+ *equal &= (psa->sclk_threshold == psb->sclk_threshold);
+
+ return 0;
+}
+
+int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t vbios_version;
+ uint32_t tmp;
+
+ /* Read MC indirect register offset 0x9F bits [3:0] to see
+ * if VBIOS has already loaded a full version of MC ucode
+ * or not.
+ */
+
+ smu7_get_mc_microcode_version(hwmgr);
+ vbios_version = hwmgr->microcode_version_info.MC & 0xf;
+
+ data->need_long_memory_training = false;
+
+ cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
+ ixMC_IO_DEBUG_UP_13);
+ tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
+
+ if (tmp & (1 << 23)) {
+ data->mem_latency_high = MEM_LATENCY_HIGH;
+ data->mem_latency_low = MEM_LATENCY_LOW;
+ } else {
+ data->mem_latency_high = 330;
+ data->mem_latency_low = 330;
+ }
+
+ return 0;
+}
+
+static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ data->clock_registers.vCG_SPLL_FUNC_CNTL =
+ cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
+ data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
+ cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
+ data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
+ cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
+ data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
+ cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
+ data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
+ cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
+ data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
+ cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
+ data->clock_registers.vDLL_CNTL =
+ cgs_read_register(hwmgr->device, mmDLL_CNTL);
+ data->clock_registers.vMCLK_PWRMGT_CNTL =
+ cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
+ data->clock_registers.vMPLL_AD_FUNC_CNTL =
+ cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
+ data->clock_registers.vMPLL_DQ_FUNC_CNTL =
+ cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
+ data->clock_registers.vMPLL_FUNC_CNTL =
+ cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
+ data->clock_registers.vMPLL_FUNC_CNTL_1 =
+ cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
+ data->clock_registers.vMPLL_FUNC_CNTL_2 =
+ cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
+ data->clock_registers.vMPLL_SS1 =
+ cgs_read_register(hwmgr->device, mmMPLL_SS1);
+ data->clock_registers.vMPLL_SS2 =
+ cgs_read_register(hwmgr->device, mmMPLL_SS2);
+	return 0;
+}
+
+/**
+ * Find out if memory is GDDR5.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0
+ */
+static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t temp;
+
+ temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
+
+ data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
+ ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
+ MC_SEQ_MISC0_GDDR5_SHIFT));
+
+ return 0;
+}
+
+/**
+ * Enables Dynamic Power Management by SMC
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0
+ */
+static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
+{
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, STATIC_PM_EN, 1);
+
+ return 0;
+}
+
+/**
+ * Initialize PowerGating States for different engines
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0
+ */
+static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ data->uvd_power_gated = false;
+ data->vce_power_gated = false;
+ data->samu_power_gated = false;
+
+ return 0;
+}
+
+static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ data->low_sclk_interrupt_threshold = 0;
+ return 0;
+}
+
+int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+ int tmp_result, result = 0;
+
+ smu7_upload_mc_firmware(hwmgr);
+
+ tmp_result = smu7_read_clock_registers(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to read clock registers!", result = tmp_result);
+
+ tmp_result = smu7_get_memory_type(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to get memory type!", result = tmp_result);
+
+ tmp_result = smu7_enable_acpi_power_management(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable ACPI power management!", result = tmp_result);
+
+ tmp_result = smu7_init_power_gate_state(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to init power gate state!", result = tmp_result);
+
+ tmp_result = smu7_get_mc_microcode_version(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to get MC microcode version!", result = tmp_result);
+
+ tmp_result = smu7_init_sclk_threshold(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to init sclk threshold!", result = tmp_result);
+
+ return result;
+}
+
+static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, uint32_t mask)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_SCLK:
+ if (!data->sclk_dpm_key_disabled)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+ break;
+ case PP_MCLK:
+ if (!data->mclk_dpm_key_disabled)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+ break;
+ case PP_PCIE:
+ {
+ uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ uint32_t level = 0;
+
+ while (tmp >>= 1)
+ level++;
+
+ if (!data->pcie_dpm_key_disabled)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_ForceLevel,
+ level);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
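
The PP_PCIE branch above reduces the masked enable word to the index of its highest set bit with a shift loop; a minimal sketch:

#include <stdint.h>
#include <stdio.h>

/* Index of the highest set bit (0 for an input of 0 or 1). */
static uint32_t highest_level(uint32_t mask)
{
	uint32_t level = 0;

	while (mask >>= 1)
		level++;
	return level;
}

int main(void)
{
	printf("%u\n", highest_level(0x5));	/* bits 0 and 2 set -> 2 */
	return 0;
}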
+
+static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, char *buf)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
+ int i, now, size = 0;
+ uint32_t clock, pcie_speed;
+
+ switch (type) {
+ case PP_SCLK:
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+ clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (clock > sclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < sclk_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMHz %s\n",
+ i, sclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_MCLK:
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+ clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (clock > mclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < mclk_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMHz %s\n",
+ i, mclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_PCIE:
+ pcie_speed = smu7_get_current_pcie_speed(hwmgr);
+ for (i = 0; i < pcie_table->count; i++) {
+ if (pcie_speed != pcie_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < pcie_table->count; i++)
+ size += sprintf(buf + size, "%d: %s %s\n", i,
+				(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
+				(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
+				(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
+ (i == now) ? "*" : "");
+ break;
+ default:
+ break;
+ }
+ return size;
+}
+
+static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+ if (mode) {
+ /* stop auto-manage */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl))
+ smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+ smu7_fan_ctrl_set_static_mode(hwmgr, mode);
+	} else {
+		/* restart auto-manage */
+		smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
+	}
+
+ return 0;
+}
+
+static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr->fan_ctrl_is_in_default_mode)
+ return hwmgr->fan_ctrl_default_mode;
+ else
+ return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL2, FDO_PWM_MODE);
+}
+
+static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct smu7_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ int value;
+
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
+
+static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ struct pp_power_state *ps;
+ struct smu7_power_state *smu7_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
+ smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+ value / 100 +
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return 0;
+}
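
A round trip of the overdrive arithmetic above: setting N percent raises the top engine clock N% over the golden value, and the getter recovers N from the two table tops (integer division aside). Sample values are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t golden_top = 100000;	/* 1000 MHz in 10 kHz units */
	uint32_t od = 15;		/* requested percent, capped at 20 */

	uint32_t new_top = golden_top * od / 100 + golden_top;	/* 115000 */
	uint32_t read_back = (new_top - golden_top) * 100 / golden_top;

	printf("%u %u\n", new_top, read_back);	/* 115000 15 */
	return 0;
}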
+
+static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct smu7_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ int value;
+
+ value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ struct pp_power_state *ps;
+ struct smu7_power_state *smu7_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
+ smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+ value / 100 +
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return 0;
+}
+
+
+static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+ int i;
+
+ if (table_info == NULL)
+ return -EINVAL;
+
+ dep_sclk_table = table_info->vdd_dep_on_sclk;
+
+ for (i = 0; i < dep_sclk_table->count; i++) {
+ clocks->clock[i] = dep_sclk_table->entries[i].clk;
+ clocks->count++;
+ }
+ return 0;
+}
+
+static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
+ return data->mem_latency_high;
+ else if (clk >= MEM_FREQ_HIGH_LATENCY)
+ return data->mem_latency_low;
+ else
+ return MEM_LATENCY_ERR;
+}
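
A sketch of the latency bucketing above, with placeholder thresholds standing in for the driver's MEM_FREQ_LOW_LATENCY and MEM_FREQ_HIGH_LATENCY constants (whose real values are defined elsewhere):

#include <stdint.h>
#include <stdio.h>

#define LOW_LAT_THRESH	25000	/* placeholder, not the driver's value */
#define HIGH_LAT_THRESH	80000	/* placeholder, not the driver's value */
#define LAT_ERR		0xFFFF

static uint32_t mem_latency(uint32_t clk, uint32_t lat_high, uint32_t lat_low)
{
	if (clk >= LOW_LAT_THRESH && clk < HIGH_LAT_THRESH)
		return lat_high;	/* mid band: high latency */
	else if (clk >= HIGH_LAT_THRESH)
		return lat_low;		/* fast clocks: low latency */
	return LAT_ERR;			/* below the low threshold */
}

int main(void)
{
	printf("%u\n", mem_latency(50000, 330, 25));	/* mid band -> 330 */
	return 0;
}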
+
+static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
+ int i;
+
+ if (table_info == NULL)
+ return -EINVAL;
+
+ dep_mclk_table = table_info->vdd_dep_on_mclk;
+
+ for (i = 0; i < dep_mclk_table->count; i++) {
+ clocks->clock[i] = dep_mclk_table->entries[i].clk;
+ clocks->latency[i] = smu7_get_mem_latency(hwmgr,
+ dep_mclk_table->entries[i].clk);
+ clocks->count++;
+ }
+ return 0;
+}
+
+static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks)
+{
+ switch (type) {
+ case amd_pp_sys_clock:
+ smu7_get_sclks(hwmgr, clocks);
+ break;
+ case amd_pp_mem_clock:
+ smu7_get_mclks(hwmgr, clocks);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
+ .backend_init = &smu7_hwmgr_backend_init,
+ .backend_fini = &phm_hwmgr_backend_fini,
+ .asic_setup = &smu7_setup_asic_task,
+ .dynamic_state_management_enable = &smu7_enable_dpm_tasks,
+ .apply_state_adjust_rules = smu7_apply_state_adjust_rules,
+ .force_dpm_level = &smu7_force_dpm_level,
+ .power_state_set = smu7_set_power_state_tasks,
+ .get_power_state_size = smu7_get_power_state_size,
+ .get_mclk = smu7_dpm_get_mclk,
+ .get_sclk = smu7_dpm_get_sclk,
+ .patch_boot_state = smu7_dpm_patch_boot_state,
+ .get_pp_table_entry = smu7_get_pp_table_entry,
+ .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
+ .powerdown_uvd = smu7_powerdown_uvd,
+ .powergate_uvd = smu7_powergate_uvd,
+ .powergate_vce = smu7_powergate_vce,
+ .disable_clock_power_gating = smu7_disable_clock_power_gating,
+ .update_clock_gatings = smu7_update_clock_gatings,
+ .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
+ .display_config_changed = smu7_display_configuration_changed_task,
+ .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
+ .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
+ .get_temperature = smu7_thermal_get_temperature,
+ .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
+ .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
+ .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
+ .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
+ .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
+ .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
+ .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
+ .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
+ .register_internal_thermal_interrupt = smu7_register_internal_thermal_interrupt,
+ .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
+ .check_states_equal = smu7_check_states_equal,
+ .set_fan_control_mode = smu7_set_fan_control_mode,
+ .get_fan_control_mode = smu7_get_fan_control_mode,
+ .force_clock_level = smu7_force_clock_level,
+ .print_clock_levels = smu7_print_clock_levels,
+ .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
+ .get_sclk_od = smu7_get_sclk_od,
+ .set_sclk_od = smu7_set_sclk_od,
+ .get_mclk_od = smu7_get_mclk_od,
+ .set_mclk_od = smu7_set_mclk_od,
+ .get_clock_by_type = smu7_get_clock_by_type,
+ .read_sensor = smu7_read_sensor,
+};
+
+uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
+ uint32_t clock_insr)
+{
+ uint8_t i;
+ uint32_t temp;
+ uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
+
+ PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
+ for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
+ temp = clock >> i;
+
+ if (temp >= min || i == 0)
+ break;
+ }
+ return i;
+}
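
A standalone sketch of the divider search above: pick the largest right shift whose result still clears the stutter floor (SMU7_MINIMUM_ENGINE_CLOCK, 2500 in 10 kHz units per the header below):

#include <stdint.h>
#include <stdio.h>

static uint8_t sleep_divider_id(uint32_t clock, uint32_t floor)
{
	uint8_t i;

	/* Walk down from the maximum divider id (5 in this patch). */
	for (i = 5; i > 0; i--)
		if ((clock >> i) >= floor)
			break;
	return i;
}

int main(void)
{
	/* 60000 >> 4 = 3750 >= 2500, so divider id 4 is chosen. */
	printf("%u\n", sleep_divider_id(60000, 2500));
	return 0;
}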
+
+int smu7_hwmgr_init(struct pp_hwmgr *hwmgr)
+{
+ int ret = 0;
+
+ hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
+ if (hwmgr->pp_table_version == PP_TABLE_V0)
+ hwmgr->pptable_func = &pptable_funcs;
+ else if (hwmgr->pp_table_version == PP_TABLE_V1)
+ hwmgr->pptable_func = &pptable_v1_0_funcs;
+
+ pp_smu7_thermal_initialize(hwmgr);
+ return ret;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index 33c33947e827..27e7f76ad8a6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -21,81 +21,100 @@
*
*/
-#ifndef POLARIS10_HWMGR_H
-#define POLARIS10_HWMGR_H
+#ifndef _SMU7_HWMGR_H
+#define _SMU7_HWMGR_H
#include "hwmgr.h"
-#include "smu74.h"
-#include "smu74_discrete.h"
#include "ppatomctrl.h"
-#include "polaris10_ppsmc.h"
-#include "polaris10_powertune.h"
-#define POLARIS10_MAX_HARDWARE_POWERLEVELS 2
+#define SMU7_MAX_HARDWARE_POWERLEVELS 2
-#define POLARIS10_VOLTAGE_CONTROL_NONE 0x0
-#define POLARIS10_VOLTAGE_CONTROL_BY_GPIO 0x1
-#define POLARIS10_VOLTAGE_CONTROL_BY_SVID2 0x2
-#define POLARIS10_VOLTAGE_CONTROL_MERGED 0x3
+#define SMU7_VOLTAGE_CONTROL_NONE 0x0
+#define SMU7_VOLTAGE_CONTROL_BY_GPIO 0x1
+#define SMU7_VOLTAGE_CONTROL_BY_SVID2 0x2
+#define SMU7_VOLTAGE_CONTROL_MERGED 0x3
#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
#define DPMTABLE_UPDATE_SCLK 0x00000004
#define DPMTABLE_UPDATE_MCLK 0x00000008
-struct polaris10_performance_level {
+enum gpu_pt_config_reg_type {
+ GPU_CONFIGREG_MMR = 0,
+ GPU_CONFIGREG_SMC_IND,
+ GPU_CONFIGREG_DIDT_IND,
+ GPU_CONFIGREG_GC_CAC_IND,
+ GPU_CONFIGREG_CACHE,
+ GPU_CONFIGREG_MAX
+};
+
+struct gpu_pt_config_reg {
+ uint32_t offset;
+ uint32_t mask;
+ uint32_t shift;
+ uint32_t value;
+ enum gpu_pt_config_reg_type type;
+};
+
+struct smu7_performance_level {
uint32_t memory_clock;
uint32_t engine_clock;
uint16_t pcie_gen;
uint16_t pcie_lane;
};
-struct polaris10_uvd_clocks {
+struct smu7_thermal_temperature_setting {
+ long temperature_low;
+ long temperature_high;
+ long temperature_shutdown;
+};
+
+struct smu7_uvd_clocks {
uint32_t vclk;
uint32_t dclk;
};
-struct polaris10_vce_clocks {
+struct smu7_vce_clocks {
uint32_t evclk;
uint32_t ecclk;
};
-struct polaris10_power_state {
+struct smu7_power_state {
uint32_t magic;
- struct polaris10_uvd_clocks uvd_clks;
- struct polaris10_vce_clocks vce_clks;
+ struct smu7_uvd_clocks uvd_clks;
+ struct smu7_vce_clocks vce_clks;
uint32_t sam_clk;
uint16_t performance_level_count;
bool dc_compatible;
uint32_t sclk_threshold;
- struct polaris10_performance_level performance_levels[POLARIS10_MAX_HARDWARE_POWERLEVELS];
+ struct smu7_performance_level performance_levels[SMU7_MAX_HARDWARE_POWERLEVELS];
};
-struct polaris10_dpm_level {
+struct smu7_dpm_level {
bool enabled;
uint32_t value;
uint32_t param1;
};
-#define POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define SMU7_MAX_DEEPSLEEP_DIVIDER_ID 5
#define MAX_REGULAR_DPM_NUMBER 8
-#define POLARIS10_MINIMUM_ENGINE_CLOCK 2500
+#define SMU7_MINIMUM_ENGINE_CLOCK 2500
-struct polaris10_single_dpm_table {
+struct smu7_single_dpm_table {
uint32_t count;
- struct polaris10_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
+ struct smu7_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
};
-struct polaris10_dpm_table {
- struct polaris10_single_dpm_table sclk_table;
- struct polaris10_single_dpm_table mclk_table;
- struct polaris10_single_dpm_table pcie_speed_table;
- struct polaris10_single_dpm_table vddc_table;
- struct polaris10_single_dpm_table vddci_table;
- struct polaris10_single_dpm_table mvdd_table;
+struct smu7_dpm_table {
+ struct smu7_single_dpm_table sclk_table;
+ struct smu7_single_dpm_table mclk_table;
+ struct smu7_single_dpm_table pcie_speed_table;
+ struct smu7_single_dpm_table vddc_table;
+ struct smu7_single_dpm_table vddci_table;
+ struct smu7_single_dpm_table mvdd_table;
};
-struct polaris10_clock_registers {
+struct smu7_clock_registers {
uint32_t vCG_SPLL_FUNC_CNTL;
uint32_t vCG_SPLL_FUNC_CNTL_2;
uint32_t vCG_SPLL_FUNC_CNTL_3;
@@ -116,42 +135,35 @@ struct polaris10_clock_registers {
#define DISABLE_MC_LOADMICROCODE 1
#define DISABLE_MC_CFGPROGRAMMING 2
-struct polaris10_voltage_smio_registers {
+struct smu7_voltage_smio_registers {
uint32_t vS0_VID_LOWER_SMIO_CNTL;
};
-#define POLARIS10_MAX_LEAKAGE_COUNT 8
+#define SMU7_MAX_LEAKAGE_COUNT 8
-struct polaris10_leakage_voltage {
+struct smu7_leakage_voltage {
uint16_t count;
- uint16_t leakage_id[POLARIS10_MAX_LEAKAGE_COUNT];
- uint16_t actual_voltage[POLARIS10_MAX_LEAKAGE_COUNT];
+ uint16_t leakage_id[SMU7_MAX_LEAKAGE_COUNT];
+ uint16_t actual_voltage[SMU7_MAX_LEAKAGE_COUNT];
};
-struct polaris10_vbios_boot_state {
+struct smu7_vbios_boot_state {
uint16_t mvdd_bootup_value;
uint16_t vddc_bootup_value;
uint16_t vddci_bootup_value;
+ uint16_t vddgfx_bootup_value;
uint32_t sclk_bootup_value;
uint32_t mclk_bootup_value;
uint16_t pcie_gen_bootup_value;
uint16_t pcie_lane_bootup_value;
};
-/* Ultra Low Voltage parameter structure */
-struct polaris10_ulv_parm {
- bool ulv_supported;
- uint32_t cg_ulv_parameter;
- uint32_t ulv_volt_change_delay;
- struct polaris10_performance_level ulv_power_level;
-};
-
-struct polaris10_display_timing {
+struct smu7_display_timing {
uint32_t min_clock_in_sr;
uint32_t num_existing_displays;
};
-struct polaris10_dpmlevel_enable_mask {
+struct smu7_dpmlevel_enable_mask {
uint32_t uvd_dpm_enable_mask;
uint32_t vce_dpm_enable_mask;
uint32_t acp_dpm_enable_mask;
@@ -161,22 +173,15 @@ struct polaris10_dpmlevel_enable_mask {
uint32_t pcie_dpm_enable_mask;
};
-struct polaris10_pcie_perf_range {
+struct smu7_pcie_perf_range {
uint16_t max;
uint16_t min;
};
-struct polaris10_range_table {
- uint32_t trans_lower_frequency; /* in 10khz */
- uint32_t trans_upper_frequency;
-};
-struct polaris10_hwmgr {
- struct polaris10_dpm_table dpm_table;
- struct polaris10_dpm_table golden_dpm_table;
- SMU74_Discrete_DpmTable smc_state_table;
- struct SMU74_Discrete_Ulv ulv_setting;
+struct smu7_hwmgr {
+ struct smu7_dpm_table dpm_table;
+ struct smu7_dpm_table golden_dpm_table;
- struct polaris10_range_table range_table[NUM_SCLK_RANGE];
uint32_t voting_rights_clients0;
uint32_t voting_rights_clients1;
uint32_t voting_rights_clients2;
@@ -188,12 +193,11 @@ struct polaris10_hwmgr {
uint32_t static_screen_threshold_unit;
uint32_t static_screen_threshold;
uint32_t voltage_control;
- uint32_t vddc_vddci_delta;
-
+ uint32_t vdd_gfx_control;
+ uint32_t vddc_vddgfx_delta;
uint32_t active_auto_throttle_sources;
- struct polaris10_clock_registers clock_registers;
- struct polaris10_voltage_smio_registers voltage_smio_registers;
+ struct smu7_clock_registers clock_registers;
bool is_memory_gddr5;
uint16_t acpi_vddc;
@@ -203,8 +207,9 @@ struct polaris10_hwmgr {
uint32_t pcie_gen_cap;
uint32_t pcie_lane_cap;
uint32_t pcie_spc_cap;
- struct polaris10_leakage_voltage vddc_leakage;
- struct polaris10_leakage_voltage Vddci_leakage;
+ struct smu7_leakage_voltage vddc_leakage;
+ struct smu7_leakage_voltage vddci_leakage;
+ struct smu7_leakage_voltage vddcgfx_leakage;
uint32_t mvdd_control;
uint32_t vddc_mask_low;
@@ -213,30 +218,23 @@ struct polaris10_hwmgr {
uint16_t min_vddc_in_pptable;
uint16_t max_vddci_in_pptable;
uint16_t min_vddci_in_pptable;
- uint32_t mclk_strobe_mode_threshold;
- uint32_t mclk_stutter_mode_threshold;
- uint32_t mclk_edc_enable_threshold;
- uint32_t mclk_edcwr_enable_threshold;
bool is_uvd_enabled;
- struct polaris10_vbios_boot_state vbios_boot_state;
+ struct smu7_vbios_boot_state vbios_boot_state;
bool pcie_performance_request;
bool battery_state;
bool is_tlu_enabled;
+ bool disable_handshake;
+ bool smc_voltage_control_enabled;
+ bool vbi_time_out_support;
- /* ---- SMC SRAM Address of firmware header tables ---- */
- uint32_t sram_end;
- uint32_t dpm_table_start;
- uint32_t soft_regs_start;
- uint32_t mc_reg_table_start;
- uint32_t fan_table_start;
- uint32_t arb_table_start;
-
+ uint32_t soft_regs_start;
/* ---- Stuff originally coming from Evergreen ---- */
uint32_t vddci_control;
struct pp_atomctrl_voltage_table vddc_voltage_table;
struct pp_atomctrl_voltage_table vddci_voltage_table;
struct pp_atomctrl_voltage_table mvdd_voltage_table;
+ struct pp_atomctrl_voltage_table vddgfx_voltage_table;
uint32_t mgcg_cgtt_local2;
uint32_t mgcg_cgtt_local3;
@@ -250,7 +248,7 @@ struct polaris10_hwmgr {
bool performance_request_registered;
/* ---- Low Power Features ---- */
- struct polaris10_ulv_parm ulv;
+ bool ulv_supported;
/* ---- CAC Stuff ---- */
uint32_t cac_table_start;
@@ -264,8 +262,8 @@ struct polaris10_hwmgr {
bool enable_tdc_limit_feature;
bool enable_pkg_pwr_tracking_feature;
bool disable_uvd_power_tune_feature;
- const struct polaris10_pt_defaults *power_tune_defaults;
- struct SMU74_Discrete_PmFuses power_tune_table;
+
+
uint32_t dte_tj_offset;
uint32_t fast_watermark_threshold;
@@ -273,23 +271,22 @@ struct polaris10_hwmgr {
bool vddc_phase_shed_control;
/* ---- DI/DT ---- */
- struct polaris10_display_timing display_timing;
- uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
+ struct smu7_display_timing display_timing;
/* ---- Thermal Temperature Setting ---- */
- struct polaris10_dpmlevel_enable_mask dpm_level_enable_mask;
+ struct smu7_thermal_temperature_setting thermal_temp_setting;
+ struct smu7_dpmlevel_enable_mask dpm_level_enable_mask;
uint32_t need_update_smu7_dpm_table;
uint32_t sclk_dpm_key_disabled;
uint32_t mclk_dpm_key_disabled;
uint32_t pcie_dpm_key_disabled;
uint32_t min_engine_clocks;
- struct polaris10_pcie_perf_range pcie_gen_performance;
- struct polaris10_pcie_perf_range pcie_lane_performance;
- struct polaris10_pcie_perf_range pcie_gen_power_saving;
- struct polaris10_pcie_perf_range pcie_lane_power_saving;
+ struct smu7_pcie_perf_range pcie_gen_performance;
+ struct smu7_pcie_perf_range pcie_lane_performance;
+ struct smu7_pcie_perf_range pcie_gen_power_saving;
+ struct smu7_pcie_perf_range pcie_lane_power_saving;
bool use_pcie_performance_levels;
bool use_pcie_power_saving_levels;
- uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
uint32_t mclk_activity_target;
uint32_t mclk_dpm0_activity_target;
uint32_t low_sclk_interrupt_threshold;
@@ -309,49 +306,48 @@ struct polaris10_hwmgr {
uint32_t up_hyst;
uint32_t disable_dpm_mask;
bool apply_optimized_settings;
+
uint32_t avfs_vdroop_override_setting;
bool apply_avfs_cks_off_voltage;
uint32_t frame_time_x2;
+ uint16_t mem_latency_high;
+ uint16_t mem_latency_low;
};
/* To convert to Q8.8 format for firmware */
-#define POLARIS10_Q88_FORMAT_CONVERSION_UNIT 256
-
-enum Polaris10_I2CLineID {
- Polaris10_I2CLineID_DDC1 = 0x90,
- Polaris10_I2CLineID_DDC2 = 0x91,
- Polaris10_I2CLineID_DDC3 = 0x92,
- Polaris10_I2CLineID_DDC4 = 0x93,
- Polaris10_I2CLineID_DDC5 = 0x94,
- Polaris10_I2CLineID_DDC6 = 0x95,
- Polaris10_I2CLineID_SCLSDA = 0x96,
- Polaris10_I2CLineID_DDCVGA = 0x97
+#define SMU7_Q88_FORMAT_CONVERSION_UNIT 256
+
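
SMU7_Q88_FORMAT_CONVERSION_UNIT encodes values for the SMC firmware in Q8.8 fixed point: 8 integer bits and 8 fractional bits, so multiplying by 256 shifts the integer part into the high byte. A minimal sketch of the conversion follows; the helper name and the hundredths input convention are illustrative, not part of this patch:

/*
 * Sketch: convert a value given in hundredths (e.g. 150 == 1.50)
 * to Q8.8 as implied by SMU7_Q88_FORMAT_CONVERSION_UNIT.
 * Example: 1.50 -> (150 * 256) / 100 = 384 = 0x0180.
 */
static inline uint16_t smu7_to_q88(uint32_t value_in_hundredths)
{
	return (uint16_t)((value_in_hundredths *
			   SMU7_Q88_FORMAT_CONVERSION_UNIT) / 100);
}
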
+enum SMU7_I2CLineID {
+ SMU7_I2CLineID_DDC1 = 0x90,
+ SMU7_I2CLineID_DDC2 = 0x91,
+ SMU7_I2CLineID_DDC3 = 0x92,
+ SMU7_I2CLineID_DDC4 = 0x93,
+ SMU7_I2CLineID_DDC5 = 0x94,
+ SMU7_I2CLineID_DDC6 = 0x95,
+ SMU7_I2CLineID_SCLSDA = 0x96,
+ SMU7_I2CLineID_DDCVGA = 0x97
};
-#define POLARIS10_I2C_DDC1DATA 0
-#define POLARIS10_I2C_DDC1CLK 1
-#define POLARIS10_I2C_DDC2DATA 2
-#define POLARIS10_I2C_DDC2CLK 3
-#define POLARIS10_I2C_DDC3DATA 4
-#define POLARIS10_I2C_DDC3CLK 5
-#define POLARIS10_I2C_SDA 40
-#define POLARIS10_I2C_SCL 41
-#define POLARIS10_I2C_DDC4DATA 65
-#define POLARIS10_I2C_DDC4CLK 66
-#define POLARIS10_I2C_DDC5DATA 0x48
-#define POLARIS10_I2C_DDC5CLK 0x49
-#define POLARIS10_I2C_DDC6DATA 0x4a
-#define POLARIS10_I2C_DDC6CLK 0x4b
-#define POLARIS10_I2C_DDCVGADATA 0x4c
-#define POLARIS10_I2C_DDCVGACLK 0x4d
-
-#define POLARIS10_UNUSED_GPIO_PIN 0x7F
-
-int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
-
-int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate);
+#define SMU7_I2C_DDC1DATA 0
+#define SMU7_I2C_DDC1CLK 1
+#define SMU7_I2C_DDC2DATA 2
+#define SMU7_I2C_DDC2CLK 3
+#define SMU7_I2C_DDC3DATA 4
+#define SMU7_I2C_DDC3CLK 5
+#define SMU7_I2C_SDA 40
+#define SMU7_I2C_SCL 41
+#define SMU7_I2C_DDC4DATA 65
+#define SMU7_I2C_DDC4CLK 66
+#define SMU7_I2C_DDC5DATA 0x48
+#define SMU7_I2C_DDC5CLK 0x49
+#define SMU7_I2C_DDC6DATA 0x4a
+#define SMU7_I2C_DDC6CLK 0x4b
+#define SMU7_I2C_DDCVGADATA 0x4c
+#define SMU7_I2C_DDCVGACLK 0x4d
+
+#define SMU7_UNUSED_GPIO_PIN 0x7F
+uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
+uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
+ uint32_t clock_insr);
#endif
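
smu7_get_sleep_divider_id_from_clock() selects the deep-sleep engine-clock divider: the largest divider ID for which clock / (id + 1) still meets the clock_insr floor. A sketch of that search, assuming a SMU7_MINIMUM_ENGINE_CLOCK floor constant; the exact guard in the driver may differ:

uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
		uint32_t clock_insr)
{
	uint8_t i;
	uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);

	if (clock < min)
		return 0;	/* clock cannot satisfy the deep-sleep floor */

	/* walk down from the deepest divider until the divided clock fits */
	for (i = 31; ; i--) {
		if (clock / (i + 1) >= min || i == 0)
			break;
	}
	return i;
}
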
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index b9cb240a135d..41b634ffa5b0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -20,546 +20,364 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "hwmgr.h"
#include "smumgr.h"
-#include "polaris10_hwmgr.h"
-#include "polaris10_powertune.h"
-#include "polaris10_smumgr.h"
-#include "smu74_discrete.h"
+#include "smu7_hwmgr.h"
+#include "smu7_powertune.h"
#include "pp_debug.h"
-#include "gca/gfx_8_0_d.h"
-#include "gca/gfx_8_0_sh_mask.h"
-#include "oss/oss_3_0_sh_mask.h"
+#include "smu7_common.h"
#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX 1
-uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
+static uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
-struct polaris10_pt_config_reg GCCACConfig_Polaris10[] = {
+static const struct gpu_pt_config_reg GCCACConfig_Polaris10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value Type
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
*/
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, POLARIS10_CONFIGREG_GC_CAC_IND },
-
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
-
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, GPU_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, GPU_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, GPU_CONFIGREG_GC_CAC_IND },
{ 0xFFFFFFFF }
};
-struct polaris10_pt_config_reg GCCACConfig_Polaris11[] = {
+static const struct gpu_pt_config_reg GCCACConfig_Polaris11[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value Type
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
*/
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, POLARIS10_CONFIGREG_GC_CAC_IND },
-
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
-
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, GPU_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, GPU_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, GPU_CONFIGREG_GC_CAC_IND },
{ 0xFFFFFFFF }
};
-struct polaris10_pt_config_reg DIDTConfig_Polaris10[] = {
+static const struct gpu_pt_config_reg DIDTConfig_Polaris10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value Type
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
*/
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ 0xFFFFFFFF }
};
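
Each config table above is a flat array of { offset, mask, shift, value, type } records terminated by a sentinel entry whose offset is 0xFFFFFFFF. The powertune code applies such a table as a read-modify-write loop over the indirect register space named by the type field. A minimal sketch of that loop using the CGS indirect-register accessors; the helper name and error handling are illustrative, not necessarily what this patch implements:

static int smu7_program_pt_config_registers(struct pp_hwmgr *hwmgr,
		const struct gpu_pt_config_reg *cfg)
{
	uint32_t data;
	enum cgs_ind_reg space;

	for (; cfg->offset != 0xFFFFFFFF; cfg++) {
		switch (cfg->type) {
		case GPU_CONFIGREG_DIDT_IND:
			space = CGS_IND_REG__DIDT;
			break;
		case GPU_CONFIGREG_GC_CAC_IND:
			space = CGS_IND_REG_GC_CAC;
			break;
		default:
			return -EINVAL;
		}
		/* read-modify-write: clear the masked field, then set it */
		data = cgs_read_ind_register(hwmgr->device, space, cfg->offset);
		data &= ~cfg->mask;
		data |= (cfg->value << cfg->shift) & cfg->mask;
		cgs_write_ind_register(hwmgr->device, space, cfg->offset, data);
	}
	return 0;
}
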
-struct polaris10_pt_config_reg DIDTConfig_Polaris11[] = {
+static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value Type
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
*/
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ 0xFFFFFFFF }
};
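
Each row of the table above is pure data: an indirect register offset, a field mask, the field's shift, the value to program, and a tag naming the register space it belongs to. A minimal sketch of how a single row translates into a read-modify-write, assuming hypothetical read_reg()/write_reg() helpers in place of the cgs indirect accessors:

#include <stdint.h>

struct pt_row {			/* mirrors gpu_pt_config_reg (illustrative) */
	uint32_t offset, mask, shift, value;
};

extern uint32_t read_reg(uint32_t offset);	/* hypothetical accessors */
extern void write_reg(uint32_t offset, uint32_t data);

static void apply_row(const struct pt_row *r)
{
	uint32_t data = read_reg(r->offset);
	data &= ~r->mask;				/* clear the field */
	data |= (r->value << r->shift) & r->mask;	/* insert the new value */
	write_reg(r->offset, data);
}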
-static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
- /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
- * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
- { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
- { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
- { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
-};
-
-void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *polaris10_hwmgr = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (table_info &&
- table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
- table_info->cac_dtp_table->usPowerTuneDataSetID)
- polaris10_hwmgr->power_tune_defaults =
- &polaris10_power_tune_data_set_array
- [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
- else
- polaris10_hwmgr->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
-
-}
-
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
- uint32_t tmp;
- tmp = raw_setting * 4096 / 100;
- return (uint16_t)tmp;
-}
-
-int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
- SMU74_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
- struct pp_advance_fan_control_parameters *fan_table=
- &hwmgr->thermal_controller.advanceFanControlParameters;
- int i, j, k;
- const uint16_t *pdef1;
- const uint16_t *pdef2;
-
- dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
- dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
-
- PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
- "Target Operating Temp is out of Range!",
- );
-
- dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTargetOperatingTemp * 256);
- dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitHotspot * 256);
- dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainEdge));
- dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHotspot));
-
- pdef1 = defaults->BAPMTI_R;
- pdef2 = defaults->BAPMTI_RC;
-
- for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
- for (j = 0; j < SMU74_DTE_SOURCES; j++) {
- for (k = 0; k < SMU74_DTE_SINKS; k++) {
- dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
- dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
- pdef1++;
- pdef2++;
- }
- }
- }
-
- return 0;
-}
-
-static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
-
- data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
- data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
- data->power_tune_table.SviLoadLineTrimVddC = 3;
- data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
- uint16_t tdc_limit;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
-
- tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
- data->power_tune_table.TDC_VDDC_PkgLimit =
- CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
- data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
- data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
- return 0;
-}
-
-static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
- uint32_t temp;
-
- if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
- fuse_table_offset +
- offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, data->sram_end))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
- return -EINVAL);
- else {
- data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
- data->power_tune_table.LPMLTemperatureMin =
- (uint8_t)((temp >> 16) & 0xff);
- data->power_tune_table.LPMLTemperatureMax =
- (uint8_t)((temp >> 8) & 0xff);
- data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
- }
- return 0;
-}
-
-static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
- return 0;
-}
-
-static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
- || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
- hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
- hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
- data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
- hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
- return 0;
-}
-
-static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- data->power_tune_table.GnbLPML[i] = 0;
-
- return 0;
-}
-
-static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
+static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
{
uint32_t en = enable ? 1 : 0;
@@ -608,29 +426,29 @@ static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
return result;
}
-static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
- struct polaris10_pt_config_reg *cac_config_regs)
+static int smu7_program_pt_config_registers(struct pp_hwmgr *hwmgr,
+ const struct gpu_pt_config_reg *cac_config_regs)
{
- struct polaris10_pt_config_reg *config_regs = cac_config_regs;
+ const struct gpu_pt_config_reg *config_regs = cac_config_regs;
uint32_t cache = 0;
uint32_t data = 0;
PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL);
while (config_regs->offset != 0xFFFFFFFF) {
- if (config_regs->type == POLARIS10_CONFIGREG_CACHE)
+ if (config_regs->type == GPU_CONFIGREG_CACHE)
cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
else {
switch (config_regs->type) {
- case POLARIS10_CONFIGREG_SMC_IND:
+ case GPU_CONFIGREG_SMC_IND:
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset);
break;
- case POLARIS10_CONFIGREG_DIDT_IND:
+ case GPU_CONFIGREG_DIDT_IND:
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
break;
- case POLARIS10_CONFIGREG_GC_CAC_IND:
+ case GPU_CONFIGREG_GC_CAC_IND:
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
break;
@@ -644,15 +462,15 @@ static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
data |= cache;
switch (config_regs->type) {
- case POLARIS10_CONFIGREG_SMC_IND:
+ case GPU_CONFIGREG_SMC_IND:
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data);
break;
- case POLARIS10_CONFIGREG_DIDT_IND:
+ case GPU_CONFIGREG_DIDT_IND:
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
break;
- case POLARIS10_CONFIGREG_GC_CAC_IND:
+ case GPU_CONFIGREG_GC_CAC_IND:
cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
break;
@@ -669,7 +487,7 @@ static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
return 0;
}
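
One subtlety in the loop above: rows tagged GPU_CONFIGREG_CACHE never touch hardware; their shifted bits accumulate in `cache`, and the next row that does reach a register folds the cached bits into its write before the cache is cleared. A reduced sketch of that control flow, reusing the pt_row shape and accessors from the earlier sketch plus an illustrative `type` tag (the per-space read/write dispatch is elided):

enum row_type { ROW_SMC_IND, ROW_DIDT_IND, ROW_GC_CAC_IND, ROW_CACHE };

struct pt_row_t {		/* pt_row from above plus a type tag */
	uint32_t offset, mask, shift, value;
	enum row_type type;
};

static void program_rows(const struct pt_row_t *row)
{
	uint32_t cache = 0;

	for (; row->offset != 0xFFFFFFFF; row++) {
		if (row->type == ROW_CACHE) {
			/* Defer: collect bits for the next real register write. */
			cache |= (row->value << row->shift) & row->mask;
			continue;
		}
		uint32_t data = read_reg(row->offset);
		data &= ~row->mask;
		data |= (row->value << row->shift) & row->mask;
		data |= cache;		/* fold in the deferred bits */
		write_reg(row->offset, data);
		cache = 0;		/* deferred bits consumed */
	}
}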
-int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
+int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
{
int result;
uint32_t num_se = 0;
@@ -699,20 +517,20 @@ int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value);
if (hwmgr->chip_id == CHIP_POLARIS10) {
- result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
+ result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
- result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
+ result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
} else if (hwmgr->chip_id == CHIP_POLARIS11) {
- result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
+ result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
- result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
+ result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
}
}
cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
- result = polaris10_enable_didt(hwmgr, true);
+ result = smu7_enable_didt(hwmgr, true);
PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
/* TO DO Post DIDT enable clock gating */
@@ -721,7 +539,7 @@ int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
return 0;
}
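
The function above iterates shader engines because the DIDT blocks are banked per SE: GRBM_GFX_INDEX is aimed at one engine at a time, the chip-specific tables are programmed, and a saved index (`value2` in the hunk, presumably a broadcast setting) is written back at the end. A hedged outline of that shape; the index encoding below is invented for illustration and is not the real GRBM_GFX_INDEX layout:

#define GFX_INDEX_REG	0x2200u			/* placeholder offset */
#define SEL_SE(n)	((uint32_t)(n) << 16)	/* hypothetical SE select bits */
#define BCAST_REST	0x0000ffffu		/* hypothetical SH/instance broadcast */

static int program_all_se(uint32_t num_se, const struct pt_row_t *table)
{
	for (uint32_t se = 0; se < num_se; se++) {
		write_reg(GFX_INDEX_REG, SEL_SE(se) | BCAST_REST);
		program_rows(table);		/* per-SE banked registers */
	}
	write_reg(GFX_INDEX_REG, BCAST_REST);	/* restore broadcast */
	return 0;
}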
-int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr)
+int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
{
int result;
@@ -731,7 +549,7 @@ int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr)
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
/* TO DO Pre DIDT disable clock gating */
- result = polaris10_enable_didt(hwmgr, false);
+ result = smu7_enable_didt(hwmgr, false);
PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock gating failed.", return result);
/* TO DO Post DIDT enable clock gating */
}
@@ -739,95 +557,9 @@ int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr)
return 0;
}
-
-static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
- hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
- data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
-
- return 0;
-}
-
-int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t pm_fuse_table_offset;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, data->sram_end))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed!",
- return -EINVAL);
-
- if (polaris10_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed!",
- return -EINVAL);
-
- if (polaris10_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed!", return -EINVAL);
-
- if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl, "
- "LPMLTemperature Min and Max Failed!",
- return -EINVAL);
-
- if (0 != polaris10_populate_temperature_scaler(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed!",
- return -EINVAL);
-
- if (polaris10_populate_fuzzy_fan(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate Fuzzy Fan Control parameters Failed!",
- return -EINVAL);
-
- if (polaris10_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed!",
- return -EINVAL);
-
- if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
- if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
- "Sidd Failed!", return -EINVAL);
-
- if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
- (uint8_t *)&data->power_tune_table,
- (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed!",
- return -EINVAL);
- }
- return 0;
-}
-
-int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -843,9 +575,9 @@ int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
return result;
}
-int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr)
+int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -860,9 +592,9 @@ int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr)
return result;
}
-int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
+int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit)
@@ -871,21 +603,27 @@ int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
return 0;
}
-static int polaris10_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
+static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
{
return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
}
-int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
+int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
int smc_result;
int result = 0;
+ struct phm_cac_tdp_table *cac_table;
data->power_containment_features = 0;
+ if (hwmgr->pp_table_version == PP_TABLE_V1)
+ cac_table = table_info->cac_dtp_table;
+ else
+ cac_table = hwmgr->dyn_state.cac_dtp_table;
+
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
@@ -905,15 +643,13 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable PkgPwrTracking in SMC.", result = -1;);
if (0 == smc_result) {
- struct phm_cac_tdp_table *cac_table =
- table_info->cac_dtp_table;
uint32_t default_limit =
(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
data->power_containment_features |=
POWERCONTAINMENT_FEATURE_PkgPwrLimit;
- if (polaris10_set_power_limit(hwmgr, default_limit))
+ if (smu7_set_power_limit(hwmgr, default_limit))
printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
}
}
@@ -921,9 +657,9 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
return result;
}
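
The default_limit computation a few lines up shows the unit convention: the SMC consumes package power limits in unsigned 8.8 fixed point, so usMaximumPowerDeliveryLimit in watts is multiplied by 256. A worked check with an assumed 208 W board limit:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t watts = 208;			/* assumed board limit */
	uint32_t smc_limit = (uint32_t)watts * 256;

	printf("%u W -> %u (0x%x) in 8.8 fixed point\n", watts, smc_limit, smc_limit);
	return 0;				/* 208 W -> 53248 (0xd000) */
}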
-int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr)
+int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -963,14 +699,19 @@ int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr)
return result;
}
-int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
+int smu7_power_control_set_level(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+ struct phm_cac_tdp_table *cac_table;
+
int adjust_percent, target_tdp;
int result = 0;
+ if (hwmgr->pp_table_version == PP_TABLE_V1)
+ cac_table = table_info->cac_dtp_table;
+ else
+ cac_table = hwmgr->dyn_state.cac_dtp_table;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
/* adjustment percentage has already been validated */
@@ -981,7 +722,7 @@ int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
* but message to be 8 bit fraction for messages
*/
target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
- result = polaris10_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
+ result = smu7_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
}
return result;
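
The overdrive path keeps the same 8.8 convention: usTDP is scaled by 256 first, then the already-validated percentage offset is applied. With an assumed usTDP of 120 W and a +10% adjustment, target_tdp = (110 * 120 * 256) / 100 = 33792, i.e. 132.0 W in 8.8 fixed point; a quick check:

#include <assert.h>

int main(void)
{
	int usTDP = 120;		/* watts, assumed */
	int adjust_percent = 10;	/* validated upstream */
	int target_tdp = ((100 + adjust_percent) * (usTDP * 256)) / 100;

	assert(target_tdp == 33792);	/* 33792 / 256 == 132.0 W */
	return 0;
}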
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h
index bc78e28f010d..22f86b6bf1be 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h
@@ -20,17 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#ifndef POLARIS10_POWERTUNE_H
-#define POLARIS10_POWERTUNE_H
-
-enum polaris10_pt_config_reg_type {
- POLARIS10_CONFIGREG_MMR = 0,
- POLARIS10_CONFIGREG_SMC_IND,
- POLARIS10_CONFIGREG_DIDT_IND,
- POLARIS10_CONFIGREG_GC_CAC_IND,
- POLARIS10_CONFIGREG_CACHE,
- POLARIS10_CONFIGREG_MAX
-};
+#ifndef _SMU7_POWERTUNE_H
+#define _SMU7_POWERTUNE_H
#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000
#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12
@@ -52,43 +43,20 @@ enum polaris10_pt_config_reg_type {
#define ixGC_CAC_CNTL 0x0000
#define ixDIDT_SQ_STALL_CTRL 0x0004
-#define ixDIDT_SQ_TUNING_CTRL 0x0005
+#define ixDIDT_SQ_TUNING_CTRL 0x0005
#define ixDIDT_TD_STALL_CTRL 0x0044
#define ixDIDT_TD_TUNING_CTRL 0x0045
#define ixDIDT_TCP_STALL_CTRL 0x0064
#define ixDIDT_TCP_TUNING_CTRL 0x0065
-struct polaris10_pt_config_reg {
- uint32_t offset;
- uint32_t mask;
- uint32_t shift;
- uint32_t value;
- enum polaris10_pt_config_reg_type type;
-};
-
-struct polaris10_pt_defaults {
- uint8_t SviLoadLineEn;
- uint8_t SviLoadLineVddC;
- uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
- uint8_t TDC_MAWt;
- uint8_t TdcWaterfallCtl;
- uint8_t DTEAmbientTempBase;
-
- uint32_t DisplayCac;
- uint32_t BAPM_TEMP_GRADIENT;
- uint16_t BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
- uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
-};
-void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
-int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
-int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr);
-int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr);
-int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr);
-int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr);
-int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr);
-int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
-int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
-int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr);
-#endif /* POLARIS10_POWERTUNE_H */
+int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr);
+int smu7_enable_power_containment(struct pp_hwmgr *hwmgr);
+int smu7_disable_power_containment(struct pp_hwmgr *hwmgr);
+int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
+int smu7_power_control_set_level(struct pp_hwmgr *hwmgr);
+int smu7_enable_didt_config(struct pp_hwmgr *hwmgr);
+int smu7_disable_didt_config(struct pp_hwmgr *hwmgr);
+#endif /* _SMU7_POWERTUNE_H */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index 92976b68d6fd..fb6c6f6106d5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,18 +20,15 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+
#include <asm/div64.h>
-#include "fiji_thermal.h"
-#include "fiji_hwmgr.h"
-#include "fiji_smumgr.h"
-#include "fiji_ppsmc.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-
-int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+#include "smu7_thermal.h"
+#include "smu7_hwmgr.h"
+#include "smu7_common.h"
+
+int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
struct phm_fan_speed_info *fan_speed_info)
{
-
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
@@ -55,7 +52,7 @@ int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
return 0;
}
-int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
uint32_t duty100;
@@ -84,7 +81,7 @@ int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
return 0;
}
-int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
+int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
uint32_t tach_period;
uint32_t crystal_clock_freq;
@@ -100,9 +97,9 @@ int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
if (tach_period == 0)
return -EINVAL;
- crystal_clock_freq = tonga_get_xclk(hwmgr);
+ crystal_clock_freq = smu7_get_xclk(hwmgr);
- *speed = 60 * crystal_clock_freq * 10000/ tach_period;
+ *speed = 60 * crystal_clock_freq * 10000 / tach_period;
return 0;
}
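
The RPM math only works if smu7_get_xclk() reports the reference clock in 10 kHz units, an inference from the arithmetic here rather than something this hunk states: crystal_clock_freq * 10000 is then the clock in Hz, and 60 * Hz divided by the measured tach period (clock ticks per tach event) gives revolutions per minute, up to the pulses-per-revolution factor programmed elsewhere. A standalone check assuming a 25 MHz crystal:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t xclk = 2500;		/* assumed 25 MHz crystal, in 10 kHz units */
	uint32_t tach_period = 1000000;	/* assumed reading: ticks per tach event */
	uint32_t rpm = 60 * xclk * 10000 / tach_period;

	assert(rpm == 1500);		/* 60 * 25e6 / 1e6 */
	return 0;
}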
@@ -113,7 +110,7 @@ int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
* mode the fan control mode: 0 default, 1 by percent, 5 by RPM
* @exception Should always succeed.
*/
-int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
if (hwmgr->fan_ctrl_is_in_default_mode) {
@@ -139,7 +136,7 @@ int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
* @param hwmgr the address of the powerplay hardware manager.
* @exception Should always succeed.
*/
-int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
+int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
{
if (!hwmgr->fan_ctrl_is_in_default_mode) {
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -152,7 +149,7 @@ int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
return 0;
}
-int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+static int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
int result;
@@ -187,7 +184,7 @@ int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
}
-int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
+int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
}
@@ -198,7 +195,7 @@ int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
* @param speed is the percentage value (0% - 100%) to be set.
* @exception Fails if the 100% setting appears to be 0.
*/
-int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint32_t speed)
{
uint32_t duty100;
@@ -213,7 +210,7 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl))
- fiji_fan_ctrl_stop_smc_fan_control(hwmgr);
+ smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL1, FMAX_DUTY100);
@@ -228,7 +225,7 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
- return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+ return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
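
Between these hunks the elided lines scale the request: FMAX_DUTY100 is the duty register value corresponding to 100% PWM, so the static duty is roughly speed * duty100 / 100 (the driver widens to 64 bits before dividing). A sketch with an assumed duty100 of 255:

#include <stdint.h>

int main(void)
{
	uint32_t speed = 40;			/* requested percent */
	uint32_t duty100 = 255;			/* assumed FMAX_DUTY100 value */
	uint64_t tmp = (uint64_t)speed * duty100;
	uint32_t duty = (uint32_t)(tmp / 100);	/* 102, about 40% of full scale */

	return duty == 102 ? 0 : 1;
}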
/**
@@ -236,7 +233,7 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
* @param hwmgr the address of the powerplay hardware manager.
* @exception Always succeeds.
*/
-int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
+int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
{
int result;
@@ -245,11 +242,11 @@ int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
- result = fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+ result = smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
if (!result)
- result = fiji_fan_ctrl_start_smc_fan_control(hwmgr);
+ result = smu7_fan_ctrl_start_smc_fan_control(hwmgr);
} else
- result = fiji_fan_ctrl_set_default_mode(hwmgr);
+ result = smu7_fan_ctrl_set_default_mode(hwmgr);
return result;
}
@@ -260,7 +257,7 @@ int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
* @param speed is the percentage value (min - max) to be set.
* @exception Fails if the speed does not lie between min and max.
*/
-int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
{
uint32_t tach_period;
uint32_t crystal_clock_freq;
@@ -272,14 +269,18 @@ int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
return 0;
- crystal_clock_freq = tonga_get_xclk(hwmgr);
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl))
+ smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+
+ crystal_clock_freq = smu7_get_xclk(hwmgr);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_STATUS, TACH_PERIOD, tach_period);
- return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+ return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
/**
@@ -287,7 +288,7 @@ int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
*
* @param hwmgr The address of the hardware manager.
*/
-int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
int temp;
@@ -296,7 +297,7 @@ int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr)
/* Bit 9 means the reading is lower than the lowest usable value. */
if (temp & 0x200)
- temp = FIJI_THERMAL_MAXIMUM_TEMP_READING;
+ temp = SMU7_THERMAL_MAXIMUM_TEMP_READING;
else
temp = temp & 0x1ff;
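
The raw value behaves like a 9-bit reading with bit 9 as an out-of-range flag: if 0x200 is set the driver substitutes the clamp constant, otherwise it masks to the low nine bits. As a tiny decode helper (clamp value taken from the SMU7_THERMAL_MAXIMUM_TEMP_READING definition added later in this patch):

static int decode_temp(int raw)
{
	if (raw & 0x200)	/* out-of-range flag */
		return 255;	/* SMU7_THERMAL_MAXIMUM_TEMP_READING */
	return raw & 0x1ff;	/* plain 9-bit reading */
}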
@@ -312,12 +313,12 @@ int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr)
* @param range Temperature range to be programmed for high and low alert signals
* @exception PP_Result_BadInput if the input data is not valid.
*/
-static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+static int smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
uint32_t low_temp, uint32_t high_temp)
{
- uint32_t low = FIJI_THERMAL_MINIMUM_ALERT_TEMP *
+ uint32_t low = SMU7_THERMAL_MINIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- uint32_t high = FIJI_THERMAL_MAXIMUM_ALERT_TEMP *
+ uint32_t high = SMU7_THERMAL_MAXIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
if (low < low_temp)
@@ -346,7 +347,7 @@ static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
*
* @param hwmgr The address of the hardware manager.
*/
-static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
+static int smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
{
if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -365,13 +366,13 @@ static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
*
* @param hwmgr The address of the hardware manager.
*/
-static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr)
+int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
uint32_t alert;
alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK);
- alert &= ~(FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK);
+ alert &= ~(SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK, alert);
@@ -383,13 +384,13 @@ static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr)
* Disable thermal alerts on the RV770 thermal controller.
* @param hwmgr The address of the hardware manager.
*/
-static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr)
+int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{
uint32_t alert;
alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK);
- alert |= (FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK);
+ alert |= (SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK, alert);
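
Enable and disable are mirror images of one read-modify-write on THERM_INT_MASK, with inverted polarity: a set bit masks (suppresses) the alert interrupt, so enabling alerts clears the high/low bits and disabling sets them. The shared pattern, with hypothetical accessors standing in for the PHM field macros:

#include <stdint.h>

#define THERM_HI_ALERT 0x1	/* SMU7_THERMAL_HIGH_ALERT_MASK */
#define THERM_LO_ALERT 0x2	/* SMU7_THERMAL_LOW_ALERT_MASK */

extern uint32_t read_therm_int_mask(void);	/* hypothetical accessors */
extern void write_therm_int_mask(uint32_t v);

static void set_thermal_alerts(int enable)
{
	uint32_t alert = read_therm_int_mask();

	if (enable)
		alert &= ~(THERM_HI_ALERT | THERM_LO_ALERT);	/* unmask: fire */
	else
		alert |= (THERM_HI_ALERT | THERM_LO_ALERT);	/* mask: suppress */
	write_therm_int_mask(alert);
}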
@@ -402,129 +403,17 @@ static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr)
* Currently just disables alerts.
* @param hwmgr The address of the hardware manager.
*/
-int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
+int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
{
- int result = fiji_thermal_disable_alert(hwmgr);
+ int result = smu7_thermal_disable_alert(hwmgr);
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- fiji_fan_ctrl_set_default_mode(hwmgr);
+ if (!hwmgr->thermal_controller.fanInfo.bNoFan)
+ smu7_fan_ctrl_set_default_mode(hwmgr);
return result;
}
/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- uint32_t duty100;
- uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- uint16_t fdo_min, slope1, slope2;
- uint32_t reference_clock;
- int res;
- uint64_t tmp64;
-
- if (data->fan_table_start == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (duty100 == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
- usPWMMin * duty100;
- do_div(tmp64, 10000);
- fdo_min = (uint16_t)tmp64;
-
- t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
- t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
- pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
- pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
- slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.TempMin = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMin) / 100);
- fan_table.TempMed = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMed) / 100);
- fan_table.TempMax = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(hwmgr->
- thermal_controller.advanceFanControlParameters.ucTHyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = tonga_get_xclk(hwmgr);
-
- fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
- thermal_controller.advanceFanControlParameters.ulCycleDelay *
- reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
- fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
- hwmgr->device, CGS_IND_REG__SMC,
- CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
- res = fiji_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
- (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
- data->sram_end);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanMinPwm,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanSclkTarget,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
-
- if (res)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
-
- return 0;
-}
-
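
The removed fan-table math is still worth decoding: the 16x factor gives the slope fractional resolution, and the +50 / divide-by-100 pair rounds to the nearest unit. With duty100 = 255 and a 25-point PWM rise over a 2000-centidegree span (both illustrative values), slope = (50 + (16 * 255 * 25) / 2000) / 100 = (50 + 51) / 100 = 1. As a check:

#include <stdint.h>
#include <assert.h>

/* Rounded slope as in the removed tf_fiji_thermal_setup_fan_table(). */
static uint16_t fan_slope(uint32_t duty100, uint32_t pwm_diff, uint32_t t_diff)
{
	return (uint16_t)((50 + (16 * duty100 * pwm_diff) / t_diff) / 100);
}

int main(void)
{
	assert(fan_slope(255, 25, 2000) == 1);
	return 0;
}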
-/**
* Start the fan control on the SMC.
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
@@ -533,7 +422,7 @@ int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
* @param Result the last failure code
* @return result from set temperature range routine
*/
-int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
+static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
/* If the fan table setup has failed we could have disabled
@@ -543,8 +432,8 @@ int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
*/
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
- fiji_fan_ctrl_start_smc_fan_control(hwmgr);
- fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+ smu7_fan_ctrl_start_smc_fan_control(hwmgr);
+ smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
return 0;
@@ -559,7 +448,7 @@ int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
* @param Result the last failure code
* @return result from set temperature range routine
*/
-int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+static int tf_smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
@@ -567,7 +456,7 @@ int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
if (range == NULL)
return -EINVAL;
- return fiji_thermal_set_temperature_range(hwmgr, range->min, range->max);
+ return smu7_thermal_set_temperature_range(hwmgr, range->min, range->max);
}
/**
@@ -579,10 +468,10 @@ int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
* @param Result the last failure code
* @return result from initialize thermal controller routine
*/
-int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr,
+static int tf_smu7_thermal_initialize(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
- return fiji_thermal_initialize(hwmgr);
+ return smu7_thermal_initialize(hwmgr);
}
/**
@@ -594,10 +483,10 @@ int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr,
* @param Result the last failure code
* @return result from enable alert routine
*/
-int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr,
+static int tf_smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
- return fiji_thermal_enable_alert(hwmgr);
+ return smu7_thermal_enable_alert(hwmgr);
}
/**
@@ -609,53 +498,54 @@ int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr,
* @param Result the last failure code
* @return result from disable alert routine
*/
-static int tf_fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr,
+static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
- return fiji_thermal_disable_alert(hwmgr);
+ return smu7_thermal_disable_alert(hwmgr);
}
static const struct phm_master_table_item
-fiji_thermal_start_thermal_controller_master_list[] = {
- {NULL, tf_fiji_thermal_initialize},
- {NULL, tf_fiji_thermal_set_temperature_range},
- {NULL, tf_fiji_thermal_enable_alert},
+phm_thermal_start_thermal_controller_master_list[] = {
+ {NULL, tf_smu7_thermal_initialize},
+ {NULL, tf_smu7_thermal_set_temperature_range},
+ {NULL, tf_smu7_thermal_enable_alert},
+ {NULL, smum_thermal_avfs_enable},
/* We should restrict performance levels to low before we halt the SMC.
* On the other hand we are still in boot state when we do this
* so it would be pointless.
* If this assumption changes we have to revisit this table.
*/
- {NULL, tf_fiji_thermal_setup_fan_table},
- {NULL, tf_fiji_thermal_start_smc_fan_control},
+ {NULL, smum_thermal_setup_fan_table},
+ {NULL, tf_smu7_thermal_start_smc_fan_control},
{NULL, NULL}
};
static const struct phm_master_table_header
-fiji_thermal_start_thermal_controller_master = {
+phm_thermal_start_thermal_controller_master = {
0,
PHM_MasterTableFlag_None,
- fiji_thermal_start_thermal_controller_master_list
+ phm_thermal_start_thermal_controller_master_list
};
static const struct phm_master_table_item
-fiji_thermal_set_temperature_range_master_list[] = {
- {NULL, tf_fiji_thermal_disable_alert},
- {NULL, tf_fiji_thermal_set_temperature_range},
- {NULL, tf_fiji_thermal_enable_alert},
+phm_thermal_set_temperature_range_master_list[] = {
+ {NULL, tf_smu7_thermal_disable_alert},
+ {NULL, tf_smu7_thermal_set_temperature_range},
+ {NULL, tf_smu7_thermal_enable_alert},
{NULL, NULL}
};
static const struct phm_master_table_header
-fiji_thermal_set_temperature_range_master = {
+phm_thermal_set_temperature_range_master = {
0,
PHM_MasterTableFlag_None,
- fiji_thermal_set_temperature_range_master_list
+ phm_thermal_set_temperature_range_master_list
};
-int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
+int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
{
if (!hwmgr->thermal_controller.fanInfo.bNoFan)
- fiji_fan_ctrl_set_default_mode(hwmgr);
+ smu7_fan_ctrl_set_default_mode(hwmgr);
return 0;
}
@@ -664,17 +554,17 @@ int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
* @param hwmgr The address of the hardware manager.
* @exception Any error code from the low-level communication.
*/
-int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
+int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
{
int result;
result = phm_construct_table(hwmgr,
- &fiji_thermal_set_temperature_range_master,
+ &phm_thermal_set_temperature_range_master,
&(hwmgr->set_temperature_range));
if (!result) {
result = phm_construct_table(hwmgr,
- &fiji_thermal_start_thermal_controller_master,
+ &phm_thermal_start_thermal_controller_master,
&(hwmgr->start_thermal_controller));
if (result)
phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
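The phm master tables above are plain {storage, function} arrays terminated by {NULL, NULL}; phm_construct_table() turns one into a runtime table that is later walked entry by entry, each step receiving the previous step's result. A minimal standalone sketch of that walk, under the assumption that only the pattern visible here matters (the mini_* types and step functions below are illustrative, not kernel code):

#include <stdio.h>

struct mini_hwmgr;                                  /* stand-in for pp_hwmgr */
typedef int (*mini_tf)(struct mini_hwmgr *hwmgr,
		       void *input, void *output, void *storage, int result);
struct mini_item { void *storage; mini_tf func; }; /* mirrors phm_master_table_item */

/* Walk the list in order, feeding each function the previous result,
 * exactly like the {NULL, NULL}-terminated lists above. */
static int mini_dispatch(struct mini_hwmgr *hwmgr,
			 const struct mini_item *list, void *in, void *out)
{
	int result = 0;

	for (; list->func; list++)
		result = list->func(hwmgr, in, out, list->storage, result);
	return result;
}

static int step_init(struct mini_hwmgr *h, void *i, void *o, void *s, int r)
{ (void)h; (void)i; (void)o; (void)s; (void)r; puts("initialize"); return 0; }
static int step_alert(struct mini_hwmgr *h, void *i, void *o, void *s, int r)
{ (void)h; (void)i; (void)o; (void)s; (void)r; puts("enable alert"); return 0; }

int main(void)
{
	const struct mini_item start_list[] = {
		{ NULL, step_init },
		{ NULL, step_alert },
		{ NULL, NULL },
	};
	return mini_dispatch(NULL, start_list, NULL, NULL);
}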
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
new file mode 100644
index 000000000000..6face973be43
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SMU7_THERMAL_H_
+#define _SMU7_THERMAL_H_
+
+#include "hwmgr.h"
+
+#define SMU7_THERMAL_HIGH_ALERT_MASK 0x1
+#define SMU7_THERMAL_LOW_ALERT_MASK 0x2
+
+#define SMU7_THERMAL_MINIMUM_TEMP_READING -256
+#define SMU7_THERMAL_MAXIMUM_TEMP_READING 255
+
+#define SMU7_THERMAL_MINIMUM_ALERT_TEMP 0
+#define SMU7_THERMAL_MAXIMUM_ALERT_TEMP 255
+
+#define FDO_PWM_MODE_STATIC 1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+extern int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
+extern int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
+extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
+extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
+extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr);
+
+#endif
+
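The limits and masks in the new header compose straightforwardly; a small sketch that clamps a requested alert range to the header's window (clamp_alert_range() is illustrative, not part of the header):

#include <stdio.h>

#define SMU7_THERMAL_HIGH_ALERT_MASK       0x1
#define SMU7_THERMAL_LOW_ALERT_MASK        0x2
#define SMU7_THERMAL_MINIMUM_ALERT_TEMP    0
#define SMU7_THERMAL_MAXIMUM_ALERT_TEMP    255

/* Clamp a requested alert range (degrees C) into the supported window. */
static void clamp_alert_range(int *low, int *high)
{
	if (*low < SMU7_THERMAL_MINIMUM_ALERT_TEMP)
		*low = SMU7_THERMAL_MINIMUM_ALERT_TEMP;
	if (*high > SMU7_THERMAL_MAXIMUM_ALERT_TEMP)
		*high = SMU7_THERMAL_MAXIMUM_ALERT_TEMP;
}

int main(void)
{
	int low = -20, high = 300;
	unsigned int alerts = SMU7_THERMAL_HIGH_ALERT_MASK |
			      SMU7_THERMAL_LOW_ALERT_MASK;

	clamp_alert_range(&low, &high);
	printf("range [%d, %d], alert mask 0x%x\n", low, high, alerts);
	return 0;
}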
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c
deleted file mode 100644
index e58d038a997b..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "tonga_clockpowergating.h"
-#include "tonga_ppsmc.h"
-#include "tonga_hwmgr.h"
-
-int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
-{
- if (phm_cf_want_uvd_power_gating(hwmgr))
- return smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_UVDPowerOFF);
- return 0;
-}
-
-int tonga_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
-{
- if (phm_cf_want_uvd_power_gating(hwmgr)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDynamicPowerGating)) {
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDPowerON, 1);
- } else {
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDPowerON, 0);
- }
- }
-
- return 0;
-}
-
-int tonga_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
-{
- if (phm_cf_want_vce_power_gating(hwmgr))
- return smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_VCEPowerOFF);
- return 0;
-}
-
-int tonga_phm_powerup_vce(struct pp_hwmgr *hwmgr)
-{
- if (phm_cf_want_vce_power_gating(hwmgr))
- return smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_VCEPowerON);
- return 0;
-}
-
-int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
-{
- int ret = 0;
-
- switch (block) {
- case PHM_AsicBlock_UVD_MVC:
- case PHM_AsicBlock_UVD:
- case PHM_AsicBlock_UVD_HD:
- case PHM_AsicBlock_UVD_SD:
- if (gating == PHM_ClockGateSetting_StaticOff)
- ret = tonga_phm_powerdown_uvd(hwmgr);
- else
- ret = tonga_phm_powerup_uvd(hwmgr);
- break;
- case PHM_AsicBlock_GFX:
- default:
- break;
- }
-
- return ret;
-}
-
-int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- data->uvd_power_gated = false;
- data->vce_power_gated = false;
-
- tonga_phm_powerup_uvd(hwmgr);
- tonga_phm_powerup_vce(hwmgr);
-
- return 0;
-}
-
-int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (data->uvd_power_gated == bgate)
- return 0;
-
- data->uvd_power_gated = bgate;
-
- if (bgate) {
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
- cgs_set_powergating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
- tonga_update_uvd_dpm(hwmgr, true);
- tonga_phm_powerdown_uvd(hwmgr);
- } else {
- tonga_phm_powerup_uvd(hwmgr);
- cgs_set_powergating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
-
- tonga_update_uvd_dpm(hwmgr, false);
- }
-
- return 0;
-}
-
-int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct phm_set_power_state_input states;
- const struct pp_power_state *pcurrent;
- struct pp_power_state *requested;
-
- pcurrent = hwmgr->current_ps;
- requested = hwmgr->request_ps;
-
- states.pcurrent_state = &(pcurrent->hardware);
- states.pnew_state = &(requested->hardware);
-
- if (phm_cf_want_vce_power_gating(hwmgr)) {
- if (data->vce_power_gated != bgate) {
- if (bgate) {
- cgs_set_clockgating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- cgs_set_powergating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- tonga_enable_disable_vce_dpm(hwmgr, false);
- data->vce_power_gated = true;
- } else {
- tonga_phm_powerup_vce(hwmgr);
- data->vce_power_gated = false;
- cgs_set_powergating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
- cgs_set_clockgating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
-
- tonga_update_vce_dpm(hwmgr, &states);
- tonga_enable_disable_vce_dpm(hwmgr, true);
- return 0;
- }
- }
- } else {
- tonga_update_vce_dpm(hwmgr, &states);
- tonga_enable_disable_vce_dpm(hwmgr, true);
- return 0;
- }
-
- if (!data->vce_power_gated)
- tonga_update_vce_dpm(hwmgr, &states);
-
- return 0;
-}
-
-int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
- const uint32_t *msg_id)
-{
- PPSMC_Msg msg;
- uint32_t value;
-
- switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
- case PP_GROUP_GFX:
- switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
- case PP_BLOCK_GFX_CG:
- if (PP_STATE_SUPPORT_CG & *msg_id) {
- msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_GFX_CGCG_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
- if (PP_STATE_SUPPORT_LS & *msg_id) {
- msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_GFX_CGLS_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
- break;
-
- case PP_BLOCK_GFX_MG:
-			/* For GFX MGCG there are three domains:
-			 * CPF, RLC, and all others. CPF MGCG is not used on Tonga,
-			 * and GFX MGLS is not supported on Tonga.
-			 */
- if (PP_STATE_SUPPORT_CG & *msg_id) {
- msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = (CG_RLC_MGCG_MASK | CG_GFX_OTHERS_MGCG_MASK);
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
- break;
-
- default:
- return -1;
- }
- break;
-
- case PP_GROUP_SYS:
- switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
- case PP_BLOCK_SYS_BIF:
- if (PP_STATE_SUPPORT_LS & *msg_id) {
- msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_SYS_BIF_MGLS_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
- break;
-
- case PP_BLOCK_SYS_MC:
- if (PP_STATE_SUPPORT_CG & *msg_id) {
- msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_SYS_MC_MGCG_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
-
- if (PP_STATE_SUPPORT_LS & *msg_id) {
- msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_SYS_MC_MGLS_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
-
- }
- break;
-
- case PP_BLOCK_SYS_HDP:
- if (PP_STATE_SUPPORT_CG & *msg_id) {
- msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_SYS_HDP_MGCG_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
-
- if (PP_STATE_SUPPORT_LS & *msg_id) {
- msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
-
- value = CG_SYS_HDP_MGLS_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
- break;
-
- case PP_BLOCK_SYS_SDMA:
- if (PP_STATE_SUPPORT_CG & *msg_id) {
- msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_SYS_SDMA_MGCG_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
-
- if (PP_STATE_SUPPORT_LS & *msg_id) {
- msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
-
- value = CG_SYS_SDMA_MGLS_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
- break;
-
- case PP_BLOCK_SYS_ROM:
- if (PP_STATE_SUPPORT_CG & *msg_id) {
- msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_SYS_ROM_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
- break;
-
- default:
- return -1;
-
- }
- break;
-
- default:
- return -1;
-
- }
-
- return 0;
-}
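The deleted tonga_phm_update_clock_gatings() above unpacks a msg_id into group, block, and state fields through the PP_*_MASK/PP_*_SHIFT pairs. A standalone decode sketch with an assumed field layout; the MY_* widths and positions below are placeholders, the real values live in the PowerPlay headers:

#include <stdio.h>
#include <stdint.h>

/* Assumed layout for illustration only: group | block | support | state. */
#define MY_GROUP_SHIFT   24
#define MY_GROUP_MASK    (0xffu << MY_GROUP_SHIFT)
#define MY_BLOCK_SHIFT   16
#define MY_BLOCK_MASK    (0xffu << MY_BLOCK_SHIFT)
#define MY_STATE_MASK    0xffu

static void decode(uint32_t msg_id)
{
	uint32_t group = (msg_id & MY_GROUP_MASK) >> MY_GROUP_SHIFT;
	uint32_t block = (msg_id & MY_BLOCK_MASK) >> MY_BLOCK_SHIFT;
	uint32_t state = msg_id & MY_STATE_MASK;

	printf("group=%u block=%u state=0x%x\n",
	       (unsigned)group, (unsigned)block, (unsigned)state);
}

int main(void)
{
	decode((2u << MY_GROUP_SHIFT) | (5u << MY_BLOCK_SHIFT) | 0x3);
	return 0;
}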
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h
deleted file mode 100644
index 080d69d77f04..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef TONGA_DYN_DEFAULTS_H
-#define TONGA_DYN_DEFAULTS_H
-
-
-/** \file
- * Volcanic Islands Dynamic default parameters.
- */
-
-enum TONGAdpm_TrendDetection {
- TONGAdpm_TrendDetection_AUTO,
- TONGAdpm_TrendDetection_UP,
- TONGAdpm_TrendDetection_DOWN
-};
-typedef enum TONGAdpm_TrendDetection TONGAdpm_TrendDetection;
-
-/* Bit vector representing same fields as hardware register. */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy */
-/* HDP_busy */
-/* IH_busy */
-/* DRM_busy */
-/* DRMDMA_busy */
-/* UVD_busy */
-/* VCE_busy */
-/* ACP_busy */
-/* SAMU_busy */
-/* AVP_busy */
-/* SDMA enabled */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
-/* SH_Gfx_busy */
-/* RB_Gfx_busy */
-/* VCE_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
-/* FE_Gfx_busy */
-/* RB_Gfx_busy */
-/* ACP_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
-/* FE_Gfx_busy */
-/* SH_Gfx_busy */
-/* UVD_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy */
-/* VCE_busy */
-/* ACP_busy */
-/* SAMU_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP, DRMDMA */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP, DRMDMA */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP, DRMDMA */
-
-
-/* thermal protection counter (units).*/
-#define PPTONGA_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */
-
-/* static screen threshold unit */
-#define PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT 0
-
-/* static screen threshold */
-#define PPTONGA_STATICSCREENTHRESHOLD_DFLT 0x00C8
-
-/* gfx idle clock stop threshold */
-#define PPTONGA_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */
-
-/* Fixed reference divider to use when building baby stepping tables. */
-#define PPTONGA_REFERENCEDIVIDER_DFLT 4
-
-/*
- * ULV voltage change delay time
- * Used to be delay_vreg in N.I. split for S.I.
- * Using N.I. delay_vreg value as default
- * ReferenceClock = 2700
- * VoltageResponseTime = 1000
- * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
- */
-
-#define PPTONGA_ULVVOLTAGECHANGEDELAY_DFLT 1687
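The delay derivation in the comment above holds under integer arithmetic; a one-line check (plain C, not kernel code):

#include <assert.h>

int main(void)
{
	const unsigned reference_clock = 2700;       /* from the comment above */
	const unsigned voltage_response_time = 1000;

	/* 1000 * 2700 = 2,700,000; / 1600 = 1687.5, truncated to 1687 */
	assert(voltage_response_time * reference_clock / 1600 == 1687);
	return 0;
}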
-
-#define PPTONGA_CGULVPARAMETER_DFLT 0x00040035
-#define PPTONGA_CGULVCONTROL_DFLT 0x00007450
-#define PPTONGA_TARGETACTIVITY_DFLT 30 /*30% */
-#define PPTONGA_MCLK_TARGETACTIVITY_DFLT 10 /*10% */
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
deleted file mode 100644
index c7dc111221c2..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ /dev/null
@@ -1,6276 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include "linux/delay.h"
-#include "pp_acpi.h"
-#include "hwmgr.h"
-#include <atombios.h>
-#include "tonga_hwmgr.h"
-#include "pptable.h"
-#include "processpptables.h"
-#include "tonga_processpptables.h"
-#include "tonga_pptable.h"
-#include "pp_debug.h"
-#include "tonga_ppsmc.h"
-#include "cgs_common.h"
-#include "pppcielanes.h"
-#include "tonga_dyn_defaults.h"
-#include "smumgr.h"
-#include "tonga_smumgr.h"
-#include "tonga_clockpowergating.h"
-#include "tonga_thermal.h"
-
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-
-#include "cgs_linux.h"
-#include "eventmgr.h"
-#include "amd_pcie_helpers.h"
-
-#define MC_CG_ARB_FREQ_F0 0x0a
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define MC_CG_ARB_FREQ_F2 0x0c
-#define MC_CG_ARB_FREQ_F3 0x0d
-
-#define MC_CG_SEQ_DRAMCONF_S0 0x05
-#define MC_CG_SEQ_DRAMCONF_S1 0x06
-#define MC_CG_SEQ_YCLK_SUSPEND 0x04
-#define MC_CG_SEQ_YCLK_RESUME 0x0a
-
-#define PCIE_BUS_CLK 10000
-#define TCLK (PCIE_BUS_CLK / 10)
-
-#define SMC_RAM_END 0x40000
-#define SMC_CG_IND_START 0xc0030000
-#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND*/
-
-#define VOLTAGE_SCALE 4
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-
-#define VDDC_VDDCI_DELTA 200
-#define VDDC_VDDGFX_DELTA 300
-
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-
-typedef uint32_t PECI_RegistryValue;
-
-/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
-static const uint16_t PP_ClockStretcherLookupTable[2][4] = {
- {600, 1050, 3, 0},
- {600, 1050, 6, 1} };
-
-/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min, VID max] */
-static const uint32_t PP_ClockStretcherDDTTable[2][4][4] = {
- { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
- { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
-static const uint8_t PP_ClockStretchAmountConversion[2][6] = {
- {0, 1, 3, 2, 4, 5},
- {0, 2, 4, 5, 6, 5} };
-
-/* Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
-enum DPM_EVENT_SRC {
- DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */
- DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */
- DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */
- DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */
- DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */
-};
-typedef enum DPM_EVENT_SRC DPM_EVENT_SRC;
-
-static const unsigned long PhwTonga_Magic = (unsigned long)(PHM_VIslands_Magic);
-
-struct tonga_power_state *cast_phw_tonga_power_state(
- struct pp_hw_power_state *hw_ps)
-{
- if (hw_ps == NULL)
- return NULL;
-
- PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL);
-
- return (struct tonga_power_state *)hw_ps;
-}
-
-const struct tonga_power_state *cast_const_phw_tonga_power_state(
- const struct pp_hw_power_state *hw_ps)
-{
- if (hw_ps == NULL)
- return NULL;
-
- PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL);
-
- return (const struct tonga_power_state *)hw_ps;
-}
-
-int tonga_add_voltage(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *look_up_table,
- phm_ppt_v1_voltage_lookup_record *record)
-{
- uint32_t i;
- PP_ASSERT_WITH_CODE((NULL != look_up_table),
- "Lookup Table empty.", return -1;);
- PP_ASSERT_WITH_CODE((0 != look_up_table->count),
- "Lookup Table empty.", return -1;);
- PP_ASSERT_WITH_CODE((SMU72_MAX_LEVELS_VDDGFX >= look_up_table->count),
- "Lookup Table is full.", return -1;);
-
- /* This is to avoid entering duplicate calculated records. */
- for (i = 0; i < look_up_table->count; i++) {
- if (look_up_table->entries[i].us_vdd == record->us_vdd) {
- if (look_up_table->entries[i].us_calculated == 1)
- return 0;
- else
- break;
- }
- }
-
- look_up_table->entries[i].us_calculated = 1;
- look_up_table->entries[i].us_vdd = record->us_vdd;
- look_up_table->entries[i].us_cac_low = record->us_cac_low;
- look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
- look_up_table->entries[i].us_cac_high = record->us_cac_high;
- /* Only increment the count when we're appending, not replacing duplicate entry. */
- if (i == look_up_table->count)
- look_up_table->count++;
-
- return 0;
-}
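tonga_add_voltage() either reuses the slot of a duplicate VDD or appends a new entry, bumping the count only on append. A simplified standalone rendering of that decision (it skips the original's early-out for already-calculated duplicates):

#include <stdio.h>
#include <stdint.h>

struct rec { uint16_t vdd; uint8_t calculated; };

/* Return the slot for `vdd`: an existing entry if present, else a new
 * one appended at the end (count is bumped only on append). */
static unsigned add_or_reuse(struct rec *t, unsigned *count, uint16_t vdd)
{
	unsigned i;

	for (i = 0; i < *count; i++)
		if (t[i].vdd == vdd)
			break;

	t[i].vdd = vdd;
	t[i].calculated = 1;
	if (i == *count)
		(*count)++;
	return i;
}

int main(void)
{
	struct rec t[8] = { { 900, 0 }, { 1000, 0 } };
	unsigned count = 2;

	add_or_reuse(t, &count, 1000); /* reuses slot 1 */
	add_or_reuse(t, &count, 1100); /* appends at slot 2 */
	printf("count = %u\n", count); /* -> 3 */
	return 0;
}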
-
-int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
-{
-	PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
-
- return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
-}
-
-uint8_t tonga_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
- uint32_t voltage)
-{
- uint8_t count = (uint8_t) (voltage_table->count);
- uint8_t i = 0;
-
- PP_ASSERT_WITH_CODE((NULL != voltage_table),
- "Voltage Table empty.", return 0;);
- PP_ASSERT_WITH_CODE((0 != count),
- "Voltage Table empty.", return 0;);
-
- for (i = 0; i < count; i++) {
- /* find first voltage bigger than requested */
- if (voltage_table->entries[i].value >= voltage)
- return i;
- }
-
- /* voltage is bigger than max voltage in the table */
- return i - 1;
-}
-
-/**
- * @brief tonga_get_voltage_index
- * Returns the index of the requested voltage record in the lookup table
- * @param look_up_table - lookup list to search in
- * @param voltage - voltage to look for
- * @return index of the first entry at or above the requested voltage,
- * clamped to the last entry when the request exceeds the table maximum
- */
-uint8_t tonga_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table,
- uint16_t voltage)
-{
- uint8_t count = (uint8_t) (look_up_table->count);
- uint8_t i;
-
- PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;);
- PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;);
-
- for (i = 0; i < count; i++) {
- /* find first voltage equal or bigger than requested */
- if (look_up_table->entries[i].us_vdd >= voltage)
- return i;
- }
-
- /* voltage is bigger than max voltage in the table */
-	return i - 1;
-}
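Both lookup helpers above do the same first-fit scan: return the first entry at or above the request, clamping to the last entry when the request exceeds the table maximum. A standalone rendering of the scan (the bare array stands in for the lookup-table structs):

#include <stdio.h>
#include <stdint.h>

/* First entry >= voltage; clamp to the last index when nothing matches. */
static uint8_t first_fit_index(const uint16_t *vdd, uint8_t count,
			       uint16_t voltage)
{
	uint8_t i;

	for (i = 0; i < count; i++)
		if (vdd[i] >= voltage)
			return i;
	return count - 1;
}

int main(void)
{
	const uint16_t table[] = { 800, 900, 1000, 1100 };

	printf("%u\n", (unsigned)first_fit_index(table, 4, 950));  /* -> 2 (1000 mV) */
	printf("%u\n", (unsigned)first_fit_index(table, 4, 1200)); /* -> 3 (clamped) */
	return 0;
}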
-
-bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- /*
- * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
-	 * because we may have test scenarios that need us to intentionally disable SCLK/MCLK DPM,
-	 * whereas voltage control is a fundamental change that will not be disabled
- */
-
- return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0);
-}
-
-/**
- * Re-generate the DPM level mask value
- * @param dpm_table the address of the single DPM table to scan
- */
-static uint32_t tonga_get_dpm_level_enable_mask_value(
- struct tonga_single_dpm_table * dpm_table)
-{
- uint32_t i;
- uint32_t mask_value = 0;
-
- for (i = dpm_table->count; i > 0; i--) {
- mask_value = mask_value << 1;
-
- if (dpm_table->dpm_levels[i-1].enabled)
- mask_value |= 0x1;
- else
- mask_value &= 0xFFFFFFFE;
- }
- return mask_value;
-}
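The mask builder above walks levels from the top down so that bit i of the result mirrors level i of the table; a compact standalone equivalent:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint32_t enable_mask(const bool *enabled, unsigned count)
{
	uint32_t mask = 0;
	unsigned i;

	/* Same top-down walk as above: shift, then OR in the next level. */
	for (i = count; i > 0; i--) {
		mask <<= 1;
		if (enabled[i - 1])
			mask |= 0x1;
	}
	return mask;
}

int main(void)
{
	const bool levels[] = { true, false, true, true }; /* levels 0..3 */

	printf("0x%x\n", (unsigned)enable_mask(levels, 4)); /* -> 0xd (0b1101) */
	return 0;
}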
-
-/**
- * Retrieve DPM default values from registry (if available)
- *
- * @param hwmgr the address of the powerplay hardware manager.
- */
-void tonga_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- phw_tonga_ulv_parm *ulv = &(data->ulv);
- uint32_t tmp;
-
- ulv->ch_ulv_parameter = PPTONGA_CGULVPARAMETER_DFLT;
- data->voting_rights_clients0 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0;
- data->voting_rights_clients1 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1;
- data->voting_rights_clients2 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2;
- data->voting_rights_clients3 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3;
- data->voting_rights_clients4 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4;
- data->voting_rights_clients5 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5;
- data->voting_rights_clients6 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6;
- data->voting_rights_clients7 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7;
-
- data->static_screen_threshold_unit = PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT;
- data->static_screen_threshold = PPTONGA_STATICSCREENTHRESHOLD_DFLT;
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ABM);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_NonABMSupportInPPLib);
-
- tmp = 0;
- if (tmp == 0)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicACTiming);
-
- tmp = 0;
- if (0 != tmp)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMemoryTransition);
-
- data->mclk_strobe_mode_threshold = 40000;
- data->mclk_stutter_mode_threshold = 30000;
- data->mclk_edc_enable_threshold = 40000;
- data->mclk_edc_wr_enable_threshold = 40000;
-
- tmp = 0;
- if (tmp != 0)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMCLS);
-
- data->pcie_gen_performance.max = PP_PCIEGen1;
- data->pcie_gen_performance.min = PP_PCIEGen3;
- data->pcie_gen_power_saving.max = PP_PCIEGen1;
- data->pcie_gen_power_saving.min = PP_PCIEGen3;
-
- data->pcie_lane_performance.max = 0;
- data->pcie_lane_performance.min = 16;
- data->pcie_lane_power_saving.max = 0;
- data->pcie_lane_power_saving.min = 16;
-
- tmp = 0;
-
- if (tmp)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicUVDState);
-
-}
-
-int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold;
-
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = tonga_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- data->sram_end
- );
- }
-
- return result;
-}
-
-/**
- * Find SCLK value that is associated with specified virtual_voltage_Id.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param virtual_voltage_Id voltageId to look for.
- * @param sclk output SCLK value.
- * @return 0 on success, -1 if the association is not found
- */
-static int tonga_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table,
- uint16_t virtual_voltage_id, uint32_t *sclk)
-{
- uint8_t entryId;
- uint8_t voltageId;
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -1);
-
-	/* search for leakage voltage ID 0xff01 ~ 0xff08 and SCLK */
- for (entryId = 0; entryId < pptable_info->vdd_dep_on_sclk->count; entryId++) {
- voltageId = pptable_info->vdd_dep_on_sclk->entries[entryId].vddInd;
- if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
- break;
- }
-
- PP_ASSERT_WITH_CODE(entryId < pptable_info->vdd_dep_on_sclk->count,
- "Can't find requested voltage id in vdd_dep_on_sclk table!",
- return -1;
- );
-
- *sclk = pptable_info->vdd_dep_on_sclk->entries[entryId].clk;
-
- return 0;
-}
-
-/**
- * Get Leakage VDDC based on leakage ID.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success, -1 if the retrieved voltage is invalid (zero, or 2 V and above, which could damage the ASIC)
- */
-int tonga_get_evv_voltage(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
- uint16_t virtual_voltage_id;
- uint16_t vddc = 0;
- uint16_t vddgfx = 0;
- uint16_t i, j;
- uint32_t sclk = 0;
-
- /* retrieve voltage for leakage ID (0xff01 + i) */
- for (i = 0; i < TONGA_MAX_LEAKAGE_COUNT; i++) {
- virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
-
- /* in split mode we should have only vddgfx EVV leakages */
- if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
- if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
- pptable_info->vddgfx_lookup_table, virtual_voltage_id, &sclk)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- for (j = 1; j < sclk_table->count; j++) {
- if (sclk_table->entries[j].clk == sclk &&
- sclk_table->entries[j].cks_enable == 0) {
- sclk += 5000;
- break;
- }
- }
- }
- if (0 == atomctrl_get_voltage_evv_on_sclk
- (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
- virtual_voltage_id, &vddgfx)) {
-				/* need to make sure vddgfx is less than 2 V, or else it could burn the ASIC. */
- PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -1);
-
- /* the voltage should not be zero nor equal to leakage ID */
- if (vddgfx != 0 && vddgfx != virtual_voltage_id) {
- data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
- data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = virtual_voltage_id;
- data->vddcgfx_leakage.count++;
- }
- } else {
- printk("Error retrieving EVV voltage value!\n");
- }
- }
- } else {
- /* in merged mode we have only vddc EVV leakages */
- if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
- pptable_info->vddc_lookup_table,
- virtual_voltage_id, &sclk)) {
- if (0 == atomctrl_get_voltage_evv_on_sclk
- (hwmgr, VOLTAGE_TYPE_VDDC, sclk,
- virtual_voltage_id, &vddc)) {
-				/* need to make sure vddc is less than 2 V, or else it could burn the ASIC. */
- PP_ASSERT_WITH_CODE(vddc < 2000, "Invalid VDDC value!", return -1);
-
- /* the voltage should not be zero nor equal to leakage ID */
- if (vddc != 0 && vddc != virtual_voltage_id) {
- data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
- data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
- data->vddc_leakage.count++;
- }
- } else {
- printk("Error retrieving EVV voltage value!\n");
- }
- }
- }
- }
-
- return 0;
-}
-
-int tonga_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- /* enable SCLK dpm */
- if (0 == data->sclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_DPM_Enable)),
- "Failed to enable SCLK DPM during DPM Start Function!",
- return -1);
- }
-
- /* enable MCLK dpm */
- if (0 == data->mclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Enable)),
- "Failed to enable MCLK DPM during DPM Start Function!",
- return -1);
-
- PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_CPL_CNTL, 0x100005);/*Read */
-
- udelay(10);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_CPL_CNTL, 0x500005);/* write */
-
- }
-
- return 0;
-}
-
-int tonga_start_dpm(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- /* enable general power management */
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1);
- /* enable sclk deep sleep */
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1);
-
- /* prepare for PCIE DPM */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
- offsetof(SMU72_SoftRegisters, VoltageChangeTimeout), 0x1000);
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0);
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_Voltage_Cntl_Enable)),
- "Failed to enable voltage DPM during DPM Start Function!",
- return -1);
-
- if (0 != tonga_enable_sclk_mclk_dpm(hwmgr)) {
- PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1);
- }
-
- /* enable PCIE dpm */
- if (0 == data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_Enable)),
- "Failed to enable pcie DPM during DPM Start Function!",
- return -1
- );
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition)) {
- smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_EnableACDCGPIOInterrupt);
- }
-
- return 0;
-}
-
-int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- /* disable SCLK dpm */
- if (0 == data->sclk_dpm_key_disabled) {
-		/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(
- !tonga_is_dpm_running(hwmgr),
- "Trying to Disable SCLK DPM when DPM is disabled",
- return -1
- );
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_DPM_Disable)),
- "Failed to disable SCLK DPM during DPM stop Function!",
- return -1);
- }
-
- /* disable MCLK dpm */
- if (0 == data->mclk_dpm_key_disabled) {
-		/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(
- !tonga_is_dpm_running(hwmgr),
- "Trying to Disable MCLK DPM when DPM is disabled",
- return -1
- );
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Disable)),
- "Failed to Disable MCLK DPM during DPM stop Function!",
- return -1);
- }
-
- return 0;
-}
-
-int tonga_stop_dpm(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 0);
- /* disable sclk deep sleep*/
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 0);
-
- /* disable PCIE dpm */
- if (0 == data->pcie_dpm_key_disabled) {
-		/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(
- !tonga_is_dpm_running(hwmgr),
- "Trying to Disable PCIE DPM when DPM is disabled",
- return -1
- );
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_Disable)),
- "Failed to disable pcie DPM during DPM stop Function!",
- return -1);
- }
-
- if (0 != tonga_disable_sclk_mclk_dpm(hwmgr))
- PP_ASSERT_WITH_CODE(0, "Failed to disable Sclk DPM and Mclk DPM!", return -1);
-
-	/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(
- !tonga_is_dpm_running(hwmgr),
- "Trying to Disable Voltage CNTL when DPM is disabled",
- return -1
- );
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_Voltage_Cntl_Disable)),
- "Failed to disable voltage DPM during DPM stop Function!",
- return -1);
-
- return 0;
-}
-
-int tonga_enable_sclk_control(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0);
-
- return 0;
-}
-
-/**
- * Send a message to the SMC and return a parameter
- *
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param msg: the message to send.
- * @param parameter: pointer to the received parameter
- * @return The response that came from the SMC.
- */
-PPSMC_Result tonga_send_msg_to_smc_return_parameter(
- struct pp_hwmgr *hwmgr,
- PPSMC_Msg msg,
- uint32_t *parameter)
-{
- int result;
-
- result = smum_send_msg_to_smc(hwmgr->smumgr, msg);
-
- if ((0 == result) && parameter) {
- *parameter = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
- }
-
- return result;
-}
-
-/**
- * Force the SCLK DPM level
- *
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param n : DPM level
- * @return 0 on success, 1 if sending the SMC message failed
- */
-int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- uint32_t level_mask = 1 << n;
-
-	/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
- "Trying to force SCLK when DPM is disabled",
- return -1;);
- if (0 == data->sclk_dpm_key_disabled)
- return (0 == smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- (PPSMC_Msg)(PPSMC_MSG_SCLKDPM_SetEnabledMask),
- level_mask) ? 0 : 1);
-
- return 0;
-}
-
-/**
- * Force the MCLK DPM level
- *
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param n : DPM level
- * @return 0 on success, 1 if sending the SMC message failed
- */
-int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- uint32_t level_mask = 1 << n;
-
-	/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
- "Trying to Force MCLK when DPM is disabled",
- return -1;);
- if (0 == data->mclk_dpm_key_disabled)
- return (0 == smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- (PPSMC_Msg)(PPSMC_MSG_MCLKDPM_SetEnabledMask),
- level_mask) ? 0 : 1);
-
- return 0;
-}
-
-/**
- * Force the PCIe DPM level
- *
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param n : DPM level
- * @return 0 on success, 1 if sending the SMC message failed
- */
-int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
-	/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
- "Trying to Force PCIE level when DPM is disabled",
- return -1;);
- if (0 == data->pcie_dpm_key_disabled)
- return (0 == smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- (PPSMC_Msg)(PPSMC_MSG_PCIeDPM_ForceLevel),
- n) ? 0 : 1);
-
- return 0;
-}
-
-/**
- * Set the initial state by calling SMC to switch to this state directly
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success, 1 otherwise
- */
-int tonga_set_boot_state(struct pp_hwmgr *hwmgr)
-{
- /*
-	 * SMC only stores one state that SW will ask to switch to,
-	 * so we switch to the one just uploaded
- */
- return (0 == tonga_disable_sclk_mclk_dpm(hwmgr)) ? 0 : 1;
-}
-
-/**
- * Get the location of various tables inside the FW image.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success, 1 if any of the SMC SRAM reads failed
- */
-int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = tonga_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, DpmTable),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->dpm_table_start = tmp;
- }
-
- error |= (0 != result);
-
- result = tonga_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, SoftRegisters),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->soft_regs_start = tmp;
- tonga_smu->ulSoftRegsStart = tmp;
- }
-
- error |= (0 != result);
-
-
- result = tonga_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, mcRegisterTable),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->mc_reg_table_start = tmp;
- }
-
- result = tonga_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, FanTable),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->fan_table_start = tmp;
- }
-
- error |= (0 != result);
-
- result = tonga_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->arb_table_start = tmp;
- }
-
- error |= (0 != result);
-
-
- result = tonga_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, Version),
- &tmp, data->sram_end);
-
- if (0 == result) {
- hwmgr->microcode_version_info.SMC = tmp;
- }
-
- error |= (0 != result);
-
- return error ? 1 : 0;
-}
-
-/**
- * Read clock related registers.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_read_clock_registers(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- data->clock_registers.vCG_SPLL_FUNC_CNTL =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
- data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
- data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
- data->clock_registers.vDLL_CNTL =
- cgs_read_register(hwmgr->device, mmDLL_CNTL);
- data->clock_registers.vMCLK_PWRMGT_CNTL =
- cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
- data->clock_registers.vMPLL_AD_FUNC_CNTL =
- cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
- data->clock_registers.vMPLL_DQ_FUNC_CNTL =
- cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
- data->clock_registers.vMPLL_FUNC_CNTL =
- cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
- data->clock_registers.vMPLL_FUNC_CNTL_1 =
- cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
- data->clock_registers.vMPLL_FUNC_CNTL_2 =
- cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
- data->clock_registers.vMPLL_SS1 =
- cgs_read_register(hwmgr->device, mmMPLL_SS1);
- data->clock_registers.vMPLL_SS2 =
- cgs_read_register(hwmgr->device, mmMPLL_SS2);
-
- return 0;
-}
-
-/**
- * Find out if memory is GDDR5.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_get_memory_type(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- uint32_t temp;
-
- temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
- data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
- ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
- MC_SEQ_MISC0_GDDR5_SHIFT));
-
- return 0;
-}
-
-/**
- * Enables Dynamic Power Management by SMC
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1);
-
- return 0;
-}
-
-/**
- * Initialize PowerGating States for different engines
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- data->uvd_power_gated = false;
- data->vce_power_gated = false;
- data->samu_power_gated = false;
- data->acp_power_gated = false;
- data->pg_acp_init = true;
-
- return 0;
-}
-
-/**
- * Checks if DPM is enabled
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 if DPM is not running, 1 if it is
- */
-int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr)
-{
- /*
- * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
-	 * because we may have test scenarios that need us to intentionally disable SCLK/MCLK DPM,
-	 * whereas voltage control is a fundamental change that will not be disabled
- */
- return (!tonga_is_dpm_running(hwmgr) ? 0 : 1);
-}
-
-/**
- * Checks if DPM is stopped
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success, 1 if DPM is running without a valid dpm_table_start
- */
-int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- if (tonga_is_dpm_running(hwmgr)) {
- /* If HW Virtualization is enabled, dpm_table_start will not have a valid value */
- if (!data->dpm_table_start) {
- return 1;
- }
- }
-
- return 0;
-}
-
-/**
- * Remove repeated voltage values and create table with unique values.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param voltage_table pointer to the voltage table to trim in place
- * @return 0 on success, -ENOMEM if the scratch table cannot be allocated
- */
-
-static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
- pp_atomctrl_voltage_table *voltage_table)
-{
- uint32_t table_size, i, j;
- uint16_t vvalue;
- bool bVoltageFound = false;
- pp_atomctrl_voltage_table *table;
-
- PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;);
- table_size = sizeof(pp_atomctrl_voltage_table);
- table = kzalloc(table_size, GFP_KERNEL);
-
- if (NULL == table)
- return -ENOMEM;
-
- memset(table, 0x00, table_size);
- table->mask_low = voltage_table->mask_low;
- table->phase_delay = voltage_table->phase_delay;
-
- for (i = 0; i < voltage_table->count; i++) {
- vvalue = voltage_table->entries[i].value;
- bVoltageFound = false;
-
- for (j = 0; j < table->count; j++) {
- if (vvalue == table->entries[j].value) {
- bVoltageFound = true;
- break;
- }
- }
-
- if (!bVoltageFound) {
- table->entries[table->count].value = vvalue;
- table->entries[table->count].smio_low =
- voltage_table->entries[i].smio_low;
- table->count++;
- }
- }
-
-	memcpy(voltage_table, table, sizeof(pp_atomctrl_voltage_table));	/* copy the de-duplicated entries back */
-
- kfree(table);
-
- return 0;
-}
-
-static int tonga_get_svi2_vdd_ci_voltage_table(
- struct pp_hwmgr *hwmgr,
- phm_ppt_v1_clock_voltage_dependency_table *voltage_dependency_table)
-{
- uint32_t i;
- int result;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- pp_atomctrl_voltage_table *vddci_voltage_table = &(data->vddci_voltage_table);
-
- PP_ASSERT_WITH_CODE((0 != voltage_dependency_table->count),
- "Voltage Dependency Table empty.", return -1;);
-
- vddci_voltage_table->mask_low = 0;
- vddci_voltage_table->phase_delay = 0;
- vddci_voltage_table->count = voltage_dependency_table->count;
-
- for (i = 0; i < voltage_dependency_table->count; i++) {
- vddci_voltage_table->entries[i].value =
- voltage_dependency_table->entries[i].vddci;
- vddci_voltage_table->entries[i].smio_low = 0;
- }
-
- result = tonga_trim_voltage_table(hwmgr, vddci_voltage_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to trim VDDCI table.", return result;);
-
- return 0;
-}
-
-
-
-static int tonga_get_svi2_vdd_voltage_table(
- struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *look_up_table,
- pp_atomctrl_voltage_table *voltage_table)
-{
- uint8_t i = 0;
-
- PP_ASSERT_WITH_CODE((0 != look_up_table->count),
- "Voltage Lookup Table empty.", return -1;);
-
- voltage_table->mask_low = 0;
- voltage_table->phase_delay = 0;
-
- voltage_table->count = look_up_table->count;
-
- for (i = 0; i < voltage_table->count; i++) {
- voltage_table->entries[i].value = look_up_table->entries[i].us_vdd;
- voltage_table->entries[i].smio_low = 0;
- }
-
- return 0;
-}
-
-/*
- * -------------------------------------------------------- Voltage Tables --------------------------------------------------------------------------
- * If the voltage table would be bigger than what will fit into the state table on the SMC keep only the higher entries.
- */
-
-static void tonga_trim_voltage_table_to_fit_state_table(
- struct pp_hwmgr *hwmgr,
- uint32_t max_voltage_steps,
- pp_atomctrl_voltage_table *voltage_table)
-{
- unsigned int i, diff;
-
- if (voltage_table->count <= max_voltage_steps) {
- return;
- }
-
- diff = voltage_table->count - max_voltage_steps;
-
- for (i = 0; i < max_voltage_steps; i++) {
- voltage_table->entries[i] = voltage_table->entries[i + diff];
- }
-
- voltage_table->count = max_voltage_steps;
-
- return;
-}
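The trim above keeps only the highest max_voltage_steps entries by shifting the tail of the table down; a standalone sketch:

#include <stdio.h>

/* Keep only the highest `max_steps` entries of an ascending table. */
static unsigned trim_to_fit(unsigned short *v, unsigned count,
			    unsigned max_steps)
{
	unsigned i, diff;

	if (count <= max_steps)
		return count;

	diff = count - max_steps;
	for (i = 0; i < max_steps; i++)
		v[i] = v[i + diff];
	return max_steps;
}

int main(void)
{
	unsigned short v[] = { 800, 850, 900, 1000, 1100 };
	unsigned n = trim_to_fit(v, 5, 3);
	unsigned i;

	for (i = 0; i < n; i++)
		printf("%u ", v[i]); /* -> 900 1000 1100 */
	printf("\n");
	return 0;
}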
-
-/**
- * Create Voltage Tables.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success, otherwise an error code from the table helpers
- */
-int tonga_construct_voltage_tables(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result;
-
- /* MVDD has only GPIO voltage control */
- if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve MVDD table.", return result;);
- }
-
- if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
- /* GPIO voltage */
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve VDDCI table.", return result;);
- } else if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
- /* SVI2 voltage */
- result = tonga_get_svi2_vdd_ci_voltage_table(hwmgr,
- pptable_info->vdd_dep_on_mclk);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDCI table from dependancy table.", return result;);
- }
-
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
- /* VDDGFX has only SVI2 voltage control */
- result = tonga_get_svi2_vdd_voltage_table(hwmgr,
- pptable_info->vddgfx_lookup_table, &(data->vddgfx_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
- }
-
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- /* VDDC has only SVI2 voltage control */
- result = tonga_get_svi2_vdd_voltage_table(hwmgr,
- pptable_info->vddc_lookup_table, &(data->vddc_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDC table from lookup table.", return result;);
- }
-
- PP_ASSERT_WITH_CODE(
- (data->vddc_voltage_table.count <= (SMU72_MAX_LEVELS_VDDC)),
- "Too many voltage values for VDDC. Trimming to fit state table.",
- tonga_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU72_MAX_LEVELS_VDDC, &(data->vddc_voltage_table));
- );
-
- PP_ASSERT_WITH_CODE(
- (data->vddgfx_voltage_table.count <= (SMU72_MAX_LEVELS_VDDGFX)),
- "Too many voltage values for VDDGFX. Trimming to fit state table.",
- tonga_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU72_MAX_LEVELS_VDDGFX, &(data->vddgfx_voltage_table));
- );
-
- PP_ASSERT_WITH_CODE(
- (data->vddci_voltage_table.count <= (SMU72_MAX_LEVELS_VDDCI)),
- "Too many voltage values for VDDCI. Trimming to fit state table.",
- tonga_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU72_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table));
- );
-
- PP_ASSERT_WITH_CODE(
- (data->mvdd_voltage_table.count <= (SMU72_MAX_LEVELS_MVDD)),
- "Too many voltage values for MVDD. Trimming to fit state table.",
- tonga_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU72_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table));
- );
-
- return 0;
-}
-
-/**
- * Vddc table preparation for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- unsigned int count;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- table->VddcLevelCount = data->vddc_voltage_table.count;
- for (count = 0; count < table->VddcLevelCount; count++) {
- table->VddcTable[count] =
- PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
- }
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
- }
- return 0;
-}
-
-/**
- * VddGfx table preparation for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- unsigned int count;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
- table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
- for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
- table->VddGfxTable[count] =
- PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
- }
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
- }
- return 0;
-}
-
-/**
- * Vddci table preparation for SMC.
- *
- * @param *hwmgr The address of the hardware manager.
- * @param *table The SMC DPM table structure to be populated.
- * @return 0
- */
-static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- uint32_t count;
-
- table->VddciLevelCount = data->vddci_voltage_table.count;
- for (count = 0; count < table->VddciLevelCount; count++) {
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
- table->VddciTable[count] =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
- } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
- table->SmioTable1.Pattern[count].Voltage =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
- /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
- table->SmioTable1.Pattern[count].Smio =
- (uint8_t) count;
- table->Smio[count] |=
- data->vddci_voltage_table.entries[count].smio_low;
- table->VddciTable[count] =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
- }
- }
-
- table->SmioMask1 = data->vddci_voltage_table.mask_low;
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
-
- return 0;
-}
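-/*
- * Sketch of the GPIO (SMIO) path above for a hypothetical entry at index 2
- * with value 1000 mV: the voltage is mirrored into both tables and the SMIO
- * bits for that level are OR-ed into the shared Smio[] array:
- *
- *   table->SmioTable1.Pattern[2].Voltage = PP_HOST_TO_SMC_US(1000 * VOLTAGE_SCALE);
- *   table->SmioTable1.Pattern[2].Smio    = 2;
- *   table->Smio[2]                      |= entries[2].smio_low;
- *   table->VddciTable[2]                 = PP_HOST_TO_SMC_US(1000 * VOLTAGE_SCALE);
- */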
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- uint32_t count;
-
- if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- table->MvddLevelCount = data->mvdd_voltage_table.count;
- for (count = 0; count < table->MvddLevelCount; count++) {
- table->SmioTable2.Pattern[count].Voltage =
- PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
- /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
- table->SmioTable2.Pattern[count].Smio =
- (uint8_t) count;
- table->Smio[count] |=
- data->mvdd_voltage_table.entries[count].smio_low;
- }
- table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
- }
-
- return 0;
-}
-
-/**
- * Convert a voltage value in mV units to the VID number required by the SMU firmware.
- */
-static uint8_t convert_to_vid(uint16_t vddc)
-{
- return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
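-/*
- * Worked example of convert_to_vid() (assuming VOLTAGE_SCALE == 4, which
- * matches the SVI2 encoding of a 1.55 V base with 6.25 mV steps):
- *   vddc = 1150 mV -> (6200 - 1150 * 4) / 25 = 1600 / 25 = VID 64 (0x40)
- *   vddc = 900 mV  -> (6200 - 900 * 4) / 25  = 2600 / 25 = VID 104 (0x68)
- */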
-
-
-/**
- * Preparation of vddc and vddgfx CAC tables for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table;
- struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table = pptable_info->vddc_lookup_table;
-
-	/* The table has already been byte-swapped for the SMC, so swap the level counts back before using them. */
- uint32_t vddcLevelCount = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
- uint32_t vddgfxLevelCount = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
-
- for (count = 0; count < vddcLevelCount; count++) {
- /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
- index = tonga_get_voltage_index(vddc_lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
- table->BapmVddcVidHiSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
- table->BapmVddcVidHiSidd2[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
- }
-
-	if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
- /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
- for (count = 0; count < vddgfxLevelCount; count++) {
- index = tonga_get_voltage_index(vddgfx_lookup_table,
- data->vddgfx_voltage_table.entries[count].value);
- table->BapmVddGfxVidLoSidd[count] =
- convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_low);
- table->BapmVddGfxVidHiSidd[count] =
- convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid);
- table->BapmVddGfxVidHiSidd2[count] =
- convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
- }
- } else {
- for (count = 0; count < vddcLevelCount; count++) {
- index = tonga_get_voltage_index(vddc_lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddGfxVidLoSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
- table->BapmVddGfxVidHiSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
- table->BapmVddGfxVidHiSidd2[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
- }
- }
-
- return 0;
-}
-
-
-/**
- * Preparation of voltage tables for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return 0 on success, -1 on failure
- */
-
-int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result;
-
-	result = tonga_populate_smc_vddc_table(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"cannot populate VDDC voltage table to SMC", return -1);
-
-	result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"cannot populate VDDCI voltage table to SMC", return -1);
-
-	result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"cannot populate VDDGFX voltage table to SMC", return -1);
-
-	result = tonga_populate_smc_mvdd_table(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"cannot populate MVDD voltage table to SMC", return -1);
-
-	result = tonga_populate_cac_tables(hwmgr, table);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"cannot populate CAC voltage tables to SMC", return -1);
-
- return 0;
-}
-
-/**
- * Populates the SMC VRConfig field in DPM table.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- uint16_t config;
-
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
-		/* Split mode */
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= config;
- } else {
-			printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should both be on SVI2 control in split mode!\n");
- }
- } else {
- /* Merged mode */
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
-			printk(KERN_ERR "[ powerplay ] VDDC should be on SVI2 control in merged mode!\n");
- }
- }
-
- /* Set Vddci Voltage Controller */
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
- } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
- }
-
- /* Set Mvdd Voltage Controller */
- if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- config = VR_SMIO_PATTERN_2;
- table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
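-/*
- * Example of the resulting packing in merged mode with SVI2 VDDC, GPIO
- * VDDCI and GPIO MVDD (all constants as used in the function above):
- *
- *   table->VRConfig = (VR_MERGED_WITH_VDDC << VRCONF_VDDGFX_SHIFT)
- *                   |  VR_SVI2_PLANE_1
- *                   | (VR_SMIO_PATTERN_1 << VRCONF_VDDCI_SHIFT)
- *                   | (VR_SMIO_PATTERN_2 << VRCONF_MVDD_SHIFT);
- */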
-
-static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
- uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
- uint32_t i = 0;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	/* the clock-voltage dependency table is empty */
- if (allowed_clock_voltage_table->count == 0)
- return -1;
-
- for (i = 0; i < allowed_clock_voltage_table->count; i++) {
-		/* find the first entry whose clock is >= the requested clock */
- if (allowed_clock_voltage_table->entries[i].clk >= clock) {
- voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
- allowed_clock_voltage_table->entries[i].vddgfx);
-
- voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
- allowed_clock_voltage_table->entries[i].vddc);
-
- if (allowed_clock_voltage_table->entries[i].vddci) {
- voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
- allowed_clock_voltage_table->entries[i].vddci);
- } else {
- voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
- allowed_clock_voltage_table->entries[i].vddc - data->vddc_vddci_delta);
- }
-
- if (allowed_clock_voltage_table->entries[i].mvdd) {
- *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
- }
-
- voltage->Phases = 1;
- return 0;
- }
- }
-
-	/* the requested clock exceeds the highest clock in the dependency table; use the last entry */
- voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
- allowed_clock_voltage_table->entries[i-1].vddgfx);
- voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
- allowed_clock_voltage_table->entries[i-1].vddc);
-
- if (allowed_clock_voltage_table->entries[i-1].vddci) {
- voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
- allowed_clock_voltage_table->entries[i-1].vddci);
- }
- if (allowed_clock_voltage_table->entries[i-1].mvdd) {
- *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
- }
-
- return 0;
-}
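-/*
- * Illustration of the lookup above with a hypothetical dependency table
- * whose clocks are {20000, 40000, 60000} (clock units as stored in the
- * table): a request of 35000 matches the first entry with clk >= 35000
- * (40000); a request of 70000 falls past the end and uses the last entry
- * (60000).
- */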
-
-/**
- * Call the SMC to reset S0/S1 to S1 and reset SMIO to its initial value.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success, 1 on failure
- */
-int tonga_reset_to_default(struct pp_hwmgr *hwmgr)
-{
- return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults) == 0) ? 0 : 1;
-}
-
-int tonga_populate_memory_timing_parameters(
- struct pp_hwmgr *hwmgr,
- uint32_t engine_clock,
- uint32_t memory_clock,
- struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
- )
-{
- uint32_t dramTiming;
- uint32_t dramTiming2;
- uint32_t burstTime;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- engine_clock, memory_clock);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
- arb_regs->McArbBurstTime = (uint8_t)burstTime;
-
- return 0;
-}
-
-/**
- * Setup parameters for the MC ARB.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success, otherwise an error code
- * This function is to be called from the SetPowerState table.
- */
-int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- int result = 0;
- SMU72_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
-
- memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
-
- for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = tonga_populate_memory_timing_parameters
- (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
- data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
-
- if (0 != result) {
- break;
- }
- }
- }
-
- if (0 == result) {
- result = tonga_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU72_Discrete_MCArbDramTimingTable),
- data->sram_end
- );
- }
-
- return result;
-}
-
-static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct tonga_dpm_table *dpm_table = &data->dpm_table;
- uint32_t i;
-
- /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount =
- (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity =
- 1;
- table->LinkLevel[i].SPC =
- (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold =
- PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold =
- PP_HOST_TO_SMC_UL(30);
- }
-
- data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- tonga_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
-
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
- table->UvdLevelCount = (uint8_t) (mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage.Vddc =
- tonga_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->UvdLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
- tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->UvdLevel[count].MinVoltage.Vddci =
- tonga_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - data->vddc_vddci_delta);
- table->UvdLevel[count].MinVoltage.Phases = 1;
-
-		/* retrieve the divider value from the VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].VclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
-			"cannot find divider ID for Vclk clock", return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
-			"cannot find divider ID for Dclk clock", return result);
-
- table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
-		/* CONVERT_FROM_HOST_TO_SMC_UL((uint32_t)table->UvdLevel[count].MinVoltage); */
- }
-
- return result;
-
-}
-
-static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
-
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
- table->VceLevelCount = (uint8_t) (mm_table->count);
- table->VceBootLevel = 0;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency =
- mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage.Vddc =
- tonga_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->VceLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
- tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->VceLevel[count].MinVoltage.Vddci =
- tonga_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - data->vddc_vddci_delta);
- table->VceLevel[count].MinVoltage.Phases = 1;
-
-		/* retrieve the divider value from the VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
-			"cannot find divider ID for the VCE engine clock", return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- }
-
- return result;
-}
-
-static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
- table->AcpLevelCount = (uint8_t) (mm_table->count);
- table->AcpBootLevel = 0;
-
- for (count = 0; count < table->AcpLevelCount; count++) {
- table->AcpLevel[count].Frequency =
- pptable_info->mm_dep_table->entries[count].aclk;
- table->AcpLevel[count].MinVoltage.Vddc =
- tonga_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->AcpLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
- tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->AcpLevel[count].MinVoltage.Vddci =
- tonga_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - data->vddc_vddci_delta);
- table->AcpLevel[count].MinVoltage.Phases = 1;
-
-		/* retrieve the divider value from the VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->AcpLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
-			"cannot find divider ID for the engine clock", return result);
-
- table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
- }
-
- return result;
-}
-
-static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t) (mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].Frequency =
- pptable_info->mm_dep_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage.Vddc =
- tonga_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->SamuLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
- tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->SamuLevel[count].MinVoltage.Vddci =
- tonga_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - data->vddc_vddci_delta);
- table->SamuLevel[count].MinVoltage.Phases = 1;
-
-		/* retrieve the divider value from the VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
-			"cannot find divider ID for the SAMU clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- }
-
- return result;
-}
-
-/**
- * Populates the SMC MCLK structure using the provided memory clock
- *
- * @param hwmgr the address of the hardware manager
- * @param memory_clock the memory clock to use to populate the structure
- * @param mclk the SMC memory level structure to be populated
- * @param strobe_mode whether strobe mode is used at this memory clock
- * @param dllStateOn whether the memory DLL is on at this level
- */
-static int tonga_calculate_mclk_params(
- struct pp_hwmgr *hwmgr,
- uint32_t memory_clock,
- SMU72_Discrete_MemoryLevel *mclk,
- bool strobe_mode,
- bool dllStateOn
- )
-{
- const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
- uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
- uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
- uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
- uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
- uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
- uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
- uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
- uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
-
- pp_atomctrl_memory_clock_param mpll_param;
- int result;
-
- result = atomctrl_get_memory_pll_dividers_si(hwmgr,
- memory_clock, &mpll_param, strobe_mode);
- PP_ASSERT_WITH_CODE(0 == result,
- "Error retrieving Memory Clock Parameters from VBIOS.", return result);
-
- /* MPLL_FUNC_CNTL setup*/
- mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
-
- /* MPLL_FUNC_CNTL_1 setup*/
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
-
- /* MPLL_AD_FUNC_CNTL setup*/
- mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
- MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
-
- if (data->is_memory_GDDR5) {
- /* MPLL_DQ_FUNC_CNTL setup*/
- mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
- MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
- mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
- MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
-		/*
-		 * Fref = Reference Frequency
-		 * NF   = Feedback divider ratio
-		 * NR   = Reference divider ratio
-		 * Fnom = Nominal VCO output frequency = Fref * NF / NR
-		 * Fs   = Spreading Rate
-		 * D    = Percentage down-spread / 2
-		 * Fint = Reference input frequency to PFD = Fref / NR
-		 * NS   = Spreading rate divider ratio = int(Fint / (2 * Fs))
-		 * CLKS = NS - 1 = ISS_STEP_NUM[11:0]
-		 * NV   = D * Fs / Fnom * 4 * ((Fnom / Fref * NR) ^ 2)
-		 * CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
-		 */
- pp_atomctrl_internal_ss_info ss_info;
- uint32_t freq_nom;
- uint32_t tmp;
- uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
-
- /* for GDDR5 for all modes and DDR3 */
- if (1 == mpll_param.qdr)
- freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
- else
- freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
-
-		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2; note: on SI, reference_divider = 1 */
- tmp = (freq_nom / reference_clock);
- tmp = tmp * tmp;
-
- if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
- /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
- /* ss.Info.speed_spectrum_rate -- in unit of khz */
- /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
- /* = reference_clock * 5 / speed_spectrum_rate */
- uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
-
- /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
- /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
- uint32_t clkv =
- (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
- ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
-
- mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
- mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
- }
- }
-
- /* MCLK_PWRMGT_CNTL setup */
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
-
-
-	/* Save the result data to the output memory level structure */
- mclk->MclkFrequency = memory_clock;
- mclk->MpllFuncCntl = mpll_func_cntl;
- mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
- mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
- mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
- mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
- mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
- mclk->DllCntl = dll_cntl;
- mclk->MpllSs1 = mpll_ss1;
- mclk->MpllSs2 = mpll_ss2;
-
- return 0;
-}
-
-static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
- bool strobe_mode)
-{
- uint8_t mc_para_index;
-
- if (strobe_mode) {
- if (memory_clock < 12500) {
- mc_para_index = 0x00;
- } else if (memory_clock > 47500) {
- mc_para_index = 0x0f;
- } else {
- mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
- }
- } else {
- if (memory_clock < 65000) {
- mc_para_index = 0x00;
- } else if (memory_clock > 135000) {
- mc_para_index = 0x0f;
- } else {
- mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
- }
- }
-
- return mc_para_index;
-}
-
-static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
-{
- uint8_t mc_para_index;
-
- if (memory_clock < 10000) {
- mc_para_index = 0;
- } else if (memory_clock >= 80000) {
- mc_para_index = 0x0f;
- } else {
- mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
- }
-
- return mc_para_index;
-}
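-/*
- * Worked examples for the two ratio helpers above (memory_clock appears to
- * be in 10 kHz units, so 30000 == 300 MHz):
- *   GDDR5, strobe:     30000  -> (30000 - 10000) / 2500      = 8
- *   GDDR5, non-strobe: 100000 -> (100000 - 60000) / 5000     = 8
- *   DDR3:              40000  -> (40000 - 10000) / 5000 + 1  = 7
- */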
-
-static int tonga_populate_single_memory_level(
- struct pp_hwmgr *hwmgr,
- uint32_t memory_clock,
- SMU72_Discrete_MemoryLevel *memory_level
- )
-{
- uint32_t minMvdd = 0;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
- bool dllStateOn;
- struct cgs_display_info info = {0};
-
-
- if (NULL != pptable_info->vdd_dep_on_mclk) {
- result = tonga_get_dependecy_volt_by_clk(hwmgr,
- pptable_info->vdd_dep_on_mclk, memory_clock, &memory_level->MinVoltage, &minMvdd);
- PP_ASSERT_WITH_CODE((0 == result),
-			"cannot find MinVddc voltage value in the memory VDDC voltage dependency table", return result);
- }
-
- if (data->mvdd_control == TONGA_VOLTAGE_CONTROL_NONE) {
- memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
- } else {
- memory_level->MinMvdd = minMvdd;
- }
- memory_level->EnabledForThrottle = 1;
- memory_level->EnabledForActivity = 0;
- memory_level->UpHyst = 0;
- memory_level->DownHyst = 100;
- memory_level->VoltageDownHyst = 0;
-
- /* Indicates maximum activity level for this performance level.*/
- memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- memory_level->StutterEnable = 0;
- memory_level->StrobeEnable = 0;
- memory_level->EdcReadEnable = 0;
- memory_level->EdcWriteEnable = 0;
- memory_level->RttEnable = 0;
-
- /* default set to low watermark. Highest level will be set to high later.*/
- memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- data->display_timing.num_existing_displays = info.display_count;
-
-	if ((data->mclk_stutter_mode_threshold != 0) &&
-	    (memory_clock <= data->mclk_stutter_mode_threshold) &&
-	    (!data->is_uvd_enabled) &&
-	    (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1) &&
-	    (data->display_timing.num_existing_displays <= 2) &&
-	    (data->display_timing.num_existing_displays != 0))
- memory_level->StutterEnable = 1;
-
- /* decide strobe mode*/
- memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) &&
- (memory_clock <= data->mclk_strobe_mode_threshold);
-
- /* decide EDC mode and memory clock ratio*/
- if (data->is_memory_GDDR5) {
- memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
- memory_level->StrobeEnable);
-
- if ((data->mclk_edc_enable_threshold != 0) &&
- (memory_clock > data->mclk_edc_enable_threshold)) {
- memory_level->EdcReadEnable = 1;
- }
-
- if ((data->mclk_edc_wr_enable_threshold != 0) &&
- (memory_clock > data->mclk_edc_wr_enable_threshold)) {
- memory_level->EdcWriteEnable = 1;
- }
-
- if (memory_level->StrobeEnable) {
- if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
- ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
- dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
- } else {
- dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
- }
-
- } else {
- dllStateOn = data->dll_defaule_on;
- }
- } else {
- memory_level->StrobeRatio =
- tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
- dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
- }
-
- result = tonga_calculate_mclk_params(hwmgr,
- memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn);
-
- if (0 == result) {
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
- /* MCLK frequency in units of 10KHz*/
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
- /* Indicates maximum activity level for this performance level.*/
- CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
- }
-
- return result;
-}
-
-/**
- * Populates the SMC MVDD structure using the provided memory clock.
- *
- * @param hwmgr the address of the hardware manager
- * @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
- * @param smio_pattern the SMIO pattern structure to be populated
- */
-int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMIO_Pattern *smio_pattern)
-{
- const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (TONGA_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-		/* find the first MVDD entry whose clock is at least the requested MCLK */
- for (i = 0; i < pptable_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= pptable_info->vdd_dep_on_mclk->entries[i].clk) {
- /* Always round to higher voltage. */
- smio_pattern->Voltage = data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
-
- PP_ASSERT_WITH_CODE(i < pptable_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.", return -1);
-
- } else {
- return -1;
- }
-
- return 0;
-}
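-/*
- * Example of the rounding above with a hypothetical vdd_dep_on_mclk table
- * of clocks {20000, 40000, 60000}: an mclk of 30000 selects entry 1
- * (clk 40000), i.e. the voltage is always rounded up to the first entry
- * whose clock is >= the request.
- */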
-
-
-static int tonga_populate_smv_acpi_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- pp_atomctrl_clock_dividers_vi dividers;
- SMIO_Pattern voltage_level;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
- uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
- uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
-
- /* The ACPI state should not do DPM on DC (or ever).*/
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- table->ACPILevel.MinVoltage = data->smc_state_table.GraphicsLevel[0].MinVoltage;
-
-	/* use the reference clock as the ACPI level SCLK */
- table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
-
- /* get the engine clock dividers for this clock value*/
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
- table->ACPILevel.SclkFrequency, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- /* divider ID for required SCLK*/
- table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
- table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- table->ACPILevel.DeepSleepDivId = 0;
-
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
- spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
- CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
-
- table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
- table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
- table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
-
- /* For various features to be enabled/disabled while this level is active.*/
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- /* SCLK frequency in units of 10KHz*/
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
- table->MemoryACPILevel.MinVoltage = data->smc_state_table.MemoryLevel[0].MinVoltage;
-
- /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
-
- if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
- table->MemoryACPILevel.MinMvdd =
- PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
- else
- table->MemoryACPILevel.MinMvdd = 0;
-
- /* Force reset on DLL*/
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
-
- /* Disable DLL in ACPIState*/
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
-
- /* Enable DLL bypass signal*/
- dll_cntl = PHM_SET_FIELD(dll_cntl,
- DLL_CNTL, MRDCK0_BYPASS, 0);
- dll_cntl = PHM_SET_FIELD(dll_cntl,
- DLL_CNTL, MRDCK1_BYPASS, 0);
-
- table->MemoryACPILevel.DllCntl =
- PP_HOST_TO_SMC_UL(dll_cntl);
- table->MemoryACPILevel.MclkPwrmgtCntl =
- PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
- table->MemoryACPILevel.MpllAdFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
- table->MemoryACPILevel.MpllDqFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
- table->MemoryACPILevel.MpllFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
- table->MemoryACPILevel.MpllFuncCntl_1 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
- table->MemoryACPILevel.MpllFuncCntl_2 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
- table->MemoryACPILevel.MpllSs1 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
- table->MemoryACPILevel.MpllSs2 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- /* Indicates maximum activity level for this performance level.*/
- table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- table->MemoryACPILevel.StutterEnable = 0;
- table->MemoryACPILevel.StrobeEnable = 0;
- table->MemoryACPILevel.EdcReadEnable = 0;
- table->MemoryACPILevel.EdcWriteEnable = 0;
- table->MemoryACPILevel.RttEnable = 0;
-
- return result;
-}
-
-static int tonga_find_boot_level(struct tonga_single_dpm_table *table, uint32_t value, uint32_t *boot_level)
-{
-	int result = -1; /* not found until a matching level is seen */
- uint32_t i;
-
- for (i = 0; i < table->count; i++) {
- if (value == table->dpm_levels[i].value) {
- *boot_level = i;
- result = 0;
- }
- }
- return result;
-}
-
-static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- table->GraphicsBootLevel = 0; /* 0 == DPM[0] (low), etc. */
- table->MemoryBootLevel = 0; /* 0 == DPM[0] (low), etc. */
-
- /* find boot level from dpm table*/
- result = tonga_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(data->smc_state_table.GraphicsBootLevel));
-
- if (0 != result) {
- data->smc_state_table.GraphicsBootLevel = 0;
-		printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value "
-		       "in dependency table. Using Graphics DPM level 0!\n");
- result = 0;
- }
-
- result = tonga_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(data->smc_state_table.MemoryBootLevel));
-
- if (0 != result) {
- data->smc_state_table.MemoryBootLevel = 0;
-		printk(KERN_ERR "[ powerplay ] VBIOS did not find boot memory clock value "
-		       "in dependency table. Using Memory DPM level 0!\n");
- result = 0;
- }
-
- table->BootVoltage.Vddc =
- tonga_get_voltage_id(&(data->vddc_voltage_table),
- data->vbios_boot_state.vddc_bootup_value);
- table->BootVoltage.VddGfx =
- tonga_get_voltage_id(&(data->vddgfx_voltage_table),
- data->vbios_boot_state.vddgfx_bootup_value);
- table->BootVoltage.Vddci =
- tonga_get_voltage_id(&(data->vddci_voltage_table),
- data->vbios_boot_state.vddci_bootup_value);
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return result;
-}
-
-
-/**
- * Calculates the SCLK dividers using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
-{
- const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t reference_clock;
- uint32_t reference_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value*/
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
- reference_clock = atomctrl_get_reference_clock(hwmgr);
-
- reference_divider = 1 + dividers.uc_pll_ref_div;
-
-	/* the low 14 bits are the fraction and the high 12 bits the divider */
- fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
- /* SPLL_FUNC_CNTL setup*/
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
-
- /* SPLL_FUNC_CNTL_3 setup*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
-
- /* set to use fractional accumulation*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
- pp_atomctrl_internal_ss_info ss_info;
-
- uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
- if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
- /*
- * ss_info.speed_spectrum_percentage -- in unit of 0.01%
- * ss_info.speed_spectrum_rate -- in unit of khz
- */
- /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
- uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
-
- /* clkv = 2 * D * fbdiv / NS */
- uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
-
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
- cg_spll_spread_spectrum_2 =
- PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
- }
- }
-
- sclk->SclkFrequency = engine_clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
-
- return 0;
-}
-
-static uint8_t tonga_get_sleep_divider_id_from_clock(uint32_t engine_clock,
- uint32_t min_engine_clock_in_sr)
-{
- uint32_t i, temp;
- uint32_t min = max(min_engine_clock_in_sr, (uint32_t)TONGA_MINIMUM_ENGINE_CLOCK);
-
- PP_ASSERT_WITH_CODE((engine_clock >= min),
- "Engine clock can't satisfy stutter requirement!", return 0);
-
- for (i = TONGA_MAX_DEEPSLEEP_DIVIDER_ID;; i--) {
- temp = engine_clock >> i;
-
-		if (temp >= min || i == 0)
- break;
- }
- return (uint8_t)i;
-}
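-/*
- * Worked example (assuming TONGA_MAX_DEEPSLEEP_DIVIDER_ID == 5): for
- * engine_clock = 80000 and min = 25000, the loop tries 80000 >> 5 = 2500,
- * >> 4 = 5000, >> 3 = 10000, >> 2 = 20000 (all below min) and stops at
- * 80000 >> 1 = 40000 >= 25000, returning divider ID 1.
- */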
-
-/**
- * Populates single SMC SCLK structure using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk_activity_level_threshold the activity threshold for this level
- * @param graphic_level the SMC graphics level structure to be populated
- */
-static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, uint32_t engine_clock, uint16_t sclk_activity_level_threshold, SMU72_Discrete_GraphicsLevel *graphic_level)
-{
- int result;
- uint32_t threshold;
- uint32_t mvdd;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
-
-
- /* populate graphics levels*/
- result = tonga_get_dependecy_volt_by_clk(hwmgr,
- pptable_info->vdd_dep_on_sclk, engine_clock,
- &graphic_level->MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
-			"cannot find VDDC voltage value in the VDDC "
-			"engine clock dependency table", return result);
-
- /* SCLK frequency in units of 10KHz*/
- graphic_level->SclkFrequency = engine_clock;
-
- /* Indicates maximum activity level for this performance level. 50% for now*/
- graphic_level->ActivityLevel = sclk_activity_level_threshold;
-
- graphic_level->CcPwrDynRm = 0;
- graphic_level->CcPwrDynRm1 = 0;
- /* this level can be used if activity is high enough.*/
- graphic_level->EnabledForActivity = 0;
- /* this level can be used for throttling.*/
- graphic_level->EnabledForThrottle = 1;
- graphic_level->UpHyst = 0;
- graphic_level->DownHyst = 0;
- graphic_level->VoltageDownHyst = 0;
- graphic_level->PowerThrottle = 0;
-
- threshold = engine_clock * data->fast_watemark_threshold / 100;
-/*
- * get the DAL clock; to be done in the future:
- PECI_GetMinClockSettings(hwmgr->peci, &minClocks);
- data->display_timing.min_clock_insr = minClocks.engineClockInSR;
-*/
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep))
- graphic_level->DeepSleepDivId =
- tonga_get_sleep_divider_id_from_clock(engine_clock,
- data->display_timing.min_clock_insr);
-
-	/* Default to low; the highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_HIGH later. */
- graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- if (0 == result) {
- /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
- /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
- }
-
- return result;
-}
-
-/**
- * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
- *
- * @param hwmgr the address of the hardware manager
- */
-static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct tonga_dpm_table *dpm_table = &data->dpm_table;
- phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
- uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
- int result = 0;
-	uint32_t level_array_address = data->dpm_table_start +
-			offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
- uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
- SMU72_MAX_LEVELS_GRAPHICS; /* 64 -> long; 32 -> int*/
- SMU72_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel;
- uint32_t i, maxEntry;
- uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0;
- PECI_RegistryValue reg_value;
- memset(levels, 0x00, level_array_size);
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- result = tonga_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)data->activity_target[i],
- &(data->smc_state_table.GraphicsLevel[i]));
-
- if (0 != result)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
-
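-		/*
-		 * reg_value is hard-coded to 0 below (the registry lookup is
-		 * not wired up), so the UpHyst overrides for levels 0 and 1
-		 * are currently no-ops.
-		 */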
- if (0 == i) {
- reg_value = 0;
- if (reg_value != 0)
- data->smc_state_table.GraphicsLevel[0].UpHyst = (uint8_t)reg_value;
- }
-
- if (1 == i) {
- reg_value = 0;
- if (reg_value != 0)
- data->smc_state_table.GraphicsLevel[1].UpHyst = (uint8_t)reg_value;
- }
- }
-
- /* Only enable level 0 for now. */
- data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
-
- /* set highest level watermark to high */
- if (dpm_table->sclk_table.count > 1)
- data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- tonga_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
- "There must be 1 or more PCIE levels defined in PPTable.", return -1);
- maxEntry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
- (uint8_t) ((i < maxEntry) ? i : maxEntry);
- }
- } else {
- if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
-			printk(KERN_ERR "[ powerplay ] PCIe DPM enable mask is 0!\n");
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1<<(highest_pcie_level_enabled+1))) != 0)) {
- highest_pcie_level_enabled++;
- }
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1<<lowest_pcie_level_enabled)) == 0)) {
- lowest_pcie_level_enabled++;
- }
-
- while ((count < highest_pcie_level_enabled) &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
- count++;
- }
- mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
- (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
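-		/*
-		 * Worked example of the three loops above: for
-		 * pcie_dpm_enable_mask = 0b001110 they yield
-		 * lowest_pcie_level_enabled = 1, highest_pcie_level_enabled = 3
-		 * and mid_pcie_level_enabled = 2.
-		 */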
-
-
- /* set pcieDpmLevel to highest_pcie_level_enabled*/
- for (i = 2; i < dpm_table->sclk_table.count; i++) {
- data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
- }
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled*/
- data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled*/
- data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
- }
-	/* The level count is sent to the SMC once, at SMC table init, and never changes. */
-	result = tonga_copy_bytes_to_smc(hwmgr->smumgr, level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
-
- if (0 != result)
- return result;
-
- return 0;
-}
-
-/**
- * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
- *
- * @param hwmgr the address of the hardware manager
- */
-
-static int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct tonga_dpm_table *dpm_table = &data->dpm_table;
- int result;
- /* populate MCLK dpm table to SMU7 */
-	uint32_t level_array_address = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
- uint32_t level_array_size = sizeof(SMU72_Discrete_MemoryLevel) * SMU72_MAX_LEVELS_MEMORY;
- SMU72_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- memset(levels, 0x00, level_array_size);
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
-				"cannot populate memory level as memory clock is zero", return -1);
- result = tonga_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
- &(data->smc_state_table.MemoryLevel[i]));
- if (0 != result) {
- return result;
- }
- }
-
- /* Only enable level 0 for now.*/
- data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
-
- /*
-	 * Prevent MC activity in stutter mode from pushing DPM up; the UVD
-	 * change complements this by putting the MCLK in a higher state by
-	 * default, so that we are not affected by the up threshold or MCLK DPM latency.
- */
- data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel);
-
- data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
- data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
- /* set highest level watermark to high*/
- data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
-
-	/* The level count is sent to the SMC once, at SMC table init, and never changes. */
-	result = tonga_copy_bytes_to_smc(hwmgr->smumgr,
-			level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
-
- if (0 != result) {
- return result;
- }
-
- return 0;
-}
-
-struct TONGA_DLL_SPEED_SETTING {
- uint16_t Min; /* Minimum Data Rate*/
- uint16_t Max; /* Maximum Data Rate*/
- uint32_t dll_speed; /* The desired DLL_SPEED setting*/
-};
-
-static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-/* ---------------------------------------- ULV related functions ----------------------------------------------------*/
-
-
-static int tonga_reset_single_dpm_table(
- struct pp_hwmgr *hwmgr,
- struct tonga_single_dpm_table *dpm_table,
- uint32_t count)
-{
- uint32_t i;
-	if (count > MAX_REGULAR_DPM_NUMBER)
-		printk(KERN_ERR "[ powerplay ] Fatal error: single DPM table entry "
-		       "count exceeds the maximum!\n");
-
- dpm_table->count = count;
- for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
- dpm_table->dpm_levels[i].enabled = false;
- }
-
- return 0;
-}
-
-static void tonga_setup_pcie_table_entry(
- struct tonga_single_dpm_table *dpm_table,
- uint32_t index, uint32_t pcie_gen,
- uint32_t pcie_lanes)
-{
- dpm_table->dpm_levels[index].value = pcie_gen;
- dpm_table->dpm_levels[index].param1 = pcie_lanes;
- dpm_table->dpm_levels[index].enabled = true;
-}
-
-static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
- uint32_t i, maxEntry;
-
- if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) {
- data->pcie_gen_power_saving = data->pcie_gen_performance;
- data->pcie_lane_power_saving = data->pcie_lane_performance;
- } else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) {
- data->pcie_gen_performance = data->pcie_gen_power_saving;
- data->pcie_lane_performance = data->pcie_lane_power_saving;
- }
-
- tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU72_MAX_LEVELS_LINK);
-
- if (pcie_table != NULL) {
- /*
- * maxEntry is used to make sure we reserve one PCIE level for boot level (fix for A+A PSPP issue).
-		 * If the PCIE table from the PPTable has a ULV entry plus 8 entries, ignore the last entry.
- */
- maxEntry = (SMU72_MAX_LEVELS_LINK < pcie_table->count) ?
- SMU72_MAX_LEVELS_LINK : pcie_table->count;
- for (i = 1; i < maxEntry; i++) {
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i-1,
- get_pcie_gen_support(data->pcie_gen_cap, pcie_table->entries[i].gen_speed),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- }
- data->dpm_table.pcie_speed_table.count = maxEntry - 1;
- } else {
- /* Hardcode Pcie Table */
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- data->dpm_table.pcie_speed_table.count = 6;
- }
- /* Populate last level for boot PCIE level, but do not increment count. */
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
- data->dpm_table.pcie_speed_table.count,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-
- return 0;
-
-}
-
-/*
- * This function initializes all DPM state tables for SMU7 based on the dependency table.
- * Dynamic state patching function will then trim these state tables to the allowed range based
- * on the power policy or external client requests, such as UVD request, etc.
- */
-static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i;
-
- phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_sclk_table =
- pptable_info->vdd_dep_on_sclk;
- phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_mclk_table =
- pptable_info->vdd_dep_on_mclk;
-
- PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
- "SCLK dependency table is missing. This table is mandatory", return -1);
- PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
- "SCLK dependency table is empty. This table is mandatory", return -1);
-
- PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
- "MCLK dependency table is missing. This table is mandatory", return -1);
- PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
- "MCLK dependency table is empty. This table is mandatory", return -1);
-
- /* clear the state table to reset everything to default */
- memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
- tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU72_MAX_LEVELS_GRAPHICS);
- tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU72_MAX_LEVELS_MEMORY);
- /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.VddcTable, SMU72_MAX_LEVELS_VDDC); */
- /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_gfx_table, SMU72_MAX_LEVELS_VDDGFX);*/
- /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_ci_table, SMU72_MAX_LEVELS_VDDCI);*/
- /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.mvdd_table, SMU72_MAX_LEVELS_MVDD);*/
-
- PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
- "SCLK dependency table is missing. This table is mandatory", return -1);
- /* Initialize the SCLK DPM table based on the allowed SCLK values */
- data->dpm_table.sclk_table.count = 0;
-
- for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
- if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
- allowed_vdd_sclk_table->entries[i].clk) {
- data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
- allowed_vdd_sclk_table->entries[i].clk;
- data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = true; /*(i==0) ? 1 : 0; to do */
- data->dpm_table.sclk_table.count++;
- }
- }
-
- PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
- "MCLK dependency table is missing. This table is mandatory", return -1);
- /* Initialize the MCLK DPM table based on the allowed MCLK values */
- data->dpm_table.mclk_table.count = 0;
- for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
- if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
- allowed_vdd_mclk_table->entries[i].clk) {
- data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
- allowed_vdd_mclk_table->entries[i].clk;
- data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = true; /*(i==0) ? 1 : 0; */
- data->dpm_table.mclk_table.count++;
- }
- }
-
- /* setup PCIE gen speed levels*/
- tonga_setup_default_pcie_tables(hwmgr);
-
- /* save a copy of the default DPM table*/
- memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct tonga_dpm_table));
-
- return 0;
-}
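
The SCLK/MCLK loops above collapse consecutive dependency entries with identical clocks into a single DPM level. A standalone sketch of that duplicate-collapse logic, with made-up clock values:

#include <stdbool.h>
#include <stdio.h>

struct dpm_level { unsigned clk; bool enabled; };

int main(void)
{
        const unsigned dep_clk[] = { 30000, 30000, 60000, 80000, 80000, 100000 };
        struct dpm_level levels[8];
        unsigned count = 0, i;

        for (i = 0; i < sizeof(dep_clk) / sizeof(dep_clk[0]); i++) {
                /* keep an entry only if its clock differs from the previous level */
                if (i == 0 || levels[count - 1].clk != dep_clk[i]) {
                        levels[count].clk = dep_clk[i];
                        levels[count].enabled = true;
                        count++;
                }
        }

        for (i = 0; i < count; i++)     /* prints 30000 60000 80000 100000 */
                printf("level %u: %u\n", i, levels[i].clk);
        return 0;
}
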
-
-int tonga_populate_smc_initial_state(struct pp_hwmgr *hwmgr,
- const struct tonga_power_state *bootState)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint8_t count, level;
-
- count = (uint8_t) (pptable_info->vdd_dep_on_sclk->count);
- for (level = 0; level < count; level++) {
- if (pptable_info->vdd_dep_on_sclk->entries[level].clk >=
- bootState->performance_levels[0].engine_clock) {
- data->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- count = (uint8_t) (pptable_info->vdd_dep_on_mclk->count);
- for (level = 0; level < count; level++) {
- if (pptable_info->vdd_dep_on_mclk->entries[level].clk >=
- bootState->performance_levels[0].memory_clock) {
- data->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-
- return 0;
-}
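
The boot-level searches above pick the first dependency entry whose clock is at or above the boot clock (a linear lower-bound scan). A standalone sketch with illustrative values:

#include <stdio.h>

int main(void)
{
        const unsigned clk[] = { 30000, 60000, 80000, 100000 };
        const unsigned boot_clk = 60000;
        unsigned level, boot_level = 0;

        for (level = 0; level < sizeof(clk) / sizeof(clk[0]); level++) {
                if (clk[level] >= boot_clk) {
                        boot_level = level;     /* first level able to carry the boot clock */
                        break;
                }
        }
        printf("boot level = %u\n", boot_level);        /* 1 */
        return 0;
}
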
-
-/**
- * Initializes the SMC table and uploads it.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- SMU72_Discrete_DpmTable *table = &(data->smc_state_table);
- const phw_tonga_ulv_parm *ulv = &(data->ulv);
- uint8_t i;
- PECI_RegistryValue reg_value;
- pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
-
- result = tonga_setup_default_dpm_tables(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to setup default DPM tables!", return result;);
- memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table));
- if (TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control) {
- tonga_populate_smc_voltage_tables(hwmgr, table);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition)) {
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc)) {
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
- }
-
- if (data->is_memory_GDDR5) {
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
- }
-
- i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
-
- if (i == 1 || i == 0) {
- table->SystemFlags |= PPSMC_SYSTEMFLAG_12CHANNEL;
- }
-
- if (ulv->ulv_supported && pptable_info->us_ulv_voltage_offset) {
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result;);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter);
- }
-
- result = tonga_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result;);
-
- result = tonga_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result;);
-
- result = tonga_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result;);
-
- result = tonga_populate_smv_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result;);
-
- result = tonga_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result;);
-
- result = tonga_populate_smc_acp_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACP Level!", return result;);
-
- result = tonga_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result;);
-
- /*
- * Since only the initial state is completely set up at this point (the other states are
- * just copies of the boot state), we only need to populate the ARB settings for the initial state.
- */
- result = tonga_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result;);
-
- result = tonga_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result;);
-
- result = tonga_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result;);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = tonga_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate Clock Stretcher Data Table!", return result;);
- }
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- pptable_info->cac_dtp_table->usTargetOperatingTemp *
- TONGA_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (pptable_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- TONGA_Q88_FORMAT_CONVERSION_UNIT;
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
-
- /*
- * Cail reads the current link status and reports it as the cap (we cannot change this due to some previous issues we had).
- * The SMC drops the link status to the lowest level after PowerPlay enables DPM. After PnP or toggling CF the driver is reloaded,
- * but this time Cail reads the current link status, which was set low by the SMC, and reports it as the cap to PowerPlay.
- * To avoid this, we set PCIeBootLinkLevel to the highest DPM level.
- */
- PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -1);
-
- table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
-
- table->PCIeGenInterval = 1;
-
- result = tonga_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate VRConfig setting!", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- reg_value = 0;
- if ((0 == reg_value) &&
- (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
- &gpio_pin_assignment))) {
- table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- } else {
- table->VRHotGpio = TONGA_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- /* ACDC Switch GPIO */
- reg_value = 0;
- if ((0 == reg_value) &&
- (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin_assignment))) {
- table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = TONGA_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition);
-
- reg_value = 0;
- if (1 == reg_value) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition);
- }
-
- reg_value = 0;
- if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr,
- THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
-
- table->ThermOutPolarity =
- (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
- (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1:0;
-
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO*/
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot) &&
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CombinePCCWithThermalSignal)){
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- }
- } else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++) {
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
- }
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
- /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
- result = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU72_Discrete_DpmTable)-3 * sizeof(SMU72_PIDController),
- data->sram_end);
-
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload dpm data to SMC memory!", return result;);
-
- return result;
-}
-
-/* Look up the voltage based on DAL's requested level, then send the requested VDDC voltage to the SMC. */
-static void tonga_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
-{
- return;
-}
-
-int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
-{
- PPSMC_Result result;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- /* Apply minimum voltage based on DAL's request level */
- tonga_apply_dal_minimum_voltage_request(hwmgr);
-
- if (0 == data->sclk_dpm_key_disabled) {
- /* Check whether DPM is running; if we discover a hang because of this, we should skip this message. */
- if (tonga_is_dpm_running(hwmgr))
- printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled\n");
-
- if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- result = smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- PP_ASSERT_WITH_CODE((0 == result),
- "Set Sclk Dpm enable Mask failed", return -1);
- }
- }
-
- if (0 == data->mclk_dpm_key_disabled) {
- /* Check whether DPM is running; if we discover a hang because of this, we should skip this message. */
- if (tonga_is_dpm_running(hwmgr))
- printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled\n");
-
- if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- result = smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- PP_ASSERT_WITH_CODE((0 == result),
- "Set Mclk Dpm enable Mask failed", return -1);
- }
- }
-
- return 0;
-}
-
-
-int tonga_force_dpm_highest(struct pp_hwmgr *hwmgr)
-{
- uint32_t level, tmp;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- if (0 == data->pcie_dpm_key_disabled) {
- /* PCIE */
- if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
- level = 0;
- tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
-
- if (0 != level) {
- PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)),
- "force highest pcie dpm state failed!", return -1);
- }
- }
- }
-
- if (0 == data->sclk_dpm_key_disabled) {
- /* SCLK */
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) {
- level = 0;
- tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
-
- if (0 != level) {
- PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)),
- "force highest sclk dpm state failed!", return -1);
- if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level)
- printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
- Curr_Sclk_Index does not match the level \n");
-
- }
- }
- }
-
- if (0 == data->mclk_dpm_key_disabled) {
- /* MCLK */
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
- level = 0;
- tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
-
- if (0 != level) {
- PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)),
- "force highest mclk dpm state failed!", return -1);
- if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level)
- printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
- Curr_Mclk_Index does not match the level \n");
- }
- }
- }
-
- return 0;
-}
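
The `while (tmp >>= 1) level++;` idiom above computes the index of the highest set bit in the enable mask, i.e. the highest enabled DPM level (equivalent to fls(mask) - 1 for a non-zero mask). A standalone check:

#include <stdio.h>

static unsigned highest_level(unsigned mask)
{
        unsigned level = 0;

        while (mask >>= 1)      /* shift until the top set bit has been consumed */
                level++;
        return level;
}

int main(void)
{
        printf("%u\n", highest_level(0x1));     /* 0 */
        printf("%u\n", highest_level(0x16));    /* 4: bit 4 is the top set bit */
        return 0;
}
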
-
-/**
- * Find the MC microcode version and store it in the HwMgr struct
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
-{
- cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
-
- hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
-
- return 0;
-}
-
-/**
- * Initialize Dynamic State Adjustment Rule Settings
- *
- * @param hwmgr the address of the powerplay hardware manager.
- */
-int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
-{
- uint32_t table_size;
- struct phm_clock_voltage_dependency_table *table_clk_vlt;
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- hwmgr->dyn_state.mclk_sclk_ratio = 4;
- hwmgr->dyn_state.sclk_mclk_delta = 15000; /* 150 MHz */
- hwmgr->dyn_state.vddc_vddci_delta = 200; /* 200mV */
-
- /* initialize vddc_dep_on_dal_pwrl table */
- table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
- table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
-
- if (NULL == table_clk_vlt) {
- printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
- return -ENOMEM;
- } else {
- table_clk_vlt->count = 4;
- table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
- table_clk_vlt->entries[0].v = 0;
- table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
- table_clk_vlt->entries[1].v = 720;
- table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
- table_clk_vlt->entries[2].v = 810;
- table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
- table_clk_vlt->entries[3].v = 900;
- pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
- }
-
- return 0;
-}
-
-static int tonga_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
- pptable_info->vdd_dep_on_sclk;
- phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
- pptable_info->vdd_dep_on_mclk;
-
- PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
- "VDD dependency on SCLK table is missing. This table is mandatory", return -1);
- PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
- "VDD dependency on SCLK table is empty. This table is mandatory", return -1);
-
- PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
- "VDD dependency on MCLK table is missing. This table is mandatory", return -1);
- PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
- "VDD dependency on MCLK table is empty. This table is mandatory", return -1);
-
- data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc;
- data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
-
- pptable_info->max_clock_voltage_on_ac.sclk =
- allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
- pptable_info->max_clock_voltage_on_ac.mclk =
- allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
- pptable_info->max_clock_voltage_on_ac.vddc =
- allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
- pptable_info->max_clock_voltage_on_ac.vddci =
- allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
-
- hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
- pptable_info->max_clock_voltage_on_ac.sclk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
- pptable_info->max_clock_voltage_on_ac.mclk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
- pptable_info->max_clock_voltage_on_ac.vddc;
- hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
- pptable_info->max_clock_voltage_on_ac.vddci;
-
- return 0;
-}
-
-int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- int result = 1;
-
- PP_ASSERT_WITH_CODE (!tonga_is_dpm_running(hwmgr),
- "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
- return result);
-
- if (0 == data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
- hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_UnForceLevel)),
- "unforce pcie level failed!",
- return -1);
- }
-
- result = tonga_upload_dpm_level_enable_mask(hwmgr);
-
- return result;
-}
-
-static uint32_t tonga_get_lowest_enable_level(
- struct pp_hwmgr *hwmgr, uint32_t level_mask)
-{
- uint32_t level = 0;
-
- while (0 == (level_mask & (1 << level)))
- level++;
-
- return level;
-}
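
tonga_get_lowest_enable_level() is a find-first-set: it returns the index of the lowest set bit and would loop forever on a zero mask, which is why every caller checks the mask against zero first. A standalone illustration:

#include <stdio.h>

static unsigned lowest_level(unsigned mask)     /* precondition: mask != 0 */
{
        unsigned level = 0;

        while (!(mask & (1u << level)))
                level++;
        return level;
}

int main(void)
{
        printf("%u\n", lowest_level(0x16));     /* 1: bit 1 is the lowest set bit */
        return 0;
}
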
-
-static int tonga_force_dpm_lowest(struct pp_hwmgr *hwmgr)
-{
- uint32_t level;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- if (0 == data->pcie_dpm_key_disabled) {
- /* PCIE */
- if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
- level = tonga_get_lowest_enable_level(hwmgr,
- data->dpm_level_enable_mask.pcie_dpm_enable_mask);
- PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)),
- "force lowest pcie dpm state failed!", return -1);
- }
- }
-
- if (0 == data->sclk_dpm_key_disabled) {
- /* SCLK */
- if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- level = tonga_get_lowest_enable_level(hwmgr,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-
- PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)),
- "force sclk dpm state failed!", return -1);
-
- if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level)
- printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
- Curr_Sclk_Index does not match the level \n");
- }
- }
-
- if (0 == data->mclk_dpm_key_disabled) {
- /* MCLK */
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
- level = tonga_get_lowest_enable_level(hwmgr,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)),
- "force lowest mclk dpm state failed!", return -1);
- if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level)
- printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \
- Curr_Mclk_Index does not match the level \n");
- }
- }
-
- return 0;
-}
-
-static int tonga_patch_voltage_dependency_tables_with_lookup_table(struct pp_hwmgr *hwmgr)
-{
- uint8_t entryId;
- uint8_t voltageId;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
- phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
- if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
- for (entryId = 0; entryId < sclk_table->count; ++entryId) {
- voltageId = sclk_table->entries[entryId].vddInd;
- sclk_table->entries[entryId].vddgfx =
- pptable_info->vddgfx_lookup_table->entries[voltageId].us_vdd;
- }
- } else {
- for (entryId = 0; entryId < sclk_table->count; ++entryId) {
- voltageId = sclk_table->entries[entryId].vddInd;
- sclk_table->entries[entryId].vddc =
- pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
- }
-
- for (entryId = 0; entryId < mclk_table->count; ++entryId) {
- voltageId = mclk_table->entries[entryId].vddInd;
- mclk_table->entries[entryId].vddc =
- pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- for (entryId = 0; entryId < mm_table->count; ++entryId) {
- voltageId = mm_table->entries[entryId].vddcInd;
- mm_table->entries[entryId].vddc =
- pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- return 0;
-
-}
-
-static int tonga_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
-{
- uint8_t entryId;
- phm_ppt_v1_voltage_lookup_record v_record;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
- phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
-
- if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
- for (entryId = 0; entryId < sclk_table->count; ++entryId) {
- if (sclk_table->entries[entryId].vdd_offset & (1 << 15))
- v_record.us_vdd = sclk_table->entries[entryId].vddgfx +
- sclk_table->entries[entryId].vdd_offset - 0xFFFF;
- else
- v_record.us_vdd = sclk_table->entries[entryId].vddgfx +
- sclk_table->entries[entryId].vdd_offset;
-
- sclk_table->entries[entryId].vddc =
- v_record.us_cac_low = v_record.us_cac_mid =
- v_record.us_cac_high = v_record.us_vdd;
-
- tonga_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
- }
-
- for (entryId = 0; entryId < mclk_table->count; ++entryId) {
- if (mclk_table->entries[entryId].vdd_offset & (1 << 15))
- v_record.us_vdd = mclk_table->entries[entryId].vddc +
- mclk_table->entries[entryId].vdd_offset - 0xFFFF;
- else
- v_record.us_vdd = mclk_table->entries[entryId].vddc +
- mclk_table->entries[entryId].vdd_offset;
-
- mclk_table->entries[entryId].vddgfx = v_record.us_cac_low =
- v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
- tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
- }
- }
-
- return 0;
-
-}
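
The vdd_offset handling above treats the 16-bit offset as signed: bit 15 flags a negative offset, which is folded back in via `offset - 0xFFFF`. A standalone sketch of that arithmetic with illustrative voltages:

#include <stdint.h>
#include <stdio.h>

static uint16_t apply_vdd_offset(uint16_t base, uint16_t off)
{
        if (off & (1 << 15))
                return base + off - 0xFFFF;     /* bit 15 set: negative offset */
        return base + off;                      /* positive offset */
}

int main(void)
{
        printf("%u\n", apply_vdd_offset(900, 50));              /* 950 */
        printf("%u\n", apply_vdd_offset(900, 0xFFFF - 50));     /* 850 */
        return 0;
}
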
-
-static int tonga_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t entryId;
- phm_ppt_v1_voltage_lookup_record v_record;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
- if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
- for (entryId = 0; entryId < mm_table->count; entryId++) {
- if (mm_table->entries[entryId].vddgfx_offset & (1 << 15))
- v_record.us_vdd = mm_table->entries[entryId].vddc +
- mm_table->entries[entryId].vddgfx_offset - 0xFFFF;
- else
- v_record.us_vdd = mm_table->entries[entryId].vddc +
- mm_table->entries[entryId].vddgfx_offset;
-
- /* Add the calculated VDDGFX to the VDDGFX lookup table */
- mm_table->entries[entryId].vddgfx = v_record.us_cac_low =
- v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
- tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
- }
- }
- return 0;
-}
-
-
-/**
- * Change a virtual leakage voltage to the actual value.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param voltage pointer to the voltage to patch
- * @param pLeakageTable pointer to the leakage table
- */
-static void tonga_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
- uint16_t *voltage, phw_tonga_leakage_voltage *pLeakageTable)
-{
- uint32_t leakage_index;
-
- /* search for leakage voltage ID 0xff01 ~ 0xff08 */
- for (leakage_index = 0; leakage_index < pLeakageTable->count; leakage_index++) {
- /* if this voltage matches a leakage voltage ID */
- /* patch with actual leakage voltage */
- if (pLeakageTable->leakage_id[leakage_index] == *voltage) {
- *voltage = pLeakageTable->actual_voltage[leakage_index];
- break;
- }
- }
-
- if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
- printk(KERN_ERR "[ powerplay ] Voltage value looks like a leakage ID but it was not patched\n");
-}
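
Voltages in the virtual-leakage range are IDs, not millivolts; the patch above swaps them for the measured EVV voltage. A standalone sketch, assuming ATOM_VIRTUAL_VOLTAGE_ID0 is 0xff01 as in the atombios headers:

#include <stdint.h>
#include <stdio.h>

#define VIRTUAL_VOLTAGE_ID0 0xff01      /* assumed value of ATOM_VIRTUAL_VOLTAGE_ID0 */

int main(void)
{
        const uint16_t leakage_id[] = { 0xff01, 0xff02 };
        const uint16_t actual_mv[]  = { 1150, 1100 };
        uint16_t voltage = 0xff02;      /* looks like a leakage ID */
        unsigned i;

        for (i = 0; i < 2; i++) {
                if (leakage_id[i] == voltage) {
                        voltage = actual_mv[i];
                        break;
                }
        }
        if (voltage > VIRTUAL_VOLTAGE_ID0)
                printf("unpatched leakage ID: 0x%x\n", voltage);
        else
                printf("voltage = %u mV\n", voltage);   /* 1100 mV */
        return 0;
}
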
-
-/**
- * Patch the voltage lookup table with EVV leakages.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param lookup_table pointer to the voltage lookup table
- * @param pLeakageTable pointer to the leakage table
- * @return always 0
- */
-static int tonga_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table,
- phw_tonga_leakage_voltage *pLeakageTable)
-{
- uint32_t i;
-
- for (i = 0; i < lookup_table->count; i++) {
- tonga_patch_with_vdd_leakage(hwmgr,
- &lookup_table->entries[i].us_vdd, pLeakageTable);
- }
-
- return 0;
-}
-
-static int tonga_patch_clock_voltage_lomits_with_vddc_leakage(struct pp_hwmgr *hwmgr,
- phw_tonga_leakage_voltage *pLeakageTable, uint16_t *Vddc)
-{
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddc, pLeakageTable);
- hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
- pptable_info->max_clock_voltage_on_dc.vddc;
-
- return 0;
-}
-
-static int tonga_patch_clock_voltage_limits_with_vddgfx_leakage(
- struct pp_hwmgr *hwmgr, phw_tonga_leakage_voltage *pLeakageTable,
- uint16_t *Vddgfx)
-{
- tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddgfx, pLeakageTable);
- return 0;
-}
-
-int tonga_sort_lookup_table(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
- uint32_t table_size, i, j;
- phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
- table_size = lookup_table->count;
-
- PP_ASSERT_WITH_CODE(0 != lookup_table->count,
- "Lookup table is empty", return -1);
-
- /* Sorting voltages */
- for (i = 0; i < table_size - 1; i++) {
- for (j = i + 1; j > 0; j--) {
- if (lookup_table->entries[j].us_vdd < lookup_table->entries[j-1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j-1];
- lookup_table->entries[j-1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
- }
- }
- }
-
- return 0;
-}
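
The nested loops above are a plain in-place insertion sort by adjacent swaps, ascending on us_vdd. A minimal standalone equivalent on bare integers:

#include <stdio.h>

int main(void)
{
        unsigned v[] = { 900, 720, 810, 650 };
        const unsigned n = sizeof(v) / sizeof(v[0]);
        unsigned i, j, tmp;

        for (i = 0; i < n - 1; i++) {
                for (j = i + 1; j > 0; j--) {
                        if (v[j] < v[j - 1]) {  /* bubble the new element down */
                                tmp = v[j - 1];
                                v[j - 1] = v[j];
                                v[j] = tmp;
                        }
                }
        }
        for (i = 0; i < n; i++)
                printf("%u ", v[i]);    /* 650 720 810 900 */
        printf("\n");
        return 0;
}
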
-
-static int tonga_complete_dependency_tables(struct pp_hwmgr *hwmgr)
-{
- int result = 0;
- int tmp_result;
- tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
- tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr,
- pptable_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
- if (tmp_result != 0)
- result = tmp_result;
-
- tmp_result = tonga_patch_clock_voltage_limits_with_vddgfx_leakage(hwmgr,
- &(data->vddcgfx_leakage), &pptable_info->max_clock_voltage_on_dc.vddgfx);
- if (tmp_result != 0)
- result = tmp_result;
- } else {
- tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr,
- pptable_info->vddc_lookup_table, &(data->vddc_leakage));
- if (tmp_result != 0)
- result = tmp_result;
-
- tmp_result = tonga_patch_clock_voltage_lomits_with_vddc_leakage(hwmgr,
- &(data->vddc_leakage), &pptable_info->max_clock_voltage_on_dc.vddc);
- if (tmp_result != 0)
- result = tmp_result;
- }
-
- tmp_result = tonga_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
- if (tmp_result != 0)
- result = tmp_result;
-
- tmp_result = tonga_calc_voltage_dependency_tables(hwmgr);
- if (tmp_result != 0)
- result = tmp_result;
-
- tmp_result = tonga_calc_mm_voltage_dependency_table(hwmgr);
- if (tmp_result != 0)
- result = tmp_result;
-
- tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddgfx_lookup_table);
- if (tmp_result != 0)
- result = tmp_result;
-
- tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddc_lookup_table);
- if (tmp_result != 0)
- result = tmp_result;
-
- return result;
-}
-
-int tonga_init_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- data->low_sclk_interrupt_threshold = 0;
-
- return 0;
-}
-
-int tonga_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = tonga_read_clock_registers(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to read clock registers!", result = tmp_result);
-
- tmp_result = tonga_get_memory_type(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get memory type!", result = tmp_result);
-
- tmp_result = tonga_enable_acpi_power_management(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable ACPI power management!", result = tmp_result);
-
- tmp_result = tonga_init_power_gate_state(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to init power gate state!", result = tmp_result);
-
- tmp_result = tonga_get_mc_microcode_version(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get MC microcode version!", result = tmp_result);
-
- tmp_result = tonga_init_sclk_threshold(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to init sclk threshold!", result = tmp_result);
-
- return result;
-}
-
-/**
- * Enable voltage control
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_enable_voltage_control(struct pp_hwmgr *hwmgr)
-{
- /* enable voltage control */
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
-
- return 0;
-}
-
-/**
- * Checks if we want to support voltage control
- *
- * @param hwmgr the address of the powerplay hardware manager.
- */
-bool cf_tonga_voltage_control(const struct pp_hwmgr *hwmgr)
-{
- const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- return(TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control);
-}
-
-/*---------------------------MC----------------------------*/
-
-uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
-{
- return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
-}
-
-bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
-{
- bool result = true;
-
- switch (inReg) {
- case mmMC_SEQ_RAS_TIMING:
- *outReg = mmMC_SEQ_RAS_TIMING_LP;
- break;
-
- case mmMC_SEQ_DLL_STBY:
- *outReg = mmMC_SEQ_DLL_STBY_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CMD0:
- *outReg = mmMC_SEQ_G5PDX_CMD0_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CMD1:
- *outReg = mmMC_SEQ_G5PDX_CMD1_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CTRL:
- *outReg = mmMC_SEQ_G5PDX_CTRL_LP;
- break;
-
- case mmMC_SEQ_CAS_TIMING:
- *outReg = mmMC_SEQ_CAS_TIMING_LP;
- break;
-
- case mmMC_SEQ_MISC_TIMING:
- *outReg = mmMC_SEQ_MISC_TIMING_LP;
- break;
-
- case mmMC_SEQ_MISC_TIMING2:
- *outReg = mmMC_SEQ_MISC_TIMING2_LP;
- break;
-
- case mmMC_SEQ_PMG_DVS_CMD:
- *outReg = mmMC_SEQ_PMG_DVS_CMD_LP;
- break;
-
- case mmMC_SEQ_PMG_DVS_CTL:
- *outReg = mmMC_SEQ_PMG_DVS_CTL_LP;
- break;
-
- case mmMC_SEQ_RD_CTL_D0:
- *outReg = mmMC_SEQ_RD_CTL_D0_LP;
- break;
-
- case mmMC_SEQ_RD_CTL_D1:
- *outReg = mmMC_SEQ_RD_CTL_D1_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_D0:
- *outReg = mmMC_SEQ_WR_CTL_D0_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_D1:
- *outReg = mmMC_SEQ_WR_CTL_D1_LP;
- break;
-
- case mmMC_PMG_CMD_EMRS:
- *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP;
- break;
-
- case mmMC_PMG_CMD_MRS:
- *outReg = mmMC_SEQ_PMG_CMD_MRS_LP;
- break;
-
- case mmMC_PMG_CMD_MRS1:
- *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP;
- break;
-
- case mmMC_SEQ_PMG_TIMING:
- *outReg = mmMC_SEQ_PMG_TIMING_LP;
- break;
-
- case mmMC_PMG_CMD_MRS2:
- *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_2:
- *outReg = mmMC_SEQ_WR_CTL_2_LP;
- break;
-
- default:
- result = false;
- break;
- }
-
- return result;
-}
-
-int tonga_set_s0_mc_reg_index(phw_tonga_mc_reg_table *table)
-{
- uint32_t i;
- uint16_t address;
-
- for (i = 0; i < table->last; i++) {
- table->mc_reg_address[i].s0 =
- tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
- ? address : table->mc_reg_address[i].s1;
- }
- return 0;
-}
-
-int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_tonga_mc_reg_table *ni_table)
-{
- uint8_t i, j;
-
- PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
- PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
- "Invalid VramInfo table.", return -1);
-
- for (i = 0; i < table->last; i++) {
- ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
- }
- ni_table->last = table->last;
-
- for (i = 0; i < table->num_entries; i++) {
- ni_table->mc_reg_table_entry[i].mclk_max =
- table->mc_reg_table_entry[i].mclk_max;
- for (j = 0; j < table->last; j++) {
- ni_table->mc_reg_table_entry[i].mc_data[j] =
- table->mc_reg_table_entry[i].mc_data[j];
- }
- }
-
- ni_table->num_entries = table->num_entries;
-
- return 0;
-}
-
-/**
- * VBIOS omits some information to reduce size; we need to recover it here.
- * 1. When we see mmMC_SEQ_MISC1, bits [31:16] (EMRS1) need to be written to mmMC_PMG_CMD_EMRS/_LP[15:0],
- *    and bits [15:0] (MRS) need to be written to mmMC_PMG_CMD_MRS/_LP[15:0].
- * 2. When we see mmMC_SEQ_RESERVE_M, bits [15:0] (EMRS2) need to be written to mmMC_PMG_CMD_MRS1/_LP[15:0].
- * 3. These data need to be set for each clock range.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param table the address of the MCRegTable
- * @return always 0
- */
-int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_tonga_mc_reg_table *table)
-{
- uint8_t i, j, k;
- uint32_t temp_reg;
- const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- for (i = 0, j = table->last; i < table->last; i++) {
- PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
- switch (table->mc_reg_address[i].s1) {
- /*
- * mmMC_SEQ_MISC1: bits [31:16] (EMRS1) need to be written to mmMC_PMG_CMD_EMRS/_LP[15:0],
- * and bits [15:0] (MRS) need to be written to mmMC_PMG_CMD_MRS/_LP[15:0].
- */
- case mmMC_SEQ_MISC1:
- temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- ((temp_reg & 0xffff0000)) |
- ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
- }
- j++;
- PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
-
- temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-
- if (!data->is_memory_GDDR5) {
- table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
- }
- }
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
-
- if (!data->is_memory_GDDR5) {
- table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
- table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
- }
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
- }
-
- break;
-
- case mmMC_SEQ_RESERVE_M:
- temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- }
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
- break;
-
- default:
- break;
- }
-
- }
-
- table->last = j;
-
- return 0;
-}
-
-int tonga_set_valid_flag(phw_tonga_mc_reg_table *table)
-{
- uint8_t i, j;
- for (i = 0; i < table->last; i++) {
- for (j = 1; j < table->num_entries; j++) {
- if (table->mc_reg_table_entry[j-1].mc_data[i] !=
- table->mc_reg_table_entry[j].mc_data[i]) {
- table->validflag |= (1<<i);
- break;
- }
- }
- }
-
- return 0;
-}
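
tonga_set_valid_flag() builds a per-register bitmask: bit i is set when register column i changes anywhere across the clock entries, so constant columns can be skipped when uploading. A standalone sketch with a made-up table:

#include <stdint.h>
#include <stdio.h>

#define NREGS    4
#define NENTRIES 3

int main(void)
{
        /* rows are clock entries, columns are register values */
        const uint32_t mc_data[NENTRIES][NREGS] = {
                { 0x10, 0x20, 0x30, 0x40 },
                { 0x10, 0x21, 0x30, 0x40 },
                { 0x10, 0x22, 0x30, 0x41 },
        };
        uint32_t validflag = 0;
        unsigned i, j;

        for (i = 0; i < NREGS; i++) {
                for (j = 1; j < NENTRIES; j++) {
                        if (mc_data[j - 1][i] != mc_data[j][i]) {
                                validflag |= 1u << i;
                                break;
                        }
                }
        }
        printf("validflag = 0x%x\n", validflag);        /* 0xa: columns 1 and 3 vary */
        return 0;
}
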
-
-int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- pp_atomctrl_mc_reg_table *table;
- phw_tonga_mc_reg_table *ni_table = &data->tonga_mc_reg_table;
- uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
-
- table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
-
- if (NULL == table)
- return -ENOMEM;
-
- /* Program additional LP registers that are no longer programmed by VBIOS */
- cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
-
- memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
- result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
-
- if (0 == result)
- result = tonga_copy_vbios_smc_reg_table(table, ni_table);
-
- if (0 == result) {
- tonga_set_s0_mc_reg_index(ni_table);
- result = tonga_set_mc_special_registers(hwmgr, ni_table);
- }
-
- if (0 == result)
- tonga_set_valid_flag(ni_table);
-
- kfree(table);
- return result;
-}
-
-/*
- * Copy one ARB setting to another and then switch the active set.
- * arbFreqSrc and arbFreqDest are each one of the MC_CG_ARB_FREQ_Fx constants.
- */
-int tonga_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
- uint32_t arbFreqSrc, uint32_t arbFreqDest)
-{
- uint32_t mc_arb_dram_timing;
- uint32_t mc_arb_dram_timing2;
- uint32_t burst_time;
- uint32_t mc_cg_config;
-
- switch (arbFreqSrc) {
- case MC_CG_ARB_FREQ_F0:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
- break;
-
- case MC_CG_ARB_FREQ_F1:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
- break;
-
- default:
- return -1;
- }
-
- switch (arbFreqDest) {
- case MC_CG_ARB_FREQ_F0:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
- break;
-
- case MC_CG_ARB_FREQ_F1:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
- break;
-
- default:
- return -1;
- }
-
- mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
- mc_cg_config |= 0x0000000F;
- cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest);
-
- return 0;
-}
-
-/**
- * Initial switch from ARB F0->F1. This function is to be called from the SetPowerState table.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr)
-{
- return tonga_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-/**
- * Initialize the ARB DRAM timing table's index field.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
- const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- uint32_t tmp;
- int result;
-
- /*
- * This is a read-modify-write on the first byte of the ARB table.
- * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure is the field 'current'.
- * This solution is ugly, but we never write the whole table, only individual fields in it.
- * In reality this field should not be in that structure but in a soft register.
- */
- result = tonga_read_smc_sram_dword(hwmgr->smumgr,
- data->arb_table_start, &tmp, data->sram_end);
-
- if (0 != result)
- return result;
-
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
- return tonga_write_smc_sram_dword(hwmgr->smumgr,
- data->arb_table_start, tmp, data->sram_end);
-}
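
The read-modify-write above touches only the byte holding the 'current' field of the ARB table dword. A standalone sketch of the bit manipulation, with a made-up register value and an illustrative stand-in for MC_CG_ARB_FREQ_F1:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint32_t arb_freq_f1 = 0x0b;      /* illustrative stand-in for MC_CG_ARB_FREQ_F1 */
        uint32_t tmp = 0x0adeadbe;              /* dword read back from SMC SRAM */

        tmp &= 0x00FFFFFF;                      /* clear the byte holding 'current' */
        tmp |= arb_freq_f1 << 24;               /* write the new active set */

        printf("0x%08x\n", tmp);                /* 0x0bdeadbe */
        return 0;
}
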
-
-int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU72_Discrete_MCRegisters *mc_reg_table)
-{
- const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- uint32_t i, j;
-
- for (i = 0, j = 0; j < data->tonga_mc_reg_table.last; j++) {
- if (data->tonga_mc_reg_table.validflag & 1<<j) {
- PP_ASSERT_WITH_CODE(i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
- "Index of mc_reg_table->address[] array out of boundary", return -1);
- mc_reg_table->address[i].s0 =
- PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s0);
- mc_reg_table->address[i].s1 =
- PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s1);
- i++;
- }
- }
-
- mc_reg_table->last = (uint8_t)i;
-
- return 0;
-}
-
-/* Convert register values from driver format to SMC format. */
-void tonga_convert_mc_registers(
- const phw_tonga_mc_reg_entry *pEntry,
- SMU72_Discrete_MCRegisterSet *pData,
- uint32_t numEntries, uint32_t validflag)
-{
- uint32_t i, j;
-
- for (i = 0, j = 0; j < numEntries; j++) {
- if (validflag & 1<<j) {
- pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]);
- i++;
- }
- }
-}
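
tonga_convert_mc_registers() compacts a row of register values by the validflag mask: only columns whose bit is set are copied, in order (the host-to-SMC byte swap is omitted here). A standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint32_t row[] = { 0x10, 0x21, 0x30, 0x41 };
        const uint32_t validflag = 0xa;         /* columns 1 and 3 are valid */
        uint32_t out[4];
        unsigned i = 0, j;

        for (j = 0; j < 4; j++) {
                if (validflag & (1u << j))
                        out[i++] = row[j];      /* pack valid columns contiguously */
        }
        for (j = 0; j < i; j++)
                printf("0x%x ", out[j]);        /* 0x21 0x41 */
        printf("\n");
        return 0;
}
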
-
-/* Find the entry in the memory range table, then populate the values into the SMC's tonga_mc_reg_table. */
-int tonga_convert_mc_reg_table_entry_to_smc(
- struct pp_hwmgr *hwmgr,
- const uint32_t memory_clock,
- SMU72_Discrete_MCRegisterSet *mc_reg_table_data
- )
-{
- const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- uint32_t i = 0;
-
- for (i = 0; i < data->tonga_mc_reg_table.num_entries; i++) {
- if (memory_clock <=
- data->tonga_mc_reg_table.mc_reg_table_entry[i].mclk_max) {
- break;
- }
- }
-
- if ((i == data->tonga_mc_reg_table.num_entries) && (i > 0))
- --i;
-
- tonga_convert_mc_registers(&data->tonga_mc_reg_table.mc_reg_table_entry[i],
- mc_reg_table_data, data->tonga_mc_reg_table.last, data->tonga_mc_reg_table.validflag);
-
- return 0;
-}
-
-int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_MCRegisters *mc_reg_table)
-{
- int result = 0;
- tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- int res;
- uint32_t i;
-
- for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
- res = tonga_convert_mc_reg_table_entry_to_smc(
- hwmgr,
- data->dpm_table.mclk_table.dpm_levels[i].value,
- &mc_reg_table->data[i]
- );
-
- if (0 != res)
- result = res;
- }
-
- return result;
-}
-
-int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- memset(&data->mc_reg_table, 0x00, sizeof(SMU72_Discrete_MCRegisters));
- result = tonga_populate_mc_reg_address(hwmgr, &(data->mc_reg_table));
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize MCRegTable for the MC register addresses!", return result;);
-
- result = tonga_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize MCRegTable for driver state!", return result;);
-
- return tonga_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start,
- (uint8_t *)&data->mc_reg_table, sizeof(SMU72_Discrete_MCRegisters), data->sram_end);
-}
-
-/**
- * Programs static screen detection parameters.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- /* Set static screen threshold unit*/
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
- data->static_screen_threshold_unit);
- /* Set static screen threshold*/
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
- data->static_screen_threshold);
-
- return 0;
-}
-
-/**
- * Set up the display gap for glitch-free memory clock switching.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_enable_display_gap(struct pp_hwmgr *hwmgr)
-{
- uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
-
- display_gap = PHM_SET_FIELD(display_gap,
- CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE);
-
- display_gap = PHM_SET_FIELD(display_gap,
- CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL, display_gap);
-
- return 0;
-}
-
-/**
- * Programs activity state transition voting clients
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_program_voting_clients(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- /* Clear reset for voting clients before enabling DPM */
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
- return 0;
-}
-
-
-int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = tonga_check_for_dpm_stopped(hwmgr);
-
- if (cf_tonga_voltage_control(hwmgr)) {
- tmp_result = tonga_enable_voltage_control(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable voltage control!", result = tmp_result);
-
- tmp_result = tonga_construct_voltage_tables(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to contruct voltage tables!", result = tmp_result);
- }
-
- tmp_result = tonga_initialize_mc_reg_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize MC reg table!", result = tmp_result);
-
- tmp_result = tonga_program_static_screen_threshold_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program static screen threshold parameters!", result = tmp_result);
-
- tmp_result = tonga_enable_display_gap(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable display gap!", result = tmp_result);
-
- tmp_result = tonga_program_voting_clients(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program voting clients!", result = tmp_result);
-
- tmp_result = tonga_process_firmware_header(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to process firmware header!", result = tmp_result);
-
- tmp_result = tonga_initial_switch_from_arb_f0_to_f1(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize switch from ArbF0 to F1!", result = tmp_result);
-
- tmp_result = tonga_init_smc_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize SMC table!", result = tmp_result);
-
- tmp_result = tonga_init_arb_table_index(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize ARB table index!", result = tmp_result);
-
- tmp_result = tonga_populate_initial_mc_reg_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to populate initialize MC Reg table!", result = tmp_result);
-
- tmp_result = tonga_notify_smc_display_change(hwmgr, false);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to notify no display!", result = tmp_result);
-
- /* enable SCLK control */
- tmp_result = tonga_enable_sclk_control(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable SCLK control!", result = tmp_result);
-
- /* enable DPM */
- tmp_result = tonga_start_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to start DPM!", result = tmp_result);
-
- return result;
-}
-
-int tonga_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = tonga_check_for_dpm_running(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "SMC is still running!", return 0);
-
- tmp_result = tonga_stop_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to stop DPM!", result = tmp_result);
-
- tmp_result = tonga_reset_to_default(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to reset to default!", result = tmp_result);
-
- return result;
-}
-
-int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- result = tonga_set_boot_state(hwmgr);
- if (0 != result)
- printk(KERN_ERR "[ powerplay ] Failed to reset asic via set boot state! \n");
-
- return result;
-}
-
-int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
- return phm_hwmgr_backend_fini(hwmgr);
-}
-
-/**
- * Initializes the Volcanic Islands Hardware Manager.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success; otherwise an appropriate error code.
- */
-int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
- int result = 0;
- SMU72_Discrete_DpmTable *table = NULL;
- tonga_hwmgr *data;
- pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phw_tonga_ulv_parm *ulv;
- struct cgs_system_info sys_info = {0};
-
- PP_ASSERT_WITH_CODE((NULL != hwmgr),
- "Invalid Parameter!", return -1;);
-
- data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- hwmgr->backend = data;
-
- data->dll_defaule_on = false;
- data->sram_end = SMC_RAM_END;
-
- data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT;
- data->activity_target[1] = PPTONGA_TARGETACTIVITY_DFLT;
- data->activity_target[2] = PPTONGA_TARGETACTIVITY_DFLT;
- data->activity_target[3] = PPTONGA_TARGETACTIVITY_DFLT;
- data->activity_target[4] = PPTONGA_TARGETACTIVITY_DFLT;
- data->activity_target[5] = PPTONGA_TARGETACTIVITY_DFLT;
- data->activity_target[6] = PPTONGA_TARGETACTIVITY_DFLT;
- data->activity_target[7] = PPTONGA_TARGETACTIVITY_DFLT;
-
- data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
- data->vddc_vddgfx_delta = VDDC_VDDGFX_DELTA;
- data->mclk_activity_target = PPTONGA_MCLK_TARGETACTIVITY_DFLT;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableVoltageIsland);
-
- data->sclk_dpm_key_disabled = 0;
- data->mclk_dpm_key_disabled = 0;
- data->pcie_dpm_key_disabled = 0;
- data->pcc_monitor_enabled = 0;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UnTabledHardwareInterface);
-
- data->gpio_debug = 0;
- data->engine_clock_data = 0;
- data->memory_clock_data = 0;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicPatchPowerState);
-
- /* need to set voltage control types before EVV patching*/
- data->voltage_control = TONGA_VOLTAGE_CONTROL_NONE;
- data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE;
- data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE;
- data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE;
- data->force_pcie_gen = PP_PCIEGenInvalid;
-
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
- data->voltage_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDGFX)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
- data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
- }
- }
-
- if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDGFX);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableMVDDControl)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) {
- data->mvdd_control = TONGA_VOLTAGE_CONTROL_BY_GPIO;
- }
- }
-
- if (TONGA_VOLTAGE_CONTROL_NONE == data->mvdd_control) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableMVDDControl);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
- data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
- data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
- }
-
- if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_ci_control)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
- if (pptable_info->cac_dtp_table->usClockStretchAmount != 0)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
-
-	/* Initialize DPM default values */
- tonga_initialize_dpm_defaults(hwmgr);
-
-	/* Get leakage voltage based on leakage ID. */
- PP_ASSERT_WITH_CODE((0 == tonga_get_evv_voltage(hwmgr)),
- "Get EVV Voltage Failed. Abort Driver loading!", return -1);
-
- tonga_complete_dependency_tables(hwmgr);
-
-	/* Parse pptable data read from VBIOS */
- tonga_set_private_var_based_on_pptale(hwmgr);
-
-	/* ULV support */
- ulv = &(data->ulv);
- ulv->ulv_supported = false;
-
-	/* Initialize Dynamic State Adjustment Rule Settings */
- result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
- if (result)
- printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n");
- data->uvd_enabled = false;
-
- table = &(data->smc_state_table);
-
-	/*
-	 * If ucGPIO_ID == VDDC_PCC_GPIO_PINID in the GPIO LUT, the Peak
-	 * Current Control feature is enabled and we should program the
-	 * PCC HW register.
-	 */
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
- uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
-
- switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
- case 0:
- temp_reg = PHM_SET_FIELD(temp_reg,
- CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
- break;
- case 1:
- temp_reg = PHM_SET_FIELD(temp_reg,
- CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
- break;
- case 2:
- temp_reg = PHM_SET_FIELD(temp_reg,
- CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
- break;
- case 3:
- temp_reg = PHM_SET_FIELD(temp_reg,
- CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
- break;
- case 4:
- temp_reg = PHM_SET_FIELD(temp_reg,
- CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
- break;
- default:
- printk(KERN_ERR "[ powerplay ] Failed to setup PCC HW register! \
- Wrong GPIO assigned for VDDC_PCC_GPIO_PINID! \n");
- break;
- }
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCNB_PWRMGT_CNTL, temp_reg);
- }
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableSMU7ThermalManagement);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SMU7);
-
- data->vddc_phase_shed_control = false;
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDPowerGating);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating);
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (!result) {
- if (sys_info.value & AMD_PG_SUPPORT_UVD)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDPowerGating);
- if (sys_info.value & AMD_PG_SUPPORT_VCE)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating);
- }
-
- if (0 == result) {
- data->is_tlu_enabled = false;
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
- TONGA_MAX_HARDWARE_POWERLEVELS;
- hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
- hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
-
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
- else
- data->pcie_gen_cap = (uint32_t)sys_info.value;
- if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- data->pcie_spc_cap = 20;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
- else
- data->pcie_lane_cap = (uint32_t)sys_info.value;
- } else {
- /* Ignore return value in here, we are cleaning up a mess. */
- tonga_hwmgr_backend_fini(hwmgr);
- }
-
- return result;
-}
-
-static int tonga_force_dpm_level(struct pp_hwmgr *hwmgr,
- enum amd_dpm_forced_level level)
-{
- int ret = 0;
-
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = tonga_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = tonga_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_AUTO:
- ret = tonga_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- break;
- default:
- break;
- }
-
- hwmgr->dpm_level = level;
- return ret;
-}
-
-static int tonga_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
- struct pp_power_state *prequest_ps,
- const struct pp_power_state *pcurrent_ps)
-{
- struct tonga_power_state *tonga_ps =
- cast_phw_tonga_power_state(&prequest_ps->hardware);
-
- uint32_t sclk;
- uint32_t mclk;
- struct PP_Clocks minimum_clocks = {0};
- bool disable_mclk_switching;
- bool disable_mclk_switching_for_frame_lock;
- struct cgs_display_info info = {0};
- const struct phm_clock_and_voltage_limits *max_limits;
- uint32_t i;
- tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- int32_t count;
- int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
-
- data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
-
- PP_ASSERT_WITH_CODE(tonga_ps->performance_level_count == 2,
- "VI should always have 2 performance levels",
- );
-
- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
- &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
- &(hwmgr->dyn_state.max_clock_voltage_on_dc);
-
- if (PP_PowerSource_DC == hwmgr->power_source) {
- for (i = 0; i < tonga_ps->performance_level_count; i++) {
- if (tonga_ps->performance_levels[i].memory_clock > max_limits->mclk)
- tonga_ps->performance_levels[i].memory_clock = max_limits->mclk;
- if (tonga_ps->performance_levels[i].engine_clock > max_limits->sclk)
- tonga_ps->performance_levels[i].engine_clock = max_limits->sclk;
- }
- }
-
- tonga_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk;
- tonga_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk;
-
- tonga_ps->acp_clk = hwmgr->acp_arbiter.acpclk;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
-
- /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
-
- max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
- stable_pstate_sclk = (max_limits->sclk * 75) / 100;
-
- for (count = pptable_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
- if (stable_pstate_sclk >= pptable_info->vdd_dep_on_sclk->entries[count].clk) {
- stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[count].clk;
- break;
- }
- }
-
- if (count < 0)
- stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[0].clk;
-
- stable_pstate_mclk = max_limits->mclk;
-
- minimum_clocks.engineClock = stable_pstate_sclk;
- minimum_clocks.memoryClock = stable_pstate_mclk;
- }
-
- if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
- minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
- if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
- tonga_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
- if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock),
- "Overdrive sclk exceeds limit",
- hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
- if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
- tonga_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive;
- }
-
- if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock),
- "Overdrive mclk exceeds limit",
- hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
- if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
- tonga_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive;
- }
-
- disable_mclk_switching_for_frame_lock = phm_cap_enabled(
- hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
-
- disable_mclk_switching = (1 < info.display_count) ||
- disable_mclk_switching_for_frame_lock;
-
- sclk = tonga_ps->performance_levels[0].engine_clock;
- mclk = tonga_ps->performance_levels[0].memory_clock;
-
- if (disable_mclk_switching)
- mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock;
-
- if (sclk < minimum_clocks.engineClock)
- sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock;
-
- if (mclk < minimum_clocks.memoryClock)
- mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock;
-
- tonga_ps->performance_levels[0].engine_clock = sclk;
- tonga_ps->performance_levels[0].memory_clock = mclk;
-
- tonga_ps->performance_levels[1].engine_clock =
- (tonga_ps->performance_levels[1].engine_clock >= tonga_ps->performance_levels[0].engine_clock) ?
- tonga_ps->performance_levels[1].engine_clock :
- tonga_ps->performance_levels[0].engine_clock;
-
- if (disable_mclk_switching) {
- if (mclk < tonga_ps->performance_levels[1].memory_clock)
- mclk = tonga_ps->performance_levels[1].memory_clock;
-
- tonga_ps->performance_levels[0].memory_clock = mclk;
- tonga_ps->performance_levels[1].memory_clock = mclk;
- } else {
- if (tonga_ps->performance_levels[1].memory_clock < tonga_ps->performance_levels[0].memory_clock)
- tonga_ps->performance_levels[1].memory_clock = tonga_ps->performance_levels[0].memory_clock;
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
-		for (i = 0; i < tonga_ps->performance_level_count; i++) {
-			tonga_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
-			tonga_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
-			tonga_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
-			tonga_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
- }
- }
-
- return 0;
-}
-
-int tonga_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
- return sizeof(struct tonga_power_state);
-}
-
-static int tonga_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
- struct pp_power_state *ps;
- struct tonga_power_state *tonga_ps;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
- if (low)
- return tonga_ps->performance_levels[0].memory_clock;
- else
- return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
-}
-
-static int tonga_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
- struct pp_power_state *ps;
- struct tonga_power_state *tonga_ps;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
- if (low)
- return tonga_ps->performance_levels[0].engine_clock;
- else
- return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
-}
-
-static uint16_t tonga_get_current_pcie_speed(
- struct pp_hwmgr *hwmgr)
-{
- uint32_t speed_cntl = 0;
-
- speed_cntl = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__PCIE,
- ixPCIE_LC_SPEED_CNTL);
-	return (uint16_t)PHM_GET_FIELD(speed_cntl,
-			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE);
-}
-
-static int tonga_get_current_pcie_lane_number(
- struct pp_hwmgr *hwmgr)
-{
- uint32_t link_width;
-
- link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__PCIE,
- PCIE_LC_LINK_WIDTH_CNTL,
- LC_LINK_WIDTH_RD);
-
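-	/* LC_LINK_WIDTH_RD is an encoded width selector (0..7), not a raw
-	 * lane count; decode_pcie_lane_width() maps it to actual lanes.
-	 */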
- PP_ASSERT_WITH_CODE((7 >= link_width),
- "Invalid PCIe lane width!", return 0);
-
- return decode_pcie_lane_width(link_width);
-}
-
-static int tonga_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
- struct pp_hw_power_state *hw_ps)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_power_state *ps = (struct tonga_power_state *)hw_ps;
- ATOM_FIRMWARE_INFO_V2_2 *fw_info;
- uint16_t size;
- uint8_t frev, crev;
- int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
- /* First retrieve the Boot clocks and VDDC from the firmware info table.
- * We assume here that fw_info is unchanged if this call fails.
- */
- fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
- hwmgr->device, index,
- &size, &frev, &crev);
- if (!fw_info)
- /* During a test, there is no firmware info table. */
- return 0;
-
- /* Patch the state. */
- data->vbios_boot_state.sclk_bootup_value = le32_to_cpu(fw_info->ulDefaultEngineClock);
- data->vbios_boot_state.mclk_bootup_value = le32_to_cpu(fw_info->ulDefaultMemoryClock);
- data->vbios_boot_state.mvdd_bootup_value = le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
- data->vbios_boot_state.vddc_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCVoltage);
- data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
- data->vbios_boot_state.pcie_gen_bootup_value = tonga_get_current_pcie_speed(hwmgr);
- data->vbios_boot_state.pcie_lane_bootup_value =
- (uint16_t)tonga_get_current_pcie_lane_number(hwmgr);
-
- /* set boot power state */
- ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
- ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
- ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
- ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
-
- return 0;
-}
-
-static int tonga_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
- void *state, struct pp_power_state *power_state,
- void *pp_table, uint32_t classification_flag)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- struct tonga_power_state *tonga_ps =
- (struct tonga_power_state *)(&(power_state->hardware));
-
- struct tonga_performance_level *performance_level;
-
- ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
-
- ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
- (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
-
- ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
- (ATOM_Tonga_SCLK_Dependency_Table *)
- (((unsigned long)powerplay_table) +
- le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
-
- ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
- (ATOM_Tonga_MCLK_Dependency_Table *)
- (((unsigned long)powerplay_table) +
- le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-
- /* The following fields are not initialized here: id orderedList allStatesList */
- power_state->classification.ui_label =
- (le16_to_cpu(state_entry->usClassification) &
- ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
- ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
- power_state->classification.flags = classification_flag;
- /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
-
- power_state->classification.temporary_state = false;
- power_state->classification.to_be_deleted = false;
-
- power_state->validation.disallowOnDC =
- (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_DISALLOW_ON_DC));
-
- power_state->pcie.lanes = 0;
-
- power_state->display.disableFrameModulation = false;
- power_state->display.limitRefreshrate = false;
- power_state->display.enableVariBright =
- (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_ENABLE_VARIBRIGHT));
-
- power_state->validation.supportedPowerLevels = 0;
- power_state->uvd_clocks.VCLK = 0;
- power_state->uvd_clocks.DCLK = 0;
- power_state->temperatures.min = 0;
- power_state->temperatures.max = 0;
-
- performance_level = &(tonga_ps->performance_levels
- [tonga_ps->performance_level_count++]);
-
- PP_ASSERT_WITH_CODE(
- (tonga_ps->performance_level_count < SMU72_MAX_LEVELS_GRAPHICS),
- "Performance levels exceeds SMC limit!",
- return -1);
-
- PP_ASSERT_WITH_CODE(
- (tonga_ps->performance_level_count <=
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
- "Performance levels exceeds Driver limit!",
- return -1);
-
- /* Performance levels are arranged from low to high. */
- performance_level->memory_clock =
- le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexLow].ulMclk);
-
- performance_level->engine_clock =
- le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexLow].ulSclk);
-
- performance_level->pcie_gen = get_pcie_gen_support(
- data->pcie_gen_cap,
- state_entry->ucPCIEGenLow);
-
- performance_level->pcie_lane = get_pcie_lane_support(
- data->pcie_lane_cap,
-			state_entry->ucPCIELaneLow);
-
- performance_level =
- &(tonga_ps->performance_levels[tonga_ps->performance_level_count++]);
-
- performance_level->memory_clock =
- le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexHigh].ulMclk);
-
- performance_level->engine_clock =
- le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexHigh].ulSclk);
-
- performance_level->pcie_gen = get_pcie_gen_support(
- data->pcie_gen_cap,
- state_entry->ucPCIEGenHigh);
-
- performance_level->pcie_lane = get_pcie_lane_support(
- data->pcie_lane_cap,
- state_entry->ucPCIELaneHigh);
-
- return 0;
-}
-
-static int tonga_get_pp_table_entry(struct pp_hwmgr *hwmgr,
- unsigned long entry_index, struct pp_power_state *ps)
-{
- int result;
- struct tonga_power_state *tonga_ps;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
- table_info->vdd_dep_on_mclk;
-
- ps->hardware.magic = PhwTonga_Magic;
-
- tonga_ps = cast_phw_tonga_power_state(&(ps->hardware));
-
- result = tonga_get_powerplay_table_entry(hwmgr, entry_index, ps,
- tonga_get_pp_table_entry_callback_func);
-
-	/* This is the earliest time we have all the dependency tables and the
-	 * VBIOS boot state, as PP_Tables_GetPowerPlayTableEntry retrieves the
-	 * VBIOS boot state. If there is only one VDDCI/MCLK level, check
-	 * whether it matches the VBIOS boot state.
-	 */
- if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
- if (dep_mclk_table->entries[0].clk !=
- data->vbios_boot_state.mclk_bootup_value)
- printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
- "does not match VBIOS boot MCLK level");
- if (dep_mclk_table->entries[0].vddci !=
- data->vbios_boot_state.vddci_bootup_value)
- printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
- "does not match VBIOS boot VDDCI level");
- }
-
- /* set DC compatible flag if this state supports DC */
- if (!ps->validation.disallowOnDC)
- tonga_ps->dc_compatible = true;
-
- if (ps->classification.flags & PP_StateClassificationFlag_ACPI)
- data->acpi_pcie_gen = tonga_ps->performance_levels[0].pcie_gen;
- else if (ps->classification.flags & PP_StateClassificationFlag_Boot) {
- if (data->bacos.best_match == 0xffff) {
- /* For V.I. use boot state as base BACO state */
- data->bacos.best_match = PP_StateClassificationFlag_Boot;
- data->bacos.performance_level = tonga_ps->performance_levels[0];
- }
- }
-
- tonga_ps->uvd_clocks.VCLK = ps->uvd_clocks.VCLK;
- tonga_ps->uvd_clocks.DCLK = ps->uvd_clocks.DCLK;
-
- if (!result) {
- uint32_t i;
-
- switch (ps->classification.ui_label) {
- case PP_StateUILabel_Performance:
- data->use_pcie_performance_levels = true;
-
- for (i = 0; i < tonga_ps->performance_level_count; i++) {
- if (data->pcie_gen_performance.max <
- tonga_ps->performance_levels[i].pcie_gen)
- data->pcie_gen_performance.max =
- tonga_ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_gen_performance.min >
- tonga_ps->performance_levels[i].pcie_gen)
- data->pcie_gen_performance.min =
- tonga_ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_lane_performance.max <
- tonga_ps->performance_levels[i].pcie_lane)
- data->pcie_lane_performance.max =
- tonga_ps->performance_levels[i].pcie_lane;
-
- if (data->pcie_lane_performance.min >
- tonga_ps->performance_levels[i].pcie_lane)
- data->pcie_lane_performance.min =
- tonga_ps->performance_levels[i].pcie_lane;
- }
- break;
- case PP_StateUILabel_Battery:
- data->use_pcie_power_saving_levels = true;
-
- for (i = 0; i < tonga_ps->performance_level_count; i++) {
- if (data->pcie_gen_power_saving.max <
- tonga_ps->performance_levels[i].pcie_gen)
- data->pcie_gen_power_saving.max =
- tonga_ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_gen_power_saving.min >
- tonga_ps->performance_levels[i].pcie_gen)
- data->pcie_gen_power_saving.min =
- tonga_ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_lane_power_saving.max <
- tonga_ps->performance_levels[i].pcie_lane)
- data->pcie_lane_power_saving.max =
- tonga_ps->performance_levels[i].pcie_lane;
-
- if (data->pcie_lane_power_saving.min >
- tonga_ps->performance_levels[i].pcie_lane)
- data->pcie_lane_power_saving.min =
- tonga_ps->performance_levels[i].pcie_lane;
- }
- break;
- default:
- break;
- }
- }
- return 0;
-}
-
-static void
-tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
- uint32_t sclk, mclk, activity_percent;
- uint32_t offset;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency));
-
- sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency));
-
- mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
- seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", mclk/100, sclk/100);
-
- offset = data->soft_regs_start + offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
- activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
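-	/* The activity counter is treated as 8.8 fixed point: add 0x80 (0.5)
-	 * to round to the nearest whole percent before shifting.
-	 */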
- activity_percent += 0x80;
- activity_percent >>= 8;
-
- seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
-
- seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
-
- seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
-}
-
-static int tonga_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
- const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table);
- uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
- struct tonga_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table);
- uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
- struct PP_Clocks min_clocks = {0};
- uint32_t i;
- struct cgs_display_info info = {0};
-
- data->need_update_smu7_dpm_table = 0;
-
- for (i = 0; i < psclk_table->count; i++) {
- if (sclk == psclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= psclk_table->count)
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- else {
- /* TODO: Check SCLK in DAL's minimum clocks in case DeepSleep divider update is required.*/
-		if (data->display_timing.min_clock_insr != min_clocks.engineClockInSR)
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
- }
-
-	for (i = 0; i < pmclk_table->count; i++) {
- if (mclk == pmclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= pmclk_table->count)
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-
- return 0;
-}
-
-static uint16_t tonga_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_ps)
-{
- uint32_t i;
- uint32_t sclk, max_sclk = 0;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_dpm_table *pdpm_table = &data->dpm_table;
-
- for (i = 0; i < hw_ps->performance_level_count; i++) {
- sclk = hw_ps->performance_levels[i].engine_clock;
- if (max_sclk < sclk)
- max_sclk = sclk;
- }
-
- for (i = 0; i < pdpm_table->sclk_table.count; i++) {
- if (pdpm_table->sclk_table.dpm_levels[i].value == max_sclk)
- return (uint16_t) ((i >= pdpm_table->pcie_speed_table.count) ?
- pdpm_table->pcie_speed_table.dpm_levels[pdpm_table->pcie_speed_table.count-1].value :
- pdpm_table->pcie_speed_table.dpm_levels[i].value);
- }
-
- return 0;
-}
-
-static int tonga_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state);
- const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state);
-
- uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_nps);
- uint16_t current_link_speed;
-
- if (data->force_pcie_gen == PP_PCIEGenInvalid)
- current_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_cps);
- else
- current_link_speed = data->force_pcie_gen;
-
- data->force_pcie_gen = PP_PCIEGenInvalid;
- data->pspp_notify_required = false;
- if (target_link_speed > current_link_speed) {
-		switch (target_link_speed) {
- case PP_PCIEGen3:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
- break;
- data->force_pcie_gen = PP_PCIEGen2;
- if (current_link_speed == PP_PCIEGen2)
- break;
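-			/* fall through: Gen3 request failed, retry as Gen2 */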
- case PP_PCIEGen2:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
- break;
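-			/* fall through */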
- default:
- data->force_pcie_gen = tonga_get_current_pcie_speed(hwmgr);
- break;
- }
- } else {
- if (target_link_speed < current_link_speed)
- data->pspp_notify_required = true;
- }
-
- return 0;
-}
-
-static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if ((0 == data->sclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
- "Trying to freeze SCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(
- 0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_FreezeLevel),
- "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- if ((0 == data->mclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
- "Trying to freeze MCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(
- 0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_FreezeLevel),
- "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- return 0;
-}
-
-static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input)
-{
- int result = 0;
-
- const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
- const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
- uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
- struct tonga_dpm_table *pdpm_table = &data->dpm_table;
-
- struct tonga_dpm_table *pgolden_dpm_table = &data->golden_dpm_table;
- uint32_t dpm_count, clock_percent;
- uint32_t i;
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
- pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
- /* Need to do calculation based on the golden DPM table
- * as the Heatmap GPU Clock axis is also based on the default values
- */
- PP_ASSERT_WITH_CODE(
- (pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0),
- "Divide by 0!",
- return -1);
- dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2;
- for (i = dpm_count; i > 1; i--) {
- if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) {
- clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) /
- pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;
-
- pdpm_table->sclk_table.dpm_levels[i].value =
- pgolden_dpm_table->sclk_table.dpm_levels[i].value +
- (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
-
-				} else if (pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value > sclk) {
- clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) /
- pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;
-
- pdpm_table->sclk_table.dpm_levels[i].value =
- pgolden_dpm_table->sclk_table.dpm_levels[i].value -
- (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
- } else
- pdpm_table->sclk_table.dpm_levels[i].value =
- pgolden_dpm_table->sclk_table.dpm_levels[i].value;
- }
- }
- }
-
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
- pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
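-			/* Scale the intermediate MCLK levels against the golden
-			 * table, mirroring the SCLK handling above.
-			 */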
- PP_ASSERT_WITH_CODE(
- (pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0),
- "Divide by 0!",
- return -1);
-			dpm_count = pdpm_table->mclk_table.count < 2 ? 0 : pdpm_table->mclk_table.count-2;
- for (i = dpm_count; i > 1; i--) {
- if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) {
- clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) /
- pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;
-
- pdpm_table->mclk_table.dpm_levels[i].value =
- pgolden_dpm_table->mclk_table.dpm_levels[i].value +
- (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
-
-				} else if (pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value > mclk) {
- clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) /
- pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;
-
- pdpm_table->mclk_table.dpm_levels[i].value =
- pgolden_dpm_table->mclk_table.dpm_levels[i].value -
- (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
- } else
- pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value;
- }
- }
- }
-
- if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
- result = tonga_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
- /*populate MCLK dpm table to SMU7 */
- result = tonga_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- return result;
-}
-
-static int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
- struct tonga_single_dpm_table * pdpm_table,
- uint32_t low_limit, uint32_t high_limit)
-{
- uint32_t i;
-
- for (i = 0; i < pdpm_table->count; i++) {
- if ((pdpm_table->dpm_levels[i].value < low_limit) ||
- (pdpm_table->dpm_levels[i].value > high_limit))
- pdpm_table->dpm_levels[i].enabled = false;
- else
- pdpm_table->dpm_levels[i].enabled = true;
- }
- return 0;
-}
-
-static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- uint32_t high_limit_count;
-
- PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1),
- "power state did not have any performance level",
- return -1);
-
-	high_limit_count = (1 == hw_state->performance_level_count) ? 0 : 1;
-
- tonga_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.sclk_table),
- hw_state->performance_levels[0].engine_clock,
- hw_state->performance_levels[high_limit_count].engine_clock);
-
- tonga_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.mclk_table),
- hw_state->performance_levels[0].memory_clock,
- hw_state->performance_levels[high_limit_count].memory_clock);
-
- return 0;
-}
-
-static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input)
-{
- int result;
- const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
-
- result = tonga_trim_dpm_states(hwmgr, tonga_ps);
- if (0 != result)
- return result;
-
- data->dpm_level_enable_mask.sclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
- data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
- data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
- if (data->uvd_enabled)
- data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
-
- data->dpm_level_enable_mask.pcie_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
-
- return 0;
-}
-
-int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- (PPSMC_Msg)PPSMC_MSG_VCEDPM_Enable :
- (PPSMC_Msg)PPSMC_MSG_VCEDPM_Disable);
-}
-
-int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
- (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable);
-}
-
-int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *ptable_information = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (!bgate) {
- data->smc_state_table.UvdBootLevel = (uint8_t) (ptable_information->mm_dep_table->count - 1);
- mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
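-		/* Round the byte offset down to a dword boundary for the
-		 * indirect SMC register access.
-		 */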
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.UvdBootLevel));
- }
-
- return tonga_enable_disable_uvd_dpm(hwmgr, !bgate);
-}
-
-int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state);
- const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state);
-
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (tonga_nps->vce_clocks.EVCLK > 0 && (tonga_cps == NULL || tonga_cps->vce_clocks.EVCLK == 0)) {
- data->smc_state_table.VceBootLevel = (uint8_t) (pptable_info->mm_dep_table->count - 1);
-
- mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.VceBootLevel));
-
- tonga_enable_disable_vce_dpm(hwmgr, true);
- } else if (tonga_nps->vce_clocks.EVCLK == 0 && tonga_cps != NULL && tonga_cps->vce_clocks.EVCLK > 0)
- tonga_enable_disable_vce_dpm(hwmgr, false);
-
- return 0;
-}
-
-static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- uint32_t address;
- int32_t result;
-
- if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
- return 0;
-
-	memset(&data->mc_reg_table, 0, sizeof(SMU72_Discrete_MCRegisters));
-
-	result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table));
-	if (result != 0)
-		return result;
-
-	address = data->mc_reg_table_start + (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
-
- return tonga_copy_bytes_to_smc(hwmgr->smumgr, address,
- (uint8_t *)&data->mc_reg_table.data[0],
- sizeof(SMU72_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
- data->sram_end);
-}
-
-static int tonga_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return tonga_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if ((0 == data->sclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
-
- PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
- "Trying to Unfreeze SCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(
- 0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
- "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
- return -1);
- }
-
- if ((0 == data->mclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
-
- PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
- "Trying to Unfreeze MCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(
- 0 == smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_MCLKDPM_UnfreezeLevel),
- "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
- return -1);
- }
-
- data->need_update_smu7_dpm_table = 0;
-
- return 0;
-}
-
-static int tonga_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
- uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_ps);
- uint8_t request;
-
- if (data->pspp_notify_required ||
- data->pcie_performance_request) {
- if (target_link_speed == PP_PCIEGen3)
- request = PCIE_PERF_REQ_GEN3;
- else if (target_link_speed == PP_PCIEGen2)
- request = PCIE_PERF_REQ_GEN2;
- else
- request = PCIE_PERF_REQ_GEN1;
-
-		if (request == PCIE_PERF_REQ_GEN1 && tonga_get_current_pcie_speed(hwmgr) > 0) {
- data->pcie_performance_request = false;
- return 0;
- }
-
- if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) {
-			if (PP_PCIEGen2 == target_link_speed)
-				printk(KERN_ERR "PSPP request to switch to Gen2 from Gen3 failed!\n");
-			else
-				printk(KERN_ERR "PSPP request to switch to Gen1 from Gen2 failed!\n");
- }
- }
-
- data->pcie_performance_request = false;
- return 0;
-}
-
-static int tonga_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
-{
- int tmp_result, result = 0;
-
- tmp_result = tonga_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
- tmp_result = tonga_request_link_speed_change_before_state_change(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result);
- }
-
- tmp_result = tonga_freeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
-
- tmp_result = tonga_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result);
-
- tmp_result = tonga_generate_dpm_level_enable_mask(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result);
-
- tmp_result = tonga_update_vce_dpm(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result);
-
- tmp_result = tonga_update_sclk_threshold(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result);
-
- tmp_result = tonga_update_and_upload_mc_reg_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result);
-
- tmp_result = tonga_program_memory_timing_parameters_conditionally(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result);
-
- tmp_result = tonga_unfreeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result);
-
- tmp_result = tonga_upload_dpm_level_enable_mask(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
- tmp_result = tonga_notify_link_speed_change_after_state_change(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change after state change!", result = tmp_result);
- }
-
- return result;
-}
-
-/**
-* Set maximum target operating fan output PWM
-*
-* @param hwmgr: the address of the powerplay hardware manager.
-* @param us_max_fan_pwm: max operating fan PWM in percent.
-* @return 0 on success; -1 if the SMC message failed.
-*/
-static int tonga_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
-{
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
-
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1);
-}
-
-int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
-{
- uint32_t num_active_displays = 0;
- struct cgs_display_info info = {0};
- info.mode_info = NULL;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- num_active_displays = info.display_count;
-
- if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
- tonga_notify_smc_display_change(hwmgr, false);
- else
- tonga_notify_smc_display_change(hwmgr, true);
-
- return 0;
-}
-
-/**
-* Programs the display gap
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always OK
-*/
-int tonga_program_display_gap(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- uint32_t num_active_displays = 0;
- uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
- uint32_t display_gap2;
- uint32_t pre_vbi_time_in_us;
- uint32_t frame_time_in_us;
- uint32_t ref_clock;
- uint32_t refresh_rate = 0;
- struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
-
- info.mode_info = &mode_info;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- num_active_displays = info.display_count;
-
-	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
-
- ref_clock = mode_info.ref_clock;
- refresh_rate = mode_info.refresh_rate;
-
-	if (0 == refresh_rate)
- refresh_rate = 60;
-
- frame_time_in_us = 1000000 / refresh_rate;
-
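-	/* Active display time (frame time minus the vblank interval) less a
-	 * 200us margin, converted to reference-clock ticks for the SMC.
-	 */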
- pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
- display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, PreVBlankGap), 0x64);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
-
- if (num_active_displays == 1)
- tonga_notify_smc_display_change(hwmgr, true);
-
- return 0;
-}
-
-int tonga_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
-{
-
- tonga_program_display_gap(hwmgr);
-
- /* to do PhwTonga_CacUpdateDisplayConfiguration(pHwMgr); */
- return 0;
-}
-
-/**
-* Set maximum target operating fan output RPM
-*
-* @param hwmgr: the address of the powerplay hardware manager.
-* @param us_max_fan_rpm: max operating fan RPM value.
-* @return 0 on success; -1 if the SMC message failed.
-*/
-static int tonga_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
-{
-	hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
-
-	if (phm_is_hw_access_blocked(hwmgr))
-		return 0;
-
-	return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm) ? 0 : -1);
-}
-
-uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr)
-{
- uint32_t reference_clock;
- uint32_t tc;
- uint32_t divide;
-
- ATOM_FIRMWARE_INFO *fw_info;
- uint16_t size;
- uint8_t frev, crev;
- int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
- tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
-
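-	/* If the mux routes TCLK onto XCLK, the fixed TCLK rate is the
-	 * reference; otherwise derive it from the VBIOS crystal clock.
-	 */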
- if (tc)
- return TCLK;
-
- fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index,
- &size, &frev, &crev);
-
- if (!fw_info)
- return 0;
-
- reference_clock = le16_to_cpu(fw_info->usReferenceClock);
-
- divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
-
- if (0 != divide)
- return reference_clock / 4;
-
- return reference_clock;
-}
-
-int tonga_dpm_set_interrupt_state(void *private_data,
- unsigned src_id, unsigned type,
- int enabled)
-{
- uint32_t cg_thermal_int;
- struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- switch (type) {
- case AMD_THERMAL_IRQ_LOW_TO_HIGH:
- if (enabled) {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- } else {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- }
- break;
-
- case AMD_THERMAL_IRQ_HIGH_TO_LOW:
- if (enabled) {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- } else {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- }
- break;
- default:
- break;
- }
- return 0;
-}
-
-int tonga_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
- const void *thermal_interrupt_info)
-{
- int result;
- const struct pp_interrupt_registration_info *info =
- (const struct pp_interrupt_registration_info *)thermal_interrupt_info;
-
- if (info == NULL)
- return -EINVAL;
-
- result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
- tonga_dpm_set_interrupt_state,
- info->call_back, info->context);
-
- if (result)
- return -EINVAL;
-
- result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
- tonga_dpm_set_interrupt_state,
- info->call_back, info->context);
-
- if (result)
- return -EINVAL;
-
- return 0;
-}
-
-bool tonga_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- bool is_update_required = false;
-	struct cgs_display_info info = {0, 0, NULL};
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
- is_update_required = true;
-/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
- if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
- cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
- if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
- is_update_required = true;
-*/
- return is_update_required;
-}
-
-static inline bool tonga_are_power_levels_equal(const struct tonga_performance_level *pl1,
- const struct tonga_performance_level *pl2)
-{
- return ((pl1->memory_clock == pl2->memory_clock) &&
- (pl1->engine_clock == pl2->engine_clock) &&
- (pl1->pcie_gen == pl2->pcie_gen) &&
- (pl1->pcie_lane == pl2->pcie_lane));
-}
-
-int tonga_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
-{
- const struct tonga_power_state *psa = cast_const_phw_tonga_power_state(pstate1);
- const struct tonga_power_state *psb = cast_const_phw_tonga_power_state(pstate2);
- int i;
-
- if (equal == NULL || psa == NULL || psb == NULL)
- return -EINVAL;
-
- /* If the two states don't even have the same number of performance levels they cannot be the same state. */
- if (psa->performance_level_count != psb->performance_level_count) {
- *equal = false;
- return 0;
- }
-
- for (i = 0; i < psa->performance_level_count; i++) {
- if (!tonga_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
- /* If we have found even one performance level pair that is different the states are different. */
- *equal = false;
- return 0;
- }
- }
-
- /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
- *equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK));
- *equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK));
- *equal &= (psa->sclk_threshold == psb->sclk_threshold);
- *equal &= (psa->acp_clk == psb->acp_clk);
-
- return 0;
-}
-
-static int tonga_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
- if (mode) {
- /* stop auto-manage */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- tonga_fan_ctrl_stop_smc_fan_control(hwmgr);
- tonga_fan_ctrl_set_static_mode(hwmgr, mode);
-	} else {
-		/* restart auto-manage */
-		tonga_fan_ctrl_reset_fan_speed_to_default(hwmgr);
-	}
-
- return 0;
-}
-
-static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr->fan_ctrl_is_in_default_mode)
- return hwmgr->fan_ctrl_default_mode;
- else
- return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, FDO_PWM_MODE);
-}
-
-static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, uint32_t mask)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
- return -EINVAL;
-
- switch (type) {
- case PP_SCLK:
- if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
- break;
- case PP_MCLK:
- if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
- break;
- case PP_PCIE:
- {
- uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- uint32_t level = 0;
-
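-		/* PCIe DPM forces a single level: use the index of the
-		 * highest set bit in the requested mask.
-		 */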
- while (tmp >>= 1)
- level++;
-
- if (!data->pcie_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- level);
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
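/*
 * Editor's note: an illustrative, self-contained sketch of the PP_PCIE case
 * above -- the shift loop computes the index of the highest set bit in the
 * enabled-level mask (i.e. fls(mask) - 1), which is the level that gets
 * forced. The helper name below is hypothetical, not part of the driver.
 */
#include <stdint.h>

static uint32_t highest_level_from_mask(uint32_t mask)
{
	uint32_t level = 0;

	while (mask >>= 1)	/* consume bits until only bit 0 remains */
		level++;
	return level;
}

/* e.g. a mask of 0x6 (levels 1 and 2 enabled) yields level 2. */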
-
-static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, char *buf)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- struct tonga_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
- int i, now, size = 0;
- uint32_t clock, pcie_speed;
-
- switch (type) {
- case PP_SCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- for (i = 0; i < sclk_table->count; i++) {
- if (clock > sclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < sclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, sclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_MCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- for (i = 0; i < mclk_table->count; i++) {
- if (clock > mclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < mclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, mclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_PCIE:
- pcie_speed = tonga_get_current_pcie_speed(hwmgr);
- for (i = 0; i < pcie_table->count; i++) {
- if (pcie_speed != pcie_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < pcie_table->count; i++)
- size += sprintf(buf + size, "%d: %s %s\n", i,
- (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
- (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
- (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
- (i == now) ? "*" : "");
- break;
- default:
- break;
- }
- return size;
-}
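/*
 * Editor's note: tonga_print_clock_levels() backs the pp_dpm_sclk,
 * pp_dpm_mclk and pp_dpm_pcie sysfs files. The SMC reports clocks in 10 kHz
 * units, hence the divide-by-100 when printing. A hypothetical three-level
 * SCLK table would read back as:
 *
 *	0: 300Mhz
 *	1: 608Mhz *
 *	2: 1050Mhz
 *
 * where '*' marks the first level at or above the current clock. The values
 * shown are illustrative, not from real hardware.
 */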
-
-static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- struct tonga_single_dpm_table *golden_sclk_table =
- &(data->golden_dpm_table.sclk_table);
- int value;
-
- value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
- 100 /
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return value;
-}
-
-static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_single_dpm_table *golden_sclk_table =
- &(data->golden_dpm_table.sclk_table);
- struct pp_power_state *ps;
- struct tonga_power_state *tonga_ps;
-
- if (value > 20)
- value = 20;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
- tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock =
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
- value / 100 +
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return 0;
-}
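/*
 * Editor's note: a worked example of the overdrive (OD) percentage math in
 * tonga_get_sclk_od()/tonga_set_sclk_od() above; the helper names and the
 * numbers are illustrative only.
 */
#include <stdint.h>

static uint32_t od_apply(uint32_t golden, uint32_t percent)
{
	/* mirrors set_sclk_od: golden * value / 100 + golden */
	return golden * percent / 100 + golden;
}

static uint32_t od_read_back(uint32_t cur, uint32_t golden)
{
	/* mirrors get_sclk_od: (current - golden) * 100 / golden */
	return (cur - golden) * 100 / golden;
}

/*
 * With a golden top level of 100000 (10 kHz units, i.e. 1000 MHz):
 * od_apply(100000, 10) == 110000 (1100 MHz), and
 * od_read_back(110000, 100000) == 10.
 */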
-
-static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- struct tonga_single_dpm_table *golden_mclk_table =
- &(data->golden_dpm_table.mclk_table);
- int value;
-
- value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
- 100 /
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return value;
-}
-
-static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_single_dpm_table *golden_mclk_table =
- &(data->golden_dpm_table.mclk_table);
- struct pp_power_state *ps;
- struct tonga_power_state *tonga_ps;
-
- if (value > 20)
- value = 20;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
- tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock =
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
- value / 100 +
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return 0;
-}
-
-static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
- .backend_init = &tonga_hwmgr_backend_init,
- .backend_fini = &tonga_hwmgr_backend_fini,
- .asic_setup = &tonga_setup_asic_task,
- .dynamic_state_management_enable = &tonga_enable_dpm_tasks,
- .dynamic_state_management_disable = &tonga_disable_dpm_tasks,
- .apply_state_adjust_rules = tonga_apply_state_adjust_rules,
- .force_dpm_level = &tonga_force_dpm_level,
- .power_state_set = tonga_set_power_state_tasks,
- .get_power_state_size = tonga_get_power_state_size,
- .get_mclk = tonga_dpm_get_mclk,
- .get_sclk = tonga_dpm_get_sclk,
- .patch_boot_state = tonga_dpm_patch_boot_state,
- .get_pp_table_entry = tonga_get_pp_table_entry,
- .get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
- .print_current_perforce_level = tonga_print_current_perforce_level,
- .powerdown_uvd = tonga_phm_powerdown_uvd,
- .powergate_uvd = tonga_phm_powergate_uvd,
- .powergate_vce = tonga_phm_powergate_vce,
- .disable_clock_power_gating = tonga_phm_disable_clock_power_gating,
- .update_clock_gatings = tonga_phm_update_clock_gatings,
- .notify_smc_display_config_after_ps_adjustment = tonga_notify_smc_display_config_after_ps_adjustment,
- .display_config_changed = tonga_display_configuration_changed_task,
- .set_max_fan_pwm_output = tonga_set_max_fan_pwm_output,
- .set_max_fan_rpm_output = tonga_set_max_fan_rpm_output,
- .get_temperature = tonga_thermal_get_temperature,
- .stop_thermal_controller = tonga_thermal_stop_thermal_controller,
- .get_fan_speed_info = tonga_fan_ctrl_get_fan_speed_info,
- .get_fan_speed_percent = tonga_fan_ctrl_get_fan_speed_percent,
- .set_fan_speed_percent = tonga_fan_ctrl_set_fan_speed_percent,
- .reset_fan_speed_to_default = tonga_fan_ctrl_reset_fan_speed_to_default,
- .get_fan_speed_rpm = tonga_fan_ctrl_get_fan_speed_rpm,
- .set_fan_speed_rpm = tonga_fan_ctrl_set_fan_speed_rpm,
- .uninitialize_thermal_controller = tonga_thermal_ctrl_uninitialize_thermal_controller,
- .register_internal_thermal_interrupt = tonga_register_internal_thermal_interrupt,
- .check_smc_update_required_for_display_configuration = tonga_check_smc_update_required_for_display_configuration,
- .check_states_equal = tonga_check_states_equal,
- .set_fan_control_mode = tonga_set_fan_control_mode,
- .get_fan_control_mode = tonga_get_fan_control_mode,
- .force_clock_level = tonga_force_clock_level,
- .print_clock_levels = tonga_print_clock_levels,
- .get_sclk_od = tonga_get_sclk_od,
- .set_sclk_od = tonga_set_sclk_od,
- .get_mclk_od = tonga_get_mclk_od,
- .set_mclk_od = tonga_set_mclk_od,
-};
-
-int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
-{
- hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
- hwmgr->pptable_func = &tonga_pptable_funcs;
- pp_tonga_thermal_initialize(hwmgr);
- return 0;
-}
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
deleted file mode 100644
index 3961884bfa9b..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef TONGA_HWMGR_H
-#define TONGA_HWMGR_H
-
-#include "hwmgr.h"
-#include "smu72_discrete.h"
-#include "ppatomctrl.h"
-#include "ppinterrupt.h"
-#include "tonga_powertune.h"
-#include "pp_endian.h"
-
-#define TONGA_MAX_HARDWARE_POWERLEVELS 2
-#define TONGA_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
-
-struct tonga_performance_level {
- uint32_t memory_clock;
- uint32_t engine_clock;
- uint16_t pcie_gen;
- uint16_t pcie_lane;
-};
-
-struct _phw_tonga_bacos {
- uint32_t best_match;
- uint32_t baco_flags;
- struct tonga_performance_level performance_level;
-};
-typedef struct _phw_tonga_bacos phw_tonga_bacos;
-
-struct _phw_tonga_uvd_clocks {
- uint32_t VCLK;
- uint32_t DCLK;
-};
-
-typedef struct _phw_tonga_uvd_clocks phw_tonga_uvd_clocks;
-
-struct _phw_tonga_vce_clocks {
- uint32_t EVCLK;
- uint32_t ECCLK;
-};
-
-typedef struct _phw_tonga_vce_clocks phw_tonga_vce_clocks;
-
-struct tonga_power_state {
- uint32_t magic;
- phw_tonga_uvd_clocks uvd_clocks;
- phw_tonga_vce_clocks vce_clocks;
- uint32_t sam_clk;
- uint32_t acp_clk;
- uint16_t performance_level_count;
- bool dc_compatible;
- uint32_t sclk_threshold;
- struct tonga_performance_level performance_levels[TONGA_MAX_HARDWARE_POWERLEVELS];
-};
-
-struct _phw_tonga_dpm_level {
- bool enabled;
- uint32_t value;
- uint32_t param1;
-};
-typedef struct _phw_tonga_dpm_level phw_tonga_dpm_level;
-
-#define TONGA_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define MAX_REGULAR_DPM_NUMBER 8
-#define TONGA_MINIMUM_ENGINE_CLOCK 2500
-
-struct tonga_single_dpm_table {
- uint32_t count;
- phw_tonga_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
-};
-
-struct tonga_dpm_table {
- struct tonga_single_dpm_table sclk_table;
- struct tonga_single_dpm_table mclk_table;
- struct tonga_single_dpm_table pcie_speed_table;
- struct tonga_single_dpm_table vddc_table;
- struct tonga_single_dpm_table vdd_gfx_table;
- struct tonga_single_dpm_table vdd_ci_table;
- struct tonga_single_dpm_table mvdd_table;
-};
-typedef struct tonga_dpm_table phw_tonga_dpm_table;
-
-
-struct _phw_tonga_clock_registers {
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_FUNC_CNTL_4;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t vDLL_CNTL;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL_1;
- uint32_t vMPLL_FUNC_CNTL_2;
- uint32_t vMPLL_SS1;
- uint32_t vMPLL_SS2;
-};
-typedef struct _phw_tonga_clock_registers phw_tonga_clock_registers;
-
-struct _phw_tonga_voltage_smio_registers {
- uint32_t vs0_vid_lower_smio_cntl;
-};
-typedef struct _phw_tonga_voltage_smio_registers phw_tonga_voltage_smio_registers;
-
-
-struct _phw_tonga_mc_reg_entry {
- uint32_t mclk_max;
- uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
-};
-typedef struct _phw_tonga_mc_reg_entry phw_tonga_mc_reg_entry;
-
-struct _phw_tonga_mc_reg_table {
- uint8_t last; /* number of registers*/
- uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/
- uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
- phw_tonga_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
-};
-typedef struct _phw_tonga_mc_reg_table phw_tonga_mc_reg_table;
-
-#define DISABLE_MC_LOADMICROCODE 1
-#define DISABLE_MC_CFGPROGRAMMING 2
-
-/*Ultra Low Voltage parameter structure */
-struct _phw_tonga_ulv_parm{
- bool ulv_supported;
- uint32_t ch_ulv_parameter;
- uint32_t ulv_volt_change_delay;
- struct tonga_performance_level ulv_power_level;
-};
-typedef struct _phw_tonga_ulv_parm phw_tonga_ulv_parm;
-
-#define TONGA_MAX_LEAKAGE_COUNT 8
-
-struct _phw_tonga_leakage_voltage {
- uint16_t count;
- uint16_t leakage_id[TONGA_MAX_LEAKAGE_COUNT];
- uint16_t actual_voltage[TONGA_MAX_LEAKAGE_COUNT];
-};
-typedef struct _phw_tonga_leakage_voltage phw_tonga_leakage_voltage;
-
-struct _phw_tonga_display_timing {
- uint32_t min_clock_insr;
- uint32_t num_existing_displays;
-};
-typedef struct _phw_tonga_display_timing phw_tonga_display_timing;
-
-struct _phw_tonga_dpmlevel_enable_mask {
- uint32_t uvd_dpm_enable_mask;
- uint32_t vce_dpm_enable_mask;
- uint32_t acp_dpm_enable_mask;
- uint32_t samu_dpm_enable_mask;
- uint32_t sclk_dpm_enable_mask;
- uint32_t mclk_dpm_enable_mask;
- uint32_t pcie_dpm_enable_mask;
-};
-typedef struct _phw_tonga_dpmlevel_enable_mask phw_tonga_dpmlevel_enable_mask;
-
-struct _phw_tonga_pcie_perf_range {
- uint16_t max;
- uint16_t min;
-};
-typedef struct _phw_tonga_pcie_perf_range phw_tonga_pcie_perf_range;
-
-struct _phw_tonga_vbios_boot_state {
- uint16_t mvdd_bootup_value;
- uint16_t vddc_bootup_value;
- uint16_t vddci_bootup_value;
- uint16_t vddgfx_bootup_value;
- uint32_t sclk_bootup_value;
- uint32_t mclk_bootup_value;
- uint16_t pcie_gen_bootup_value;
- uint16_t pcie_lane_bootup_value;
-};
-typedef struct _phw_tonga_vbios_boot_state phw_tonga_vbios_boot_state;
-
-#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
-#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
-#define DPMTABLE_UPDATE_SCLK 0x00000004
-#define DPMTABLE_UPDATE_MCLK 0x00000008
-
-/* We need to review which fields are needed. */
-/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. */
-struct tonga_hwmgr {
- struct tonga_dpm_table dpm_table;
- struct tonga_dpm_table golden_dpm_table;
-
- uint32_t voting_rights_clients0;
- uint32_t voting_rights_clients1;
- uint32_t voting_rights_clients2;
- uint32_t voting_rights_clients3;
- uint32_t voting_rights_clients4;
- uint32_t voting_rights_clients5;
- uint32_t voting_rights_clients6;
- uint32_t voting_rights_clients7;
- uint32_t static_screen_threshold_unit;
- uint32_t static_screen_threshold;
- uint32_t voltage_control;
- uint32_t vdd_gfx_control;
-
- uint32_t vddc_vddci_delta;
- uint32_t vddc_vddgfx_delta;
-
- struct pp_interrupt_registration_info internal_high_thermal_interrupt_info;
- struct pp_interrupt_registration_info internal_low_thermal_interrupt_info;
- struct pp_interrupt_registration_info smc_to_host_interrupt_info;
- uint32_t active_auto_throttle_sources;
-
- struct pp_interrupt_registration_info external_throttle_interrupt;
- irq_handler_func_t external_throttle_callback;
- void *external_throttle_context;
-
- struct pp_interrupt_registration_info ctf_interrupt_info;
- irq_handler_func_t ctf_callback;
- void *ctf_context;
-
- phw_tonga_clock_registers clock_registers;
- phw_tonga_voltage_smio_registers voltage_smio_registers;
-
- bool is_memory_GDDR5;
- uint16_t acpi_vddc;
- bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */
- uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */
- uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */
- uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */
- uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */
- uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */
- phw_tonga_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/
- phw_tonga_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */
- phw_tonga_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */
-
- uint32_t mvdd_control;
- uint32_t vddc_mask_low;
- uint32_t mvdd_mask_low;
- uint16_t max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/
- uint16_t min_vddc_in_pp_table;
- uint16_t max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */
- uint16_t min_vddci_in_pp_table;
- uint32_t mclk_strobe_mode_threshold;
- uint32_t mclk_stutter_mode_threshold;
- uint32_t mclk_edc_enable_threshold;
- uint32_t mclk_edc_wr_enable_threshold;
- bool is_uvd_enabled;
- bool is_xdma_enabled;
- phw_tonga_vbios_boot_state vbios_boot_state;
-
- bool battery_state;
- bool is_tlu_enabled;
- bool pcie_performance_request;
-
- /* -------------- SMC SRAM Address of firmware header tables ----------------*/
- uint32_t sram_end; /* The first address after the SMC SRAM. */
- uint32_t dpm_table_start; /* The start of the dpm table in the SMC SRAM. */
- uint32_t soft_regs_start; /* The start of the soft registers in the SMC SRAM. */
- uint32_t mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */
- uint32_t fan_table_start; /* The start of the fan table in the SMC SRAM. */
- uint32_t arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */
- SMU72_Discrete_DpmTable smc_state_table; /* The carbon copy of the SMC state table. */
- SMU72_Discrete_MCRegisters mc_reg_table;
- SMU72_Discrete_Ulv ulv_setting; /* The carbon copy of ULV setting. */
- /* -------------- Stuff originally coming from Evergreen --------------------*/
- phw_tonga_mc_reg_table tonga_mc_reg_table;
- uint32_t vdd_ci_control;
- pp_atomctrl_voltage_table vddc_voltage_table;
- pp_atomctrl_voltage_table vddci_voltage_table;
- pp_atomctrl_voltage_table vddgfx_voltage_table;
- pp_atomctrl_voltage_table mvdd_voltage_table;
-
- uint32_t mgcg_cgtt_local2;
- uint32_t mgcg_cgtt_local3;
- uint32_t gpio_debug;
- uint32_t mc_micro_code_feature;
- uint32_t highest_mclk;
- uint16_t acpi_vdd_ci;
- uint8_t mvdd_high_index;
- uint8_t mvdd_low_index;
- bool dll_defaule_on;
- bool performance_request_registered;
-
- /* ----------------- Low Power Features ---------------------*/
- phw_tonga_bacos bacos;
- phw_tonga_ulv_parm ulv;
- /* ----------------- CAC Stuff ---------------------*/
- uint32_t cac_table_start;
- bool cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */
- bool driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */
- bool cac_enabled;
- /* ----------------- DPM2 Parameters ---------------------*/
- uint32_t power_containment_features;
- bool enable_bapm_feature;
- bool enable_tdc_limit_feature;
- bool enable_pkg_pwr_tracking_feature;
- bool disable_uvd_power_tune_feature;
- phw_tonga_pt_defaults *power_tune_defaults;
- SMU72_Discrete_PmFuses power_tune_table;
- uint32_t ul_dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */
- uint32_t fast_watemark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */
-
- /* ----------------- Phase Shedding ---------------------*/
- bool vddc_phase_shed_control;
- /* --------------------- DI/DT --------------------------*/
- phw_tonga_display_timing display_timing;
- /* --------- ReadRegistry data for memory and engine clock margins ---- */
- uint32_t engine_clock_data;
- uint32_t memory_clock_data;
- /* -------- Thermal Temperature Setting --------------*/
- phw_tonga_dpmlevel_enable_mask dpm_level_enable_mask;
- uint32_t need_update_smu7_dpm_table;
- uint32_t sclk_dpm_key_disabled;
- uint32_t mclk_dpm_key_disabled;
- uint32_t pcie_dpm_key_disabled;
- uint32_t min_engine_clocks; /* used to store the previous dal min sclock */
- phw_tonga_pcie_perf_range pcie_gen_performance;
- phw_tonga_pcie_perf_range pcie_lane_performance;
- phw_tonga_pcie_perf_range pcie_gen_power_saving;
- phw_tonga_pcie_perf_range pcie_lane_power_saving;
- bool use_pcie_performance_levels;
- bool use_pcie_power_saving_levels;
- uint32_t activity_target[SMU72_MAX_LEVELS_GRAPHICS]; /* percentage value from 0-100, default 50 */
- uint32_t mclk_activity_target;
- uint32_t low_sclk_interrupt_threshold;
- uint32_t last_mclk_dpm_enable_mask;
- bool uvd_enabled;
- uint32_t pcc_monitor_enabled;
-
- /* --------- Power Gating States ------------*/
- bool uvd_power_gated; /* 1: gated, 0:not gated */
- bool vce_power_gated; /* 1: gated, 0:not gated */
- bool samu_power_gated; /* 1: gated, 0:not gated */
- bool acp_power_gated; /* 1: gated, 0:not gated */
- bool pg_acp_init;
-};
-
-typedef struct tonga_hwmgr tonga_hwmgr;
-
-#define TONGA_DPM2_NEAR_TDP_DEC 10
-#define TONGA_DPM2_ABOVE_SAFE_INC 5
-#define TONGA_DPM2_BELOW_SAFE_INC 20
-
-#define TONGA_DPM2_LTA_WINDOW_SIZE 7 /* Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size is 128, then this value should be Log2(128) = 7. */
-
-#define TONGA_DPM2_LTS_TRUNCATE 0
-
-#define TONGA_DPM2_TDP_SAFE_LIMIT_PERCENT 80 /* Maximum 100 */
-
-#define TONGA_DPM2_MAXPS_PERCENT_H 90 /* Maximum 0xFF */
-#define TONGA_DPM2_MAXPS_PERCENT_M 90 /* Maximum 0xFF */
-
-#define TONGA_DPM2_PWREFFICIENCYRATIO_MARGIN 50
-
-#define TONGA_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
-#define TONGA_DPM2_SQ_RAMP_MIN_POWER 0x12
-#define TONGA_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
-#define TONGA_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E
-#define TONGA_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF
-
-#define TONGA_VOLTAGE_CONTROL_NONE 0x0
-#define TONGA_VOLTAGE_CONTROL_BY_GPIO 0x1
-#define TONGA_VOLTAGE_CONTROL_BY_SVID2 0x2
-#define TONGA_VOLTAGE_CONTROL_MERGED 0x3
-
-#define TONGA_Q88_FORMAT_CONVERSION_UNIT 256 /*To convert to Q8.8 format for firmware */
-
-#define TONGA_UNUSED_GPIO_PIN 0x7F
-
-int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
-int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
-int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
-int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
deleted file mode 100644
index 47ef1ca2d78b..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
+++ /dev/null
@@ -1,590 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <asm/div64.h>
-#include "tonga_thermal.h"
-#include "tonga_hwmgr.h"
-#include "tonga_smumgr.h"
-#include "tonga_ppsmc.h"
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-/**
-* Get Fan Speed Control Parameters.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pSpeed is the address of the structure where the result is to be placed.
-* @exception Always succeeds except if we cannot zero out the output structure.
-*/
-int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info)
-{
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
-
- fan_speed_info->supports_percent_read = true;
- fan_speed_info->supports_percent_write = true;
- fan_speed_info->min_percent = 0;
- fan_speed_info->max_percent = 100;
-
- if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
- fan_speed_info->supports_rpm_read = true;
- fan_speed_info->supports_rpm_write = true;
- fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
- fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
- } else {
- fan_speed_info->min_rpm = 0;
- fan_speed_info->max_rpm = 0;
- }
-
- return 0;
-}
-
-/**
-* Get Fan Speed in percent.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param speed the address of the variable where the result (in percent) is to be placed.
-* @exception Fails if the 100% setting appears to be 0.
-*/
-int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed)
-{
- uint32_t duty100;
- uint32_t duty;
- uint64_t tmp64;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
- duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY);
-
- if (0 == duty100)
- return -EINVAL;
-
-
- tmp64 = (uint64_t)duty * 100;
- do_div(tmp64, duty100);
- *speed = (uint32_t)tmp64;
-
- if (*speed > 100)
- *speed = 100;
-
- return 0;
-}
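/*
 * Editor's note: a worked example of the duty-cycle conversion above,
 * assuming a hypothetical FMAX_DUTY100 of 255; the helper name is not part
 * of the driver.
 */
#include <stdint.h>

static uint32_t duty_to_percent(uint32_t duty, uint32_t duty100)
{
	uint64_t tmp64;

	if (duty100 == 0)	/* the driver returns -EINVAL in this case */
		return 0;

	tmp64 = (uint64_t)duty * 100;
	tmp64 /= duty100;	/* the kernel code uses do_div() here */
	return tmp64 > 100 ? 100 : (uint32_t)tmp64;
}

/* duty_to_percent(128, 255) == 50; duty_to_percent(255, 255) == 100. */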
-
-/**
-* Get Fan Speed in RPM.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param speed is the address of the structure where the result is to be placed.
-* @exception Returns not supported if no fan is found or if pulses per revolution are not set (currently a stub that always returns 0).
-*/
-int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
-{
- return 0;
-}
-
-/**
-* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param mode the fan control mode: 0 default, 1 by percent, 5 by RPM.
-* @exception Should always succeed.
-*/
-int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
-
- if (hwmgr->fan_ctrl_is_in_default_mode) {
- hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE);
- hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN);
- hwmgr->fan_ctrl_is_in_default_mode = false;
- }
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode);
-
- return 0;
-}
-
-/**
-* Reset Fan Speed Control to default mode.
-* @param hwmgr the address of the powerplay hardware manager.
-* @exception Should always succeed.
-*/
-int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
-{
- if (!hwmgr->fan_ctrl_is_in_default_mode) {
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin);
- hwmgr->fan_ctrl_is_in_default_mode = true;
- }
-
- return 0;
-}
-
-int tonga_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
- result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL;
-/*
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_FanSpeedInTableIsRPM))
- hwmgr->set_max_fan_rpm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM);
- else
- hwmgr->set_max_fan_pwm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM);
-*/
- } else {
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
- result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL;
- }
-/* TO DO: for some device IDs (e.g. 0x692b), sending this message returns an invalid command.
- if (result == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature != 0)
- result = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanTemperatureTarget, \
- hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature) ? 0 : -EINVAL);
-*/
- return result;
-}
-
-
-int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
- return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL;
-}
-
-/**
-* Set Fan Speed in percent.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param speed is the percentage value (0% - 100%) to be set.
-* @exception Fails if the 100% setting appears to be 0.
-*/
-int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed)
-{
- uint32_t duty100;
- uint32_t duty;
- uint64_t tmp64;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return -EINVAL;
-
- if (speed > 100)
- speed = 100;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
- tonga_fan_ctrl_stop_smc_fan_control(hwmgr);
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (0 == duty100)
- return -EINVAL;
-
- tmp64 = (uint64_t)speed * duty100;
- do_div(tmp64, 100);
- duty = (uint32_t)tmp64;
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
-
- return tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-}
-
-/**
-* Reset Fan Speed to default.
-* @param hwmgr the address of the powerplay hardware manager.
-* @exception Always succeeds.
-*/
-int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
- result = tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
- if (0 == result)
- result = tonga_fan_ctrl_start_smc_fan_control(hwmgr);
-	} else {
-		result = tonga_fan_ctrl_set_default_mode(hwmgr);
-	}
-
- return result;
-}
-
-/**
-* Set Fan Speed in RPM.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param speed is the RPM value (between min and max) to be set.
-* @exception Fails if the speed does not lie between min and max.
-*/
-int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
-{
- return 0;
-}
-
-/**
-* Reads the temperature from the Tonga thermal controller.
-*
-* @param hwmgr The address of the hardware manager.
-*/
-int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr)
-{
- int temp;
-
- temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP);
-
-/* Bit 9 means the reading is lower than the lowest usable value. */
- if (0 != (0x200 & temp))
- temp = TONGA_THERMAL_MAXIMUM_TEMP_READING;
- else
- temp = (temp & 0x1ff);
-
- temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
- return temp;
-}
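/*
 * Editor's note: a worked decode of the CTF_TEMP field above. The field is
 * 9 bits of degrees Celsius plus an out-of-range flag in bit 9; the result
 * is scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES (1000 in this codebase,
 * i.e. millidegrees). Names and values below are illustrative.
 */
#include <stdint.h>

#define MAX_TEMP_READING	255	/* TONGA_THERMAL_MAXIMUM_TEMP_READING */
#define UNITS_PER_CENTIGRADE	1000	/* PP_TEMPERATURE_UNITS_PER_CENTIGRADES */

static int decode_ctf_temp(uint32_t raw)
{
	int temp;

	if (raw & 0x200)	/* reading below the lowest usable value */
		temp = MAX_TEMP_READING;
	else
		temp = raw & 0x1ff;
	return temp * UNITS_PER_CENTIGRADE;
}

/* decode_ctf_temp(69) == 69000, i.e. 69 C reported in millidegrees. */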
-
-/**
-* Set the requested temperature range for high and low alert signals
-*
-* @param hwmgr The address of the hardware manager.
-* @param range Temperature range to be programmed for high and low alert signals
-* @exception PP_Result_BadInput if the input data is not valid.
-*/
-static int tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp)
-{
- uint32_t low = TONGA_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- uint32_t high = TONGA_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
- if (low < low_temp)
- low = low_temp;
- if (high > high_temp)
- high = high_temp;
-
- if (low > high)
- return -EINVAL;
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-
- return 0;
-}
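/*
 * Editor's note: a worked example of the clamping above. The alert window
 * starts at the chip limits [0, 255] C (scaled by
 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES); a requested range of
 * [10000, 120000] in those units (10 C .. 120 C) narrows it, so the
 * registers are programmed with DIG_THERM_INTL = 10 and DIG_THERM_INTH =
 * 120. Values are illustrative.
 */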
-
-/**
-* Programs thermal controller one-time setting registers
-*
-* @param hwmgr The address of the hardware manager.
-*/
-static int tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
- if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_TACH_CTRL, EDGE_PER_REV,
- hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1);
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
-
- return 0;
-}
-
-/**
-* Enable thermal alerts on the Tonga thermal controller.
-*
-* @param hwmgr The address of the hardware manager.
-*/
-static int tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr)
-{
- uint32_t alert;
-
- alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
- alert &= ~(TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
-
- /* send message to SMU to enable internal thermal interrupts */
- return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1;
-}
-
-/**
-* Disable thermal alerts on the Tonga thermal controller.
-* @param hwmgr The address of the hardware manager.
-*/
-static int tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr)
-{
- uint32_t alert;
-
- alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
- alert |= (TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
-
- /* send message to SMU to disable internal thermal interrupts */
- return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1;
-}
-
-/**
-* Uninitialize the thermal controller.
-* Currently just disables alerts.
-* @param hwmgr The address of the hardware manager.
-*/
-int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
-{
- int result = tonga_thermal_disable_alert(hwmgr);
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- tonga_fan_ctrl_set_default_mode(hwmgr);
-
- return result;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int tf_tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- uint32_t duty100;
- uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- uint16_t fdo_min, slope1, slope2;
- uint32_t reference_clock;
- int res;
- uint64_t tmp64;
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
- return 0;
-
- if (0 == data->fan_table_start) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (0 == duty100) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
- do_div(tmp64, 10000);
- fdo_min = (uint16_t)tmp64;
-
- t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
- t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
- pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
- pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
- slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
- fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
- fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = tonga_get_xclk(hwmgr);
-
- fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
- fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
- fan_table.FanControl_GL_Flag = 1;
-
- res = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end);
-/* TO DO: for some device IDs (e.g. 0x692b), sending this message returns an invalid command.
- if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0)
- res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \
- hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1);
-
- if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0)
- res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \
- hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1);
-
- if (0 != res)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
-*/
- return 0;
-}
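/*
 * Editor's note: a worked pass through the slope math above, with
 * illustrative table values (temperatures in 0.01 C, PWM in 0.01 %):
 * duty100 = 255, usPWMMin = 3000, usPWMMed = 5000, usTMin = 4500,
 * usTMed = 6500. Then pwm_diff1 = 2000, t_diff1 = 2000, and
 *
 *	slope1  = (50 + (16 * 255 * 2000) / 2000) / 100 = 41,
 *	fdo_min = 3000 * 255 / 10000 = 76,
 *	TempMin = (50 + 4500) / 100 = 45,
 *
 * i.e. the SMC ramps the 8-bit duty cycle up from 76 starting at 45 C, at
 * roughly 41/16 (about 2.56) duty steps per degree -- the factor of 16
 * appears to be a fixed-point scale for the slope.
 */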
-
-/**
-* Start the fan control on the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int tf_tonga_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
-/* If the fan table setup has failed, we could have disabled
- * PHM_PlatformCaps_MicrocodeFanControl even after this function was
- * included in the table. Make sure that we still think controlling the
- * fan is OK.
- */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
- tonga_fan_ctrl_start_smc_fan_control(hwmgr);
- tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
- }
-
- return 0;
-}
-
-/**
-* Set temperature range for high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
- struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
-
- if (range == NULL)
- return -EINVAL;
-
- return tonga_thermal_set_temperature_range(hwmgr, range->min, range->max);
-}
-
-/**
-* Programs one-time setting registers
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from initialize thermal controller routine
-*/
-int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
- return tonga_thermal_initialize(hwmgr);
-}
-
-/**
-* Enable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from enable alert routine
-*/
-int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
- return tonga_thermal_enable_alert(hwmgr);
-}
-
-/**
-* Disable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from disable alert routine
-*/
-static int tf_tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
- return tonga_thermal_disable_alert(hwmgr);
-}
-
-static const struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = {
- { NULL, tf_tonga_thermal_initialize },
- { NULL, tf_tonga_thermal_set_temperature_range },
- { NULL, tf_tonga_thermal_enable_alert },
-/* We should restrict performance levels to low before we halt the SMC.
- * On the other hand we are still in boot state when we do this so it would be pointless.
- * If this assumption changes we have to revisit this table.
- */
- { NULL, tf_tonga_thermal_setup_fan_table},
- { NULL, tf_tonga_thermal_start_smc_fan_control},
- { NULL, NULL }
-};
-
-static const struct phm_master_table_header tonga_thermal_start_thermal_controller_master = {
- 0,
- PHM_MasterTableFlag_None,
- tonga_thermal_start_thermal_controller_master_list
-};
-
-static const struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = {
- { NULL, tf_tonga_thermal_disable_alert},
- { NULL, tf_tonga_thermal_set_temperature_range},
- { NULL, tf_tonga_thermal_enable_alert},
- { NULL, NULL }
-};
-
-static const struct phm_master_table_header tonga_thermal_set_temperature_range_master = {
- 0,
- PHM_MasterTableFlag_None,
- tonga_thermal_set_temperature_range_master_list
-};
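/*
 * Editor's note: these phm master tables are ordered lists of task
 * functions that the phm core runs in sequence; a simplified sketch of that
 * dispatch loop follows. The real phm_construct_table()/phm_run_table()
 * live in the phm core; the field name tableFunction and the early-exit
 * behaviour shown here are assumptions inferred from the initializers above.
 */
static int run_task_list_sketch(struct pp_hwmgr *hwmgr,
				const struct phm_master_table_item *item)
{
	int result = 0;

	for (; item->tableFunction != NULL; item++) {
		result = item->tableFunction(hwmgr, NULL, NULL, NULL, result);
		if (result != 0)
			break;	/* stop at the first failing task */
	}
	return result;
}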
-
-int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
-{
- if (!hwmgr->thermal_controller.fanInfo.bNoFan)
- tonga_fan_ctrl_set_default_mode(hwmgr);
- return 0;
-}
-
-/**
-* Initializes the thermal controller related functions in the Hardware Manager structure.
-* @param hwmgr The address of the hardware manager.
-* @exception Any error code from the low-level communication.
-*/
-int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- result = phm_construct_table(hwmgr, &tonga_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range));
-
- if (0 == result) {
- result = phm_construct_table(hwmgr,
- &tonga_thermal_start_thermal_controller_master,
- &(hwmgr->start_thermal_controller));
- if (0 != result)
- phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
- }
-
- if (0 == result)
- hwmgr->fan_ctrl_is_in_default_mode = true;
- return result;
-}
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h
deleted file mode 100644
index aa335f267e25..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef TONGA_THERMAL_H
-#define TONGA_THERMAL_H
-
-#include "hwmgr.h"
-
-#define TONGA_THERMAL_HIGH_ALERT_MASK 0x1
-#define TONGA_THERMAL_LOW_ALERT_MASK 0x2
-
-#define TONGA_THERMAL_MINIMUM_TEMP_READING -256
-#define TONGA_THERMAL_MAXIMUM_TEMP_READING 255
-
-#define TONGA_THERMAL_MINIMUM_ALERT_TEMP 0
-#define TONGA_THERMAL_MAXIMUM_ALERT_TEMP 255
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-
-extern int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-
-extern int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr);
-extern int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
-extern int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
-extern int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
-extern int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index b764c8c05ec8..3fb5e57a378b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -29,6 +29,19 @@
#include "amd_shared.h"
#include "cgs_common.h"
+enum amd_pp_sensors {
+ AMDGPU_PP_SENSOR_GFX_SCLK = 0,
+ AMDGPU_PP_SENSOR_VDDNB,
+ AMDGPU_PP_SENSOR_VDDGFX,
+ AMDGPU_PP_SENSOR_UVD_VCLK,
+ AMDGPU_PP_SENSOR_UVD_DCLK,
+ AMDGPU_PP_SENSOR_VCE_ECCLK,
+ AMDGPU_PP_SENSOR_GPU_LOAD,
+ AMDGPU_PP_SENSOR_GFX_MCLK,
+ AMDGPU_PP_SENSOR_GPU_TEMP,
+ AMDGPU_PP_SENSOR_VCE_POWER,
+ AMDGPU_PP_SENSOR_UVD_POWER,
+};
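/*
 * Editor's note: a hypothetical caller sketch for the read_sensor hook and
 * sensor IDs introduced by this patch. The struct amd_powerplay member
 * names (pp_handle, pp_funcs) are assumptions from elsewhere in this
 * header, not part of this hunk.
 */
static int example_read_gpu_temp(struct amd_powerplay *pp, int32_t *temp)
{
	if (pp == NULL || pp->pp_funcs == NULL ||
	    pp->pp_funcs->read_sensor == NULL)
		return -EINVAL;
	return pp->pp_funcs->read_sensor(pp->pp_handle,
					 AMDGPU_PP_SENSOR_GPU_TEMP, temp);
}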
enum amd_pp_event {
AMD_PP_EVENT_INITIALIZE = 0,
@@ -131,9 +144,8 @@ struct amd_pp_init {
struct cgs_device *device;
uint32_t chip_family;
uint32_t chip_id;
- uint32_t rev_id;
- bool powercontainment_enabled;
};
+
enum amd_pp_display_config_type{
AMD_PP_DisplayConfigType_None = 0,
AMD_PP_DisplayConfigType_DP54 ,
@@ -261,6 +273,7 @@ enum amd_pp_clock_type {
struct amd_pp_clocks {
uint32_t count;
uint32_t clock[MAX_NUM_CLOCKS];
+ uint32_t latency[MAX_NUM_CLOCKS];
};
@@ -332,8 +345,6 @@ struct amd_powerplay_funcs {
int (*powergate_uvd)(void *handle, bool gate);
int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id,
void *input, void *output);
- void (*print_current_performance_level)(void *handle,
- struct seq_file *m);
int (*set_fan_control_mode)(void *handle, uint32_t mode);
int (*get_fan_control_mode)(void *handle);
int (*set_fan_speed_percent)(void *handle, uint32_t percent);
@@ -347,6 +358,7 @@ struct amd_powerplay_funcs {
int (*set_sclk_od)(void *handle, uint32_t value);
int (*get_mclk_od)(void *handle);
int (*set_mclk_od)(void *handle, uint32_t value);
+ int (*read_sensor)(void *handle, int idx, int32_t *value);
};
struct amd_powerplay {
@@ -378,4 +390,6 @@ int amd_powerplay_get_clock_by_type(void *handle,
int amd_powerplay_get_display_mode_validation_clocks(void *handle,
struct amd_pp_simple_clock_info *output);
+int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id);
+
#endif /* _AMD_POWERPLAY_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 962cb5385951..d4495839c64c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -341,7 +341,6 @@ extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
-extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr);
extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
extern int phm_set_power_state(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index bf0d2accf7bf..4f0fedd1e9d3 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -31,15 +31,20 @@
#include "hwmgr_ppt.h"
#include "ppatomctrl.h"
#include "hwmgr_ppt.h"
+#include "power_state.h"
struct pp_instance;
struct pp_hwmgr;
-struct pp_hw_power_state;
-struct pp_power_state;
-struct PP_VCEState;
struct phm_fan_speed_info;
struct pp_atomctrl_voltage_table;
+extern int amdgpu_powercontainment;
+extern int amdgpu_sclk_deep_sleep_en;
+extern unsigned amdgpu_pp_feature_mask;
+
+#define VOLTAGE_SCALE 4
+
+uint8_t convert_to_vid(uint16_t vddc);
enum DISPLAY_GAP {
DISPLAY_GAP_VBLANK_OR_WM = 0, /* Wait for vblank or MCHG watermark. */
@@ -49,7 +54,6 @@ enum DISPLAY_GAP {
};
typedef enum DISPLAY_GAP DISPLAY_GAP;
-
struct vi_dpm_level {
bool enabled;
uint32_t value;
@@ -71,6 +75,19 @@ enum PP_Result {
#define PCIE_PERF_REQ_GEN2 3
#define PCIE_PERF_REQ_GEN3 4
+enum PP_FEATURE_MASK {
+ PP_SCLK_DPM_MASK = 0x1,
+ PP_MCLK_DPM_MASK = 0x2,
+ PP_PCIE_DPM_MASK = 0x4,
+ PP_SCLK_DEEP_SLEEP_MASK = 0x8,
+ PP_POWER_CONTAINMENT_MASK = 0x10,
+ PP_UVD_HANDSHAKE_MASK = 0x20,
+ PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
+ PP_VBI_TIME_SUPPORT_MASK = 0x80,
+ PP_ULV_MASK = 0x100,
+ PP_ENABLE_GFX_CG_THRU_SMU = 0x200
+};
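/*
 * Editor's note: a minimal sketch of gating a feature on one of these mask
 * bits. hwmgr->feature_mask is added elsewhere in this patch and is
 * typically seeded from the amdgpu_pp_feature_mask module parameter; the
 * helper name below is hypothetical.
 */
static inline bool pp_feature_enabled(const struct pp_hwmgr *hwmgr,
				      enum PP_FEATURE_MASK feature)
{
	return (hwmgr->feature_mask & (uint32_t)feature) != 0;
}

/* e.g. pp_feature_enabled(hwmgr, PP_SCLK_DEEP_SLEEP_MASK) tests bit 0x8. */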
+
enum PHM_BackEnd_Magic {
PHM_Dummy_Magic = 0xAA5555AA,
PHM_RV770_Magic = 0xDCBAABCD,
@@ -294,8 +311,6 @@ struct pp_hwmgr_func {
int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
int (*power_state_set)(struct pp_hwmgr *hwmgr,
const void *state);
- void (*print_current_perforce_level)(struct pp_hwmgr *hwmgr,
- struct seq_file *m);
int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr);
int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr);
int (*display_config_changed)(struct pp_hwmgr *hwmgr);
@@ -342,6 +357,7 @@ struct pp_hwmgr_func {
int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
+ int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, int32_t *value);
};
struct pp_table_func {
@@ -351,7 +367,7 @@ struct pp_table_func {
int (*pptable_get_vce_state_table_entry)(
struct pp_hwmgr *hwmgr,
unsigned long i,
- struct PP_VCEState *vce_state,
+ struct pp_vce_state *vce_state,
void **clock_info,
unsigned long *flag);
};
@@ -570,22 +586,43 @@ struct phm_microcode_version_info {
uint32_t NB;
};
+#define PP_MAX_VCE_LEVELS 6
+
+enum PP_VCE_LEVEL {
+ PP_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
+ PP_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
+ PP_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
+ PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+ PP_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
+ PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
+
+enum PP_TABLE_VERSION {
+ PP_TABLE_V0 = 0,
+ PP_TABLE_V1,
+ PP_TABLE_V2,
+ PP_TABLE_MAX
+};
+
/**
* The main hardware manager structure.
*/
struct pp_hwmgr {
uint32_t chip_family;
uint32_t chip_id;
- uint32_t hw_revision;
- uint32_t sub_sys_id;
- uint32_t sub_vendor_id;
+ uint32_t pp_table_version;
void *device;
struct pp_smumgr *smumgr;
const void *soft_pp_table;
uint32_t soft_pp_table_size;
void *hardcode_pp_table;
bool need_pp_table_upload;
+
+ struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS];
+ uint32_t num_vce_state_tables;
+
enum amd_dpm_forced_level dpm_level;
bool block_hw_access;
struct phm_gfx_arbiter gfx_arbiter;
@@ -614,7 +651,6 @@ struct pp_hwmgr {
uint32_t num_ps;
struct pp_thermal_controller_info thermal_controller;
bool fan_ctrl_is_in_default_mode;
- bool powercontainment_enabled;
uint32_t fan_ctrl_default_mode;
uint32_t tmin;
struct phm_microcode_version_info microcode_version_info;
@@ -624,6 +660,7 @@ struct pp_hwmgr {
struct pp_power_state *boot_ps;
struct pp_power_state *uvd_ps;
struct amd_pp_display_configuration display_config;
+ uint32_t feature_mask;
};
@@ -637,16 +674,7 @@ extern int hw_init_power_state_table(struct pp_hwmgr *hwmgr);
extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
uint32_t value, uint32_t mask);
-extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
- uint32_t index, uint32_t value, uint32_t mask);
-extern uint32_t phm_read_indirect_register(struct pp_hwmgr *hwmgr,
- uint32_t indirect_port, uint32_t index);
-
-extern void phm_write_indirect_register(struct pp_hwmgr *hwmgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value);
extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
uint32_t indirect_port,
@@ -654,12 +682,7 @@ extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
uint32_t value,
uint32_t mask);
-extern void phm_wait_for_indirect_register_unequal(
- struct pp_hwmgr *hwmgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask);
+
extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
@@ -673,6 +696,8 @@ extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, st
extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
+extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
+ uint32_t voltage);
extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
@@ -683,6 +708,10 @@ extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
+extern int smu7_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+ uint32_t sclk, uint16_t id, uint16_t *voltage);
+
#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
@@ -697,44 +726,6 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
PHM_FIELD_SHIFT(reg, field))
-#define PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, index, value, mask) \
- phm_wait_on_register(hwmgr, index, value, mask)
-
-#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, index, value, mask) \
- phm_wait_for_register_unequal(hwmgr, index, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
- phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
- phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX, index, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
- phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX_0, index, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
- phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX_0, index, value, mask)
-
-/* Operations on named registers. */
-
-#define PHM_WAIT_REGISTER(hwmgr, reg, value, mask) \
- PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg, value, mask)
-
-#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
- PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
- PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
- PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
/* Operations on named fields. */
#define PHM_READ_FIELD(device, reg, field) \
@@ -762,60 +753,16 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
reg, field, fieldval))
-#define PHM_WAIT_FIELD(hwmgr, reg, field, fieldval) \
- PHM_WAIT_REGISTER(hwmgr, reg, (fieldval) \
- << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
- << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
- << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
+#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
+ phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
-#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
- PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, (fieldval) \
- << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \
- << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
+#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \
+#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-/* Operations on arrays of registers & fields. */
-
-#define PHM_READ_ARRAY_REGISTER(device, reg, offset) \
- cgs_read_register(device, mm##reg + (offset))
-
-#define PHM_WRITE_ARRAY_REGISTER(device, reg, offset, value) \
- cgs_write_register(device, mm##reg + (offset), value)
-
-#define PHM_WAIT_ARRAY_REGISTER(hwmgr, reg, offset, value, mask) \
- PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask)
-
-#define PHM_WAIT_ARRAY_REGISTER_UNEQUAL(hwmgr, reg, offset, value, mask) \
- PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask)
-
-#define PHM_READ_ARRAY_FIELD(hwmgr, reg, offset, field) \
- PHM_GET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), reg, field)
-
-#define PHM_WRITE_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \
- PHM_WRITE_ARRAY_REGISTER(hwmgr->device, reg, offset, \
- PHM_SET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), \
- reg, field, fieldvalue))
-
-#define PHM_WAIT_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \
- PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), \
- (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \
- PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_ARRAY_FIELD_UNEQUAL(hwmgr, reg, offset, field, fieldvalue) \
- PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), \
- (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \
- PHM_FIELD_MASK(reg, field))
#endif /* _HWMGR_H_ */
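For reference, the surviving indirect wait chain expands in two token-pasting steps; a minimal sketch, assuming PHM_FIELD_MASK pastes reg##__##field##_MASK and using illustrative SMC register/field names:

    PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, CG_SPLL_STATUS, SPLL_CHG_STATUS, 1);
    /* expands to */
    phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX, ixCG_SPLL_STATUS,
            1 << CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT,
            CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK);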
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
index f497e7d98e6d..0de443612312 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
@@ -23,8 +23,7 @@
#ifndef _POLARIS10_PWRVIRUS_H
#define _POLARIS10_PWRVIRUS_H
-#define mmSMC_IND_INDEX_11 0x01AC
-#define mmSMC_IND_DATA_11 0x01AD
+
#define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a
#define mmCP_HYP_MEC1_UCODE_DATA 0xf81b
#define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
index a3f0ce4d5835..9ceaed9ac52a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
@@ -158,7 +158,7 @@ struct pp_power_state {
/*Structure to hold a VCE state entry*/
-struct PP_VCEState {
+struct pp_vce_state {
uint32_t evclk;
uint32_t ecclk;
uint32_t sclk;
@@ -171,30 +171,28 @@ enum PP_MMProfilingState {
PP_MMProfilingState_Stopped
};
-struct PP_Clock_Engine_Request {
- unsigned long clientType;
- unsigned long ctxid;
+struct pp_clock_engine_request {
+ unsigned long client_type;
+ unsigned long ctx_id;
uint64_t context_handle;
unsigned long sclk;
- unsigned long sclkHardMin;
+ unsigned long sclk_hard_min;
unsigned long mclk;
unsigned long iclk;
unsigned long evclk;
unsigned long ecclk;
- unsigned long ecclkHardMin;
+ unsigned long ecclk_hard_min;
unsigned long vclk;
unsigned long dclk;
- unsigned long samclk;
- unsigned long acpclk;
- unsigned long sclkOverdrive;
- unsigned long mclkOverdrive;
+ unsigned long sclk_over_drive;
+ unsigned long mclk_over_drive;
unsigned long sclk_threshold;
unsigned long flag;
unsigned long vclk_ceiling;
unsigned long dclk_ceiling;
unsigned long num_cus;
- unsigned long pmflag;
- enum PP_MMProfilingState MMProfilingState;
+ unsigned long pm_flag;
+ enum PP_MMProfilingState mm_profiling_state;
};
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
index d7d83b7c7f95..bfdbec10cdd5 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
@@ -43,5 +43,8 @@
} while (0)
+#define GET_FLEXIBLE_ARRAY_MEMBER_ADDR(type, member, ptr, n) \
+ (type *)((char *)&(ptr)->member + (sizeof(type) * (n)))
+
#endif /* PP_DEBUG_H */
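A minimal usage sketch of the new helper, with hypothetical struct and field names: for a table whose tail is a one-element array standing in for a flexible array member, the macro yields a typed pointer to element n:

    struct clock_table {
            uint32_t count;
            uint32_t entries[1];    /* flexible-array-style tail */
    };

    /* equivalent to &tbl->entries[2] */
    uint32_t *e = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(uint32_t, entries, tbl, 2);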
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71.h b/drivers/gpu/drm/amd/powerplay/inc/smu71.h
new file mode 100644
index 000000000000..71c9b2d28640
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu71.h
@@ -0,0 +1,510 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU71_H
+#define SMU71_H
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(push, 1)
+#endif
+
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 4
+#define SMU__VARIANT__ICELAND 1
+#define SMU__DGPU_ONLY 1
+#define SMU__DYNAMIC_MCARB_SETTINGS 1
+
+enum SID_OPTION {
+ SID_OPTION_HI,
+ SID_OPTION_LO,
+ SID_OPTION_COUNT
+};
+
+typedef struct {
+ uint32_t high;
+ uint32_t low;
+} data_64_t;
+
+typedef struct {
+ data_64_t high;
+ data_64_t low;
+} data_128_t;
+
+#define SMU7_CONTEXT_ID_SMC 1
+#define SMU7_CONTEXT_ID_VBIOS 2
+
+#define SMU71_MAX_LEVELS_VDDC 8
+#define SMU71_MAX_LEVELS_VDDCI 4
+#define SMU71_MAX_LEVELS_MVDD 4
+#define SMU71_MAX_LEVELS_VDDNB 8
+
+#define SMU71_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE
+#define SMU71_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS
+#define SMU71_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS
+#define SMU71_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS
+#define SMU71_MAX_ENTRIES_SMIO 32
+
+#define DPM_NO_LIMIT 0
+#define DPM_NO_UP 1
+#define DPM_GO_DOWN 2
+#define DPM_GO_UP 3
+
+#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0
+#define SMU7_FIRST_DPM_MEMORY_LEVEL 0
+
+#define GPIO_CLAMP_MODE_VRHOT 1
+#define GPIO_CLAMP_MODE_THERM 2
+#define GPIO_CLAMP_MODE_DC 4
+
+#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
+#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
+#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6
+#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
+#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9
+#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
+#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12
+#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15
+#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18
+#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
+#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21
+#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
+#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
+#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
+#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
+#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
+
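These pairs pack ten 3-bit DPM indices into a single scratch register; decoding any one of them is a mask and a shift (scratch_b below is assumed to hold the value already read back):

    uint32_t curr_vce_index = (scratch_b & SCRATCH_B_CURR_VCE_INDEX_MASK)
                              >> SCRATCH_B_CURR_VCE_INDEX_SHIFT;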
+
+#if defined SMU__DGPU_ONLY
+#define SMU71_DTE_ITERATIONS 5
+#define SMU71_DTE_SOURCES 3
+#define SMU71_DTE_SINKS 1
+#define SMU71_NUM_CPU_TES 0
+#define SMU71_NUM_GPU_TES 1
+#define SMU71_NUM_NON_TES 2
+
+#endif
+
+#if defined SMU__FUSION_ONLY
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 5
+#define SMU7_DTE_SINKS 3
+#define SMU7_NUM_CPU_TES 2
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+
+#endif
+
+struct SMU71_PIDController
+{
+ uint32_t Ki;
+ int32_t LFWindupUpperLim;
+ int32_t LFWindupLowerLim;
+ uint32_t StatePrecision;
+ uint32_t LfPrecision;
+ uint32_t LfOffset;
+ uint32_t MaxState;
+ uint32_t MaxLfFraction;
+ uint32_t StateShift;
+};
+
+typedef struct SMU71_PIDController SMU71_PIDController;
+
+struct SMU7_LocalDpmScoreboard
+{
+ uint32_t PercentageBusy;
+
+ int32_t PIDError;
+ int32_t PIDIntegral;
+ int32_t PIDOutput;
+
+ uint32_t SigmaDeltaAccum;
+ uint32_t SigmaDeltaOutput;
+ uint32_t SigmaDeltaLevel;
+
+ uint32_t UtilizationSetpoint;
+
+ uint8_t TdpClampMode;
+ uint8_t TdcClampMode;
+ uint8_t ThermClampMode;
+ uint8_t VoltageBusy;
+
+ int8_t CurrLevel;
+ int8_t TargLevel;
+ uint8_t LevelChangeInProgress;
+ uint8_t UpHyst;
+
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t DpmEnable;
+ uint8_t DpmRunning;
+
+ uint8_t DpmForce;
+ uint8_t DpmForceLevel;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+
+ uint32_t MinimumPerfSclk;
+
+ uint8_t AcpiReq;
+ uint8_t AcpiAck;
+ uint8_t GfxClkSlow;
+ uint8_t GpioClampMode;
+
+ uint8_t FpsFilterWeight;
+ uint8_t EnabledLevelsChange;
+ uint8_t DteClampMode;
+ uint8_t FpsClampMode;
+
+ uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_GRAPHICS];
+ uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_GRAPHICS];
+
+ void (*TargetStateCalculator)(uint8_t);
+ void (*SavedTargetStateCalculator)(uint8_t);
+
+ uint16_t AutoDpmInterval;
+ uint16_t AutoDpmRange;
+
+ uint8_t FpsEnabled;
+ uint8_t MaxPerfLevel;
+ uint8_t AllowLowClkInterruptToHost;
+ uint8_t FpsRunning;
+
+ uint32_t MaxAllowedFrequency;
+};
+
+typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard;
+
+#define SMU7_MAX_VOLTAGE_CLIENTS 12
+
+struct SMU7_VoltageScoreboard
+{
+ uint16_t CurrentVoltage;
+ uint16_t HighestVoltage;
+ uint16_t MaxVid;
+ uint8_t HighestVidOffset;
+ uint8_t CurrentVidOffset;
+#if defined (SMU__DGPU_ONLY)
+ uint8_t CurrentPhases;
+ uint8_t HighestPhases;
+#else
+ uint8_t AvsOffset;
+ uint8_t AvsOffsetApplied;
+#endif
+ uint8_t ControllerBusy;
+ uint8_t CurrentVid;
+ uint16_t RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS];
+#if defined (SMU__DGPU_ONLY)
+ uint8_t RequestedPhases[SMU7_MAX_VOLTAGE_CLIENTS];
+#endif
+ uint8_t EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS];
+ uint8_t TargetIndex;
+ uint8_t Delay;
+ uint8_t ControllerEnable;
+ uint8_t ControllerRunning;
+ uint16_t CurrentStdVoltageHiSidd;
+ uint16_t CurrentStdVoltageLoSidd;
+#if defined (SMU__DGPU_ONLY)
+ uint16_t RequestedVddci;
+ uint16_t CurrentVddci;
+ uint16_t HighestVddci;
+ uint8_t CurrentVddciVid;
+ uint8_t TargetVddciIndex;
+#endif
+};
+
+typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard;
+
+// -------------------------------------------------------------------------------------------------------------------------
+#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
+
+struct SMU7_PCIeLinkSpeedScoreboard
+{
+ uint8_t DpmEnable;
+ uint8_t DpmRunning;
+ uint8_t DpmForce;
+ uint8_t DpmForceLevel;
+
+ uint8_t CurrentLinkSpeed;
+ uint8_t EnabledLevelsChange;
+ uint16_t AutoDpmInterval;
+
+ uint16_t AutoDpmRange;
+ uint16_t AutoDpmCount;
+
+ uint8_t DpmMode;
+ uint8_t AcpiReq;
+ uint8_t AcpiAck;
+ uint8_t CurrentLinkLevel;
+
+};
+
+typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard;
+
+// -------------------------------------------------------- CAC table ------------------------------------------------------
+#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
+
+#define SMU7_SCALE_I 7
+#define SMU7_SCALE_R 12
+
+struct SMU7_PowerScoreboard
+{
+ uint16_t MinVoltage;
+ uint16_t MaxVoltage;
+
+ uint32_t AvgGpuPower;
+
+ uint16_t VddcLeakagePower[SID_OPTION_COUNT];
+ uint16_t VddcSclkConstantPower[SID_OPTION_COUNT];
+ uint16_t VddcSclkDynamicPower[SID_OPTION_COUNT];
+ uint16_t VddcNonSclkDynamicPower[SID_OPTION_COUNT];
+ uint16_t VddcTotalPower[SID_OPTION_COUNT];
+ uint16_t VddcTotalCurrent[SID_OPTION_COUNT];
+ uint16_t VddcLoadVoltage[SID_OPTION_COUNT];
+ uint16_t VddcNoLoadVoltage[SID_OPTION_COUNT];
+
+ uint16_t DisplayPhyPower;
+ uint16_t PciePhyPower;
+
+ uint16_t VddciTotalPower;
+ uint16_t Vddr1TotalPower;
+
+ uint32_t RocPower;
+
+ uint32_t last_power;
+ uint32_t enableWinAvg;
+
+ uint32_t lkg_acc;
+ uint16_t VoltLkgeScaler;
+ uint16_t TempLkgeScaler;
+
+ uint32_t uvd_cac_dclk;
+ uint32_t uvd_cac_vclk;
+ uint32_t vce_cac_eclk;
+ uint32_t samu_cac_samclk;
+ uint32_t display_cac_dispclk;
+ uint32_t acp_cac_aclk;
+ uint32_t unb_cac;
+
+ uint32_t WinTime;
+
+ uint16_t GpuPwr_MAWt;
+ uint16_t FilteredVddcTotalPower;
+
+ uint8_t CalculationRepeats;
+ uint8_t WaterfallUp;
+ uint8_t WaterfallDown;
+ uint8_t WaterfallLimit;
+};
+
+typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard;
+
+// --------------------------------------------------------------------------------------------------
+
+struct SMU7_ThermalScoreboard
+{
+ int16_t GpuLimit;
+ int16_t GpuHyst;
+ uint16_t CurrGnbTemp;
+ uint16_t FilteredGnbTemp;
+ uint8_t ControllerEnable;
+ uint8_t ControllerRunning;
+ uint8_t WaterfallUp;
+ uint8_t WaterfallDown;
+ uint8_t WaterfallLimit;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_ThermalScoreboard SMU7_ThermalScoreboard;
+
+// For FeatureEnables:
+#define SMU7_SCLK_DPM_CONFIG_MASK 0x01
+#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02
+#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04
+#define SMU7_MCLK_DPM_CONFIG_MASK 0x08
+#define SMU7_UVD_DPM_CONFIG_MASK 0x10
+#define SMU7_VCE_DPM_CONFIG_MASK 0x20
+#define SMU7_ACP_DPM_CONFIG_MASK 0x40
+#define SMU7_SAMU_DPM_CONFIG_MASK 0x80
+#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100
+
+#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001
+#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002
+#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100
+#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200
+#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
+#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
+
+// All 'soft registers' should be uint32_t.
+struct SMU71_SoftRegisters
+{
+ uint32_t RefClockFrequency;
+ uint32_t PmTimerPeriod;
+ uint32_t FeatureEnables;
+#if defined (SMU__DGPU_ONLY)
+ uint32_t PreVBlankGap;
+ uint32_t VBlankTimeout;
+ uint32_t TrainTimeGap;
+ uint32_t MvddSwitchTime;
+ uint32_t LongestAcpiTrainTime;
+ uint32_t AcpiDelay;
+ uint32_t G5TrainTime;
+ uint32_t DelayMpllPwron;
+ uint32_t VoltageChangeTimeout;
+#endif
+ uint32_t HandshakeDisables;
+
+ uint8_t DisplayPhy1Config;
+ uint8_t DisplayPhy2Config;
+ uint8_t DisplayPhy3Config;
+ uint8_t DisplayPhy4Config;
+
+ uint8_t DisplayPhy5Config;
+ uint8_t DisplayPhy6Config;
+ uint8_t DisplayPhy7Config;
+ uint8_t DisplayPhy8Config;
+
+ uint32_t AverageGraphicsActivity;
+ uint32_t AverageMemoryActivity;
+ uint32_t AverageGioActivity;
+
+ uint8_t SClkDpmEnabledLevels;
+ uint8_t MClkDpmEnabledLevels;
+ uint8_t LClkDpmEnabledLevels;
+ uint8_t PCIeDpmEnabledLevels;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+ uint32_t UlvEnterCount;
+ uint32_t UlvTime;
+ uint32_t UcodeLoadStatus;
+ uint8_t DPMFreezeAndForced;
+ uint8_t Activity_Weight;
+ uint8_t Reserved8[2];
+ uint32_t Reserved;
+};
+
+typedef struct SMU71_SoftRegisters SMU71_SoftRegisters;
+
+struct SMU71_Firmware_Header
+{
+ uint32_t Digest[5];
+ uint32_t Version;
+ uint32_t HeaderSize;
+ uint32_t Flags;
+ uint32_t EntryPoint;
+ uint32_t CodeSize;
+ uint32_t ImageSize;
+
+ uint32_t Rtos;
+ uint32_t SoftRegisters;
+ uint32_t DpmTable;
+ uint32_t FanTable;
+ uint32_t CacConfigTable;
+ uint32_t CacStatusTable;
+
+ uint32_t mcRegisterTable;
+
+ uint32_t mcArbDramTimingTable;
+
+ uint32_t PmFuseTable;
+ uint32_t Globals;
+ uint32_t UvdDpmTable;
+ uint32_t AcpDpmTable;
+ uint32_t VceDpmTable;
+ uint32_t SamuDpmTable;
+ uint32_t UlvSettings;
+ uint32_t Reserved[37];
+ uint32_t Signature;
+};
+
+typedef struct SMU71_Firmware_Header SMU71_Firmware_Header;
+
+struct SMU7_HystController_Data
+{
+ uint8_t waterfall_up;
+ uint8_t waterfall_down;
+ uint8_t pstate;
+ uint8_t clamp_mode;
+};
+
+typedef struct SMU7_HystController_Data SMU7_HystController_Data;
+
+#define SMU71_FIRMWARE_HEADER_LOCATION 0x20000
+
+enum DisplayConfig {
+ PowerDown = 1,
+ DP54x4,
+ DP54x2,
+ DP54x1,
+ DP27x4,
+ DP27x2,
+ DP27x1,
+ HDMI297,
+ HDMI162,
+ LVDS,
+ DP324x4,
+ DP324x2,
+ DP324x1
+};
+
+//#define SX_BLOCK_COUNT 8
+//#define MC_BLOCK_COUNT 1
+//#define CPL_BLOCK_COUNT 27
+
+#if defined SMU__VARIANT__ICELAND
+ #define SX_BLOCK_COUNT 8
+ #define MC_BLOCK_COUNT 1
+ #define CPL_BLOCK_COUNT 29
+#endif
+
+struct SMU7_Local_Cac {
+ uint8_t BlockId;
+ uint8_t SignalId;
+ uint8_t Threshold;
+ uint8_t Padding;
+};
+
+typedef struct SMU7_Local_Cac SMU7_Local_Cac;
+
+struct SMU7_Local_Cac_Table {
+ SMU7_Local_Cac SxLocalCac[SX_BLOCK_COUNT];
+ SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT];
+ SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT];
+};
+
+typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table;
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(pop)
+#endif
+
+#endif
+
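The driver finds the SMU tables by reading this header at the fixed SMC address above and following the per-table pointers; a sketch, where read_smc_sram is a hypothetical accessor for the SMC address space:

    uint32_t dpm_table_addr = read_smc_sram(smumgr,
            SMU71_FIRMWARE_HEADER_LOCATION +
            offsetof(SMU71_Firmware_Header, DpmTable));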
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h
new file mode 100644
index 000000000000..c0e3936d5c2e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h
@@ -0,0 +1,631 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU71_DISCRETE_H
+#define SMU71_DISCRETE_H
+
+#include "smu71.h"
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(push, 1)
+#endif
+
+#define VDDC_ON_SVI2 0x1
+#define VDDCI_ON_SVI2 0x2
+#define MVDD_ON_SVI2 0x4
+
+struct SMU71_Discrete_VoltageLevel
+{
+ uint16_t Voltage;
+ uint16_t StdVoltageHiSidd;
+ uint16_t StdVoltageLoSidd;
+ uint8_t Smio;
+ uint8_t padding;
+};
+
+typedef struct SMU71_Discrete_VoltageLevel SMU71_Discrete_VoltageLevel;
+
+struct SMU71_Discrete_GraphicsLevel
+{
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+
+ uint32_t SclkFrequency;
+
+ uint8_t pcieDpmLevel;
+ uint8_t DeepSleepDivId;
+ uint16_t ActivityLevel;
+
+ uint32_t CgSpllFuncCntl3;
+ uint32_t CgSpllFuncCntl4;
+ uint32_t SpllSpreadSpectrum;
+ uint32_t SpllSpreadSpectrum2;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+ uint8_t SclkDid;
+ uint8_t DisplayWatermark;
+ uint8_t EnabledForActivity;
+ uint8_t EnabledForThrottle;
+ uint8_t UpHyst;
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t PowerThrottle;
+};
+
+typedef struct SMU71_Discrete_GraphicsLevel SMU71_Discrete_GraphicsLevel;
+
+struct SMU71_Discrete_ACPILevel
+{
+ uint32_t Flags;
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+ uint32_t SclkFrequency;
+ uint8_t SclkDid;
+ uint8_t DisplayWatermark;
+ uint8_t DeepSleepDivId;
+ uint8_t padding;
+ uint32_t CgSpllFuncCntl;
+ uint32_t CgSpllFuncCntl2;
+ uint32_t CgSpllFuncCntl3;
+ uint32_t CgSpllFuncCntl4;
+ uint32_t SpllSpreadSpectrum;
+ uint32_t SpllSpreadSpectrum2;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+};
+
+typedef struct SMU71_Discrete_ACPILevel SMU71_Discrete_ACPILevel;
+
+struct SMU71_Discrete_Ulv
+{
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+ uint16_t VddcOffset;
+ uint8_t VddcOffsetVid;
+ uint8_t VddcPhase;
+ uint32_t Reserved;
+};
+
+typedef struct SMU71_Discrete_Ulv SMU71_Discrete_Ulv;
+
+struct SMU71_Discrete_MemoryLevel
+{
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+ uint32_t MinVddci;
+ uint32_t MinMvdd;
+
+ uint32_t MclkFrequency;
+
+ uint8_t EdcReadEnable;
+ uint8_t EdcWriteEnable;
+ uint8_t RttEnable;
+ uint8_t StutterEnable;
+
+ uint8_t StrobeEnable;
+ uint8_t StrobeRatio;
+ uint8_t EnabledForThrottle;
+ uint8_t EnabledForActivity;
+
+ uint8_t UpHyst;
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t padding;
+
+ uint16_t ActivityLevel;
+ uint8_t DisplayWatermark;
+ uint8_t padding1;
+
+ uint32_t MpllFuncCntl;
+ uint32_t MpllFuncCntl_1;
+ uint32_t MpllFuncCntl_2;
+ uint32_t MpllAdFuncCntl;
+ uint32_t MpllDqFuncCntl;
+ uint32_t MclkPwrmgtCntl;
+ uint32_t DllCntl;
+ uint32_t MpllSs1;
+ uint32_t MpllSs2;
+};
+
+typedef struct SMU71_Discrete_MemoryLevel SMU71_Discrete_MemoryLevel;
+
+struct SMU71_Discrete_LinkLevel
+{
+ uint8_t PcieGenSpeed; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3
+ uint8_t PcieLaneCount; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16
+ uint8_t EnabledForActivity;
+ uint8_t SPC;
+ uint32_t DownThreshold;
+ uint32_t UpThreshold;
+ uint32_t Reserved;
+};
+
+typedef struct SMU71_Discrete_LinkLevel SMU71_Discrete_LinkLevel;
+
+
+#ifdef SMU__DYNAMIC_MCARB_SETTINGS
+// MC ARB DRAM Timing registers.
+struct SMU71_Discrete_MCArbDramTimingTableEntry
+{
+ uint32_t McArbDramTiming;
+ uint32_t McArbDramTiming2;
+ uint8_t McArbBurstTime;
+ uint8_t padding[3];
+};
+
+typedef struct SMU71_Discrete_MCArbDramTimingTableEntry SMU71_Discrete_MCArbDramTimingTableEntry;
+
+struct SMU71_Discrete_MCArbDramTimingTable
+{
+ SMU71_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
+};
+
+typedef struct SMU71_Discrete_MCArbDramTimingTable SMU71_Discrete_MCArbDramTimingTable;
+#endif
+
+// UVD VCLK/DCLK state (level) definition.
+struct SMU71_Discrete_UvdLevel
+{
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint16_t MinVddc;
+ uint8_t MinVddcPhases;
+ uint8_t VclkDivider;
+ uint8_t DclkDivider;
+ uint8_t padding[3];
+};
+
+typedef struct SMU71_Discrete_UvdLevel SMU71_Discrete_UvdLevel;
+
+// Clocks for other external blocks (VCE, ACP, SAMU).
+struct SMU71_Discrete_ExtClkLevel
+{
+ uint32_t Frequency;
+ uint16_t MinVoltage;
+ uint8_t MinPhases;
+ uint8_t Divider;
+};
+
+typedef struct SMU71_Discrete_ExtClkLevel SMU71_Discrete_ExtClkLevel;
+
+// Everything we need to track about the current state.
+// Used instead of copies of the GraphicsLevel and MemoryLevel structures
+// to hold the state parameters that need to be checked later.
+// We don't need to cache everything about a state, just a few parameters.
+struct SMU71_Discrete_StateInfo
+{
+ uint32_t SclkFrequency;
+ uint32_t MclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint32_t SamclkFrequency;
+ uint32_t AclkFrequency;
+ uint32_t EclkFrequency;
+ uint16_t MvddVoltage;
+ uint16_t padding16;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+ uint8_t McRegIndex;
+ uint8_t SeqIndex;
+ uint8_t SclkDid;
+ int8_t SclkIndex;
+ int8_t MclkIndex;
+ uint8_t PCIeGen;
+
+};
+
+typedef struct SMU71_Discrete_StateInfo SMU71_Discrete_StateInfo;
+
+
+struct SMU71_Discrete_DpmTable
+{
+ // Multi-DPM controller settings
+ SMU71_PIDController GraphicsPIDController;
+ SMU71_PIDController MemoryPIDController;
+ SMU71_PIDController LinkPIDController;
+
+ uint32_t SystemFlags;
+
+ // SMIO masks for voltage and phase controls
+ uint32_t SmioMaskVddcVid;
+ uint32_t SmioMaskVddcPhase;
+ uint32_t SmioMaskVddciVid;
+ uint32_t SmioMaskMvddVid;
+
+ uint32_t VddcLevelCount;
+ uint32_t VddciLevelCount;
+ uint32_t MvddLevelCount;
+
+ SMU71_Discrete_VoltageLevel VddcLevel [SMU71_MAX_LEVELS_VDDC];
+ SMU71_Discrete_VoltageLevel VddciLevel [SMU71_MAX_LEVELS_VDDCI];
+ SMU71_Discrete_VoltageLevel MvddLevel [SMU71_MAX_LEVELS_MVDD];
+
+ uint8_t GraphicsDpmLevelCount;
+ uint8_t MemoryDpmLevelCount;
+ uint8_t LinkLevelCount;
+ uint8_t MasterDeepSleepControl;
+
+ uint32_t Reserved[5];
+
+ // State table entries for each DPM state
+ SMU71_Discrete_GraphicsLevel GraphicsLevel [SMU71_MAX_LEVELS_GRAPHICS];
+ SMU71_Discrete_MemoryLevel MemoryACPILevel;
+ SMU71_Discrete_MemoryLevel MemoryLevel [SMU71_MAX_LEVELS_MEMORY];
+ SMU71_Discrete_LinkLevel LinkLevel [SMU71_MAX_LEVELS_LINK];
+ SMU71_Discrete_ACPILevel ACPILevel;
+
+ uint32_t SclkStepSize;
+ uint32_t Smio [SMU71_MAX_ENTRIES_SMIO];
+
+ uint8_t GraphicsBootLevel;
+ uint8_t GraphicsVoltageChangeEnable;
+ uint8_t GraphicsThermThrottleEnable;
+ uint8_t GraphicsInterval;
+
+ uint8_t VoltageInterval;
+ uint8_t ThermalInterval;
+ uint16_t TemperatureLimitHigh;
+
+ uint16_t TemperatureLimitLow;
+ uint8_t MemoryBootLevel;
+ uint8_t MemoryVoltageChangeEnable;
+
+ uint8_t MemoryInterval;
+ uint8_t MemoryThermThrottleEnable;
+ uint8_t MergedVddci;
+ uint8_t padding2;
+
+ uint16_t VoltageResponseTime;
+ uint16_t PhaseResponseTime;
+
+ uint8_t PCIeBootLinkLevel;
+ uint8_t PCIeGenInterval;
+ uint8_t DTEInterval;
+ uint8_t DTEMode;
+
+ uint8_t SVI2Enable;
+ uint8_t VRHotGpio;
+ uint8_t AcDcGpio;
+ uint8_t ThermGpio;
+
+ uint32_t DisplayCac;
+
+ uint16_t MaxPwr;
+ uint16_t NomPwr;
+
+ uint16_t FpsHighThreshold;
+ uint16_t FpsLowThreshold;
+
+ uint16_t BAPMTI_R [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS];
+ uint16_t BAPMTI_RC [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS];
+
+ uint8_t DTEAmbientTempBase;
+ uint8_t DTETjOffset;
+ uint8_t GpuTjMax;
+ uint8_t GpuTjHyst;
+
+ uint16_t BootVddc;
+ uint16_t BootVddci;
+
+ uint16_t BootMVdd;
+ uint16_t padding;
+
+ uint32_t BAPM_TEMP_GRADIENT;
+
+ uint32_t LowSclkInterruptThreshold;
+ uint32_t VddGfxReChkWait;
+
+ uint16_t PPM_PkgPwrLimit;
+ uint16_t PPM_TemperatureLimit;
+
+ uint16_t DefaultTdp;
+ uint16_t TargetTdp;
+};
+
+typedef struct SMU71_Discrete_DpmTable SMU71_Discrete_DpmTable;
+
+// --------------------------------------------------- AC Timing Parameters ------------------------------------------------
+#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
+#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU71_MAX_LEVELS_MEMORY
+
+struct SMU71_Discrete_MCRegisterAddress
+{
+ uint16_t s0;
+ uint16_t s1;
+};
+
+typedef struct SMU71_Discrete_MCRegisterAddress SMU71_Discrete_MCRegisterAddress;
+
+struct SMU71_Discrete_MCRegisterSet
+{
+ uint32_t value[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMU71_Discrete_MCRegisterSet SMU71_Discrete_MCRegisterSet;
+
+struct SMU71_Discrete_MCRegisters
+{
+ uint8_t last;
+ uint8_t reserved[3];
+ SMU71_Discrete_MCRegisterAddress address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+ SMU71_Discrete_MCRegisterSet data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMU71_Discrete_MCRegisters SMU71_Discrete_MCRegisters;
+
+
+// --------------------------------------------------- Fan Table -----------------------------------------------------------
+struct SMU71_Discrete_FanTable
+{
+ uint16_t FdoMode;
+ int16_t TempMin;
+ int16_t TempMed;
+ int16_t TempMax;
+ int16_t Slope1;
+ int16_t Slope2;
+ int16_t FdoMin;
+ int16_t HystUp;
+ int16_t HystDown;
+ int16_t HystSlope;
+ int16_t TempRespLim;
+ int16_t TempCurr;
+ int16_t SlopeCurr;
+ int16_t PwmCurr;
+ uint32_t RefreshPeriod;
+ int16_t FdoMax;
+ uint8_t TempSrc;
+ int8_t Padding;
+};
+
+typedef struct SMU71_Discrete_FanTable SMU71_Discrete_FanTable;
+
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG 4
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG)
+
+struct SMU71_MclkDpmScoreboard
+{
+
+ uint32_t PercentageBusy;
+
+ int32_t PIDError;
+ int32_t PIDIntegral;
+ int32_t PIDOutput;
+
+ uint32_t SigmaDeltaAccum;
+ uint32_t SigmaDeltaOutput;
+ uint32_t SigmaDeltaLevel;
+
+ uint32_t UtilizationSetpoint;
+
+ uint8_t TdpClampMode;
+ uint8_t TdcClampMode;
+ uint8_t ThermClampMode;
+ uint8_t VoltageBusy;
+
+ int8_t CurrLevel;
+ int8_t TargLevel;
+ uint8_t LevelChangeInProgress;
+ uint8_t UpHyst;
+
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t DpmEnable;
+ uint8_t DpmRunning;
+
+ uint8_t DpmForce;
+ uint8_t DpmForceLevel;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+
+ uint32_t MinimumPerfMclk;
+
+ uint8_t AcpiReq;
+ uint8_t AcpiAck;
+ uint8_t MclkSwitchInProgress;
+ uint8_t MclkSwitchCritical;
+
+ uint8_t TargetMclkIndex;
+ uint8_t TargetMvddIndex;
+ uint8_t MclkSwitchResult;
+
+ uint8_t EnabledLevelsChange;
+
+ uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_MEMORY];
+ uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_MEMORY];
+
+ void (*TargetStateCalculator)(uint8_t);
+ void (*SavedTargetStateCalculator)(uint8_t);
+
+ uint16_t AutoDpmInterval;
+ uint16_t AutoDpmRange;
+
+ uint16_t MclkSwitchingTime;
+ uint8_t padding[2];
+};
+
+typedef struct SMU71_MclkDpmScoreboard SMU71_MclkDpmScoreboard;
+
+struct SMU71_UlvScoreboard
+{
+ uint8_t EnterUlv;
+ uint8_t ExitUlv;
+ uint8_t UlvActive;
+ uint8_t WaitingForUlv;
+ uint8_t UlvEnable;
+ uint8_t UlvRunning;
+ uint8_t UlvMasterEnable;
+ uint8_t padding;
+ uint32_t UlvAbortedCount;
+ uint32_t UlvTimeStamp;
+};
+
+typedef struct SMU71_UlvScoreboard SMU71_UlvScoreboard;
+
+struct SMU71_VddGfxScoreboard
+{
+ uint8_t VddGfxEnable;
+ uint8_t VddGfxActive;
+ uint8_t padding[2];
+
+ uint32_t VddGfxEnteredCount;
+ uint32_t VddGfxAbortedCount;
+};
+
+typedef struct SMU71_VddGfxScoreboard SMU71_VddGfxScoreboard;
+
+struct SMU71_AcpiScoreboard {
+ uint32_t SavedInterruptMask[2];
+ uint8_t LastACPIRequest;
+ uint8_t CgBifResp;
+ uint8_t RequestType;
+ uint8_t Padding;
+ SMU71_Discrete_ACPILevel D0Level;
+};
+
+typedef struct SMU71_AcpiScoreboard SMU71_AcpiScoreboard;
+
+
+struct SMU71_Discrete_PmFuses {
+ // dw0-dw1
+ uint8_t BapmVddCVidHiSidd[8];
+
+ // dw2-dw3
+ uint8_t BapmVddCVidLoSidd[8];
+
+ // dw4-dw5
+ uint8_t VddCVid[8];
+
+ // dw6
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t SviLoadLineTrimVddC;
+ uint8_t SviLoadLineOffsetVddC;
+
+ // dw7
+ uint16_t TDC_VDDC_PkgLimit;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+
+ // dw8
+ uint8_t TdcWaterfallCtl;
+ uint8_t LPMLTemperatureMin;
+ uint8_t LPMLTemperatureMax;
+ uint8_t Reserved;
+
+ // dw9-dw12
+ uint8_t LPMLTemperatureScaler[16];
+
+ // dw13-dw14
+ int16_t FuzzyFan_ErrorSetDelta;
+ int16_t FuzzyFan_ErrorRateSetDelta;
+ int16_t FuzzyFan_PwmSetDelta;
+ uint16_t Reserved6;
+
+ // dw15
+ uint8_t GnbLPML[16];
+
+ // dw15
+ uint8_t GnbLPMLMaxVid;
+ uint8_t GnbLPMLMinVid;
+ uint8_t Reserved1[2];
+
+ // dw16
+ uint16_t BapmVddCBaseLeakageHiSidd;
+ uint16_t BapmVddCBaseLeakageLoSidd;
+};
+
+typedef struct SMU71_Discrete_PmFuses SMU71_Discrete_PmFuses;
+
+struct SMU71_Discrete_Log_Header_Table {
+ uint32_t version;
+ uint32_t asic_id;
+ uint16_t flags;
+ uint16_t entry_size;
+ uint32_t total_size;
+ uint32_t num_of_entries;
+ uint8_t type;
+ uint8_t mode;
+ uint8_t filler_0[2];
+ uint32_t filler_1[2];
+};
+
+typedef struct SMU71_Discrete_Log_Header_Table SMU71_Discrete_Log_Header_Table;
+
+struct SMU71_Discrete_Log_Cntl {
+ uint8_t Enabled;
+ uint8_t Type;
+ uint8_t padding[2];
+ uint32_t BufferSize;
+ uint32_t SamplesLogged;
+ uint32_t SampleSize;
+ uint32_t AddrL;
+ uint32_t AddrH;
+};
+
+typedef struct SMU71_Discrete_Log_Cntl SMU71_Discrete_Log_Cntl;
+
+#if defined SMU__DGPU_ONLY
+ #define CAC_ACC_NW_NUM_OF_SIGNALS 83
+#endif
+
+
+struct SMU71_Discrete_Cac_Collection_Table {
+ uint32_t temperature;
+ uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS];
+ uint32_t filler[4];
+};
+
+typedef struct SMU71_Discrete_Cac_Collection_Table SMU71_Discrete_Cac_Collection_Table;
+
+struct SMU71_Discrete_Cac_Verification_Table {
+ uint32_t VddcTotalPower;
+ uint32_t VddcLeakagePower;
+ uint32_t VddcConstantPower;
+ uint32_t VddcGfxDynamicPower;
+ uint32_t VddcUvdDynamicPower;
+ uint32_t VddcVceDynamicPower;
+ uint32_t VddcAcpDynamicPower;
+ uint32_t VddcPcieDynamicPower;
+ uint32_t VddcDceDynamicPower;
+ uint32_t VddcCurrent;
+ uint32_t VddcVoltage;
+ uint32_t VddciTotalPower;
+ uint32_t VddciLeakagePower;
+ uint32_t VddciConstantPower;
+ uint32_t VddciDynamicPower;
+ uint32_t Vddr1TotalPower;
+ uint32_t Vddr1LeakagePower;
+ uint32_t Vddr1ConstantPower;
+ uint32_t Vddr1DynamicPower;
+ uint32_t spare[8];
+ uint32_t temperature;
+};
+
+typedef struct SMU71_Discrete_Cac_Verification_Table SMU71_Discrete_Cac_Verification_Table;
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(pop)
+#endif
+
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
new file mode 100644
index 000000000000..65eb630bfea3
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _PP_COMMON_H
+#define _PP_COMMON_H
+
+#include "smu7_ppsmc.h"
+#include "cgs_common.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+
+#include "smu74.h"
+#include "smu74_discrete.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
+
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
new file mode 100644
index 000000000000..bce00096d80d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
@@ -0,0 +1,412 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef DGPU_VI_PP_SMC_H
+#define DGPU_VI_PP_SMC_H
+
+
+#pragma pack(push, 1)
+
+#define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305)
+
+#define PPSMC_SWSTATE_FLAG_DC 0x01
+#define PPSMC_SWSTATE_FLAG_UVD 0x02
+#define PPSMC_SWSTATE_FLAG_VCE 0x04
+
+#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
+#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
+#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
+
+#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
+#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
+#define PPSMC_SYSTEMFLAG_GDDR5 0x04
+
+#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
+
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
+#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
+
+
+#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
+#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
+#define PPSMC_DPM2FLAGS_OCP 0x04
+
+
+#define PPSMC_DISPLAY_WATERMARK_LOW 0
+#define PPSMC_DISPLAY_WATERMARK_HIGH 1
+
+
+#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
+#define PPSMC_STATEFLAG_POWERBOOST 0x02
+#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
+#define PPSMC_STATEFLAG_POWERSHIFT 0x08
+#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
+#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
+#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
+
+
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+ FAN_CONTROL_FUZZY,
+ FAN_CONTROL_TABLE
+};
+
+
+#define PPSMC_Result_OK ((uint16_t)0x01)
+#define PPSMC_Result_NoMore ((uint16_t)0x02)
+
+#define PPSMC_Result_NotNow ((uint16_t)0x03)
+#define PPSMC_Result_Failed ((uint16_t)0xFF)
+#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
+#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
+
+typedef uint16_t PPSMC_Result;
+
+#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
+
+
+#define PPSMC_MSG_Halt ((uint16_t)0x10)
+#define PPSMC_MSG_Resume ((uint16_t)0x11)
+#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
+#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
+#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
+#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
+#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
+#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
+#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
+#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
+#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
+#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
+#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
+#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
+#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
+#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
+#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
+#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
+#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
+#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
+#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
+#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
+#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
+#define PPSMC_CACHistoryStart ((uint16_t)0x57)
+#define PPSMC_CACHistoryStop ((uint16_t)0x58)
+#define PPSMC_TDPClampingActive ((uint16_t)0x59)
+#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
+#define PPSMC_StartFanControl ((uint16_t)0x5B)
+#define PPSMC_StopFanControl ((uint16_t)0x5C)
+#define PPSMC_NoDisplay ((uint16_t)0x5D)
+#define PPSMC_HasDisplay ((uint16_t)0x5E)
+#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
+#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
+#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
+#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
+#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
+#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
+#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
+#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
+#define PPSMC_OCPActive ((uint16_t)0x6C)
+#define PPSMC_OCPInactive ((uint16_t)0x6D)
+#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
+#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
+#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
+#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
+#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
+#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
+#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
+#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
+#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
+#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
+#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
+#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
+#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
+#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
+#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
+#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
+
+#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
+#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
+#define PPSMC_FlushDataCache ((uint16_t)0x80)
+#define PPSMC_FlushInstrCache ((uint16_t)0x81)
+
+#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
+#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
+
+#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
+
+#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
+#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
+#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
+#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
+
+#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
+#define PPSM_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A)
+#define PPSM_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B)
+#define PPSM_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C)
+
+#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
+
+#define PPSMC_MSG_Test ((uint16_t) 0x100)
+#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101)
+#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102)
+#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103)
+#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104)
+#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105)
+#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106)
+#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107)
+#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108)
+#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109)
+#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a)
+#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b)
+#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e)
+#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f)
+#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110)
+#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111)
+#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112)
+#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113)
+#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114)
+#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117)
+#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118)
+#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119)
+#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a)
+#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b)
+#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c)
+#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d)
+#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e)
+#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f)
+#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120)
+#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121)
+#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122)
+#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123)
+#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124)
+#define PPSMC_MSG_VddNB_Request ((uint16_t) 0x125)
+#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint32_t) 0x126)
+#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint32_t) 0x127)
+#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128)
+
+#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129)
+#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A)
+#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B)
+#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C)
+#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
+#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
+#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
+#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
+#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
+#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
+#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133)
+#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134)
+#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
+#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
+#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
+#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
+#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
+#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
+#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b)
+#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0x13c)
+#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
+#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e)
+#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f)
+#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
+#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
+#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142)
+#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143)
+#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144)
+#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
+#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
+#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
+#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
+#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
+#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
+#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b)
+#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c)
+#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d)
+
+#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
+#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
+#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
+#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
+#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t) 0x152)
+#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t) 0x153)
+#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
+#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
+#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
+#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157)
+#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158)
+#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
+#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
+#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
+#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t) 0x15c)
+#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d)
+#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e)
+#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
+#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160)
+#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161)
+#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
+#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163)
+#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164)
+#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165)
+#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166)
+#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
+#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168)
+#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
+#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
+#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b)
+#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t) 0x16c)
+#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t) 0x16d)
+#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t) 0x16e)
+#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t) 0x16f)
+#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170)
+#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171)
+#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172)
+#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173)
+#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174)
+#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175)
+#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176)
+#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177)
+#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178)
+#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179)
+#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a)
+#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b)
+#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c)
+#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d)
+#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e)
+#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f)
+#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180)
+#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181)
+#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182)
+#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184)
+#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
+#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
+#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
+#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
+#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
+#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
+#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
+#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
+#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D)
+#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E)
+#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
+#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
+#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
+#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192)
+#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193)
+#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194)
+#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195)
+#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207)
+#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196)
+#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208)
+#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197)
+#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198)
+#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199)
+#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
+#define PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B)
+#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
+#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
+
+#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
+#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
+#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202)
+#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203)
+#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204)
+#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205)
+#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206)
+#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209)
+#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A)
+
+#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240)
+#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241)
+#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242)
+#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243)
+#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244)
+#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245)
+#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246)
+
+#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250)
+#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251)
+#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252)
+#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253)
+#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254)
+#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255)
+#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256)
+#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257)
+#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258)
+#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259)
+#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A)
+#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B)
+#define DMCUSMC_MSG_PSREntry ((uint16_t) 0x25C)
+#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D)
+#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260)
+#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261)
+#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262)
+#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263)
+#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264)
+#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265)
+#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266)
+#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267)
+#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268)
+#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269)
+#define PPSMC_MSG_EnableAvfs ((uint16_t) 0x26A)
+#define PPSMC_MSG_DisableAvfs ((uint16_t) 0x26B)
+
+#define PPSMC_MSG_PerformBtc ((uint16_t) 0x26C)
+#define PPSMC_MSG_VftTableIsValid ((uint16_t) 0x275)
+#define PPSMC_MSG_UseNewGPIOScheme ((uint16_t) 0x277)
+#define PPSMC_MSG_GetEnabledPsm ((uint16_t) 0x400)
+#define PPSMC_MSG_AgmStartPsm ((uint16_t) 0x401)
+#define PPSMC_MSG_AgmReadPsm ((uint16_t) 0x402)
+#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
+#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)
+
+#define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280)
+#define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281)
+#define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282)
+
+#define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300)
+#define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301)
+
+#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
+
+#define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600)
+#define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601)
+#define PPSMC_MSG_SetAddress ((uint16_t) 0x800)
+#define PPSMC_MSG_GetData ((uint16_t) 0x801)
+#define PPSMC_MSG_SetData ((uint16_t) 0x802)
+
+typedef uint16_t PPSMC_Msg;
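+
+/* Illustrative use (not part of this patch): an SMU manager passes one of
+ * these message IDs to its mailbox helper (cf. cz_send_msg_to_smc() later
+ * in this series), e.g. PPSMC_MSG_SetTjMax, and then polls the response
+ * register for completion.
+ */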
+
+#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
+#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
+#define PPSMC_EVENT_STATUS_DC 0x00000004
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 3c235f0177cd..2139072065cc 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -28,6 +28,7 @@
struct pp_smumgr;
struct pp_instance;
+struct pp_hwmgr;
#define smu_lower_32_bits(n) ((uint32_t)(n))
#define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
@@ -53,6 +54,45 @@ enum AVFS_BTC_STATUS {
AVFS_BTC_SMUMSG_ERROR
};
+enum SMU_TABLE {
+ SMU_UVD_TABLE = 0,
+ SMU_VCE_TABLE,
+ SMU_SAMU_TABLE,
+ SMU_BIF_TABLE,
+};
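+
+/* Illustrative use (not from this patch): these values select the 'type'
+ * argument of smum_update_smc_table(), e.g.
+ *   smum_update_smc_table(hwmgr, SMU_UVD_TABLE);
+ */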
+
+enum SMU_TYPE {
+ SMU_SoftRegisters = 0,
+ SMU_Discrete_DpmTable,
+};
+
+enum SMU_MEMBER {
+ HandshakeDisables = 0,
+ VoltageChangeTimeout,
+ AverageGraphicsActivity,
+ PreVBlankGap,
+ VBlankTimeout,
+ UcodeLoadStatus,
+ UvdBootLevel,
+ VceBootLevel,
+ SamuBootLevel,
+ LowSclkInterruptThreshold,
+};
+
+enum SMU_MAC_DEFINITION {
+ SMU_MAX_LEVELS_GRAPHICS = 0,
+ SMU_MAX_LEVELS_MEMORY,
+ SMU_MAX_LEVELS_LINK,
+ SMU_MAX_ENTRIES_SMIO,
+ SMU_MAX_LEVELS_VDDC,
+ SMU_MAX_LEVELS_VDDGFX,
+ SMU_MAX_LEVELS_VDDCI,
+ SMU_MAX_LEVELS_MVDD,
+ SMU_UVD_MCLK_HANDSHAKE_DISABLE,
+};
+
struct pp_smumgr_func {
int (*smu_init)(struct pp_smumgr *smumgr);
int (*smu_fini)(struct pp_smumgr *smumgr);
@@ -69,12 +109,23 @@ struct pp_smumgr_func {
int (*download_pptable_settings)(struct pp_smumgr *smumgr,
void **table);
int (*upload_pptable_settings)(struct pp_smumgr *smumgr);
+ int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type);
+ int (*process_firmware_header)(struct pp_hwmgr *hwmgr);
+ int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr);
+ int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr);
+ int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr);
+ int (*init_smc_table)(struct pp_hwmgr *hwmgr);
+ int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr);
+ int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr);
+ int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr);
+ uint32_t (*get_offsetof)(uint32_t type, uint32_t member);
+ uint32_t (*get_mac_definition)(uint32_t value);
+ bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
};
struct pp_smumgr {
uint32_t chip_family;
uint32_t chip_id;
- uint32_t hw_revision;
void *device;
void *backend;
uint32_t usec_timeout;
@@ -122,6 +173,30 @@ extern int smu_allocate_memory(void *device, uint32_t size,
extern int smu_free_memory(void *device, void *handle);
+extern int cz_smum_init(struct pp_smumgr *smumgr);
+extern int iceland_smum_init(struct pp_smumgr *smumgr);
+extern int tonga_smum_init(struct pp_smumgr *smumgr);
+extern int fiji_smum_init(struct pp_smumgr *smumgr);
+extern int polaris10_smum_init(struct pp_smumgr *smumgr);
+
+extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+
+extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+extern int smum_process_firmware_header(struct pp_hwmgr *hwmgr);
+extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
+ void *input, void *output, void *storage, int result);
+extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+ void *input, void *output, void *storage, int result);
+extern int smum_init_smc_table(struct pp_hwmgr *hwmgr);
+extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+extern int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+extern uint32_t smum_get_offsetof(struct pp_smumgr *smumgr,
+ uint32_t type, uint32_t member);
+extern uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value);
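+
+/* Illustrative use (not from this patch): a hwmgr can ask its SMU backend
+ * where a firmware struct member lives, e.g.
+ *   smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters,
+ *                     AverageGraphicsActivity);
+ */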
+
+extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr);
+
#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index f10fb64ef981..51ff08301651 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -2,7 +2,9 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o polaris10_smumgr.o
+SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \
+ polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \
+ smu7_smumgr.o iceland_smc.o
AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index 87c023e518ab..5a44485526d2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -89,13 +89,8 @@ static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
if (result != 0)
return result;
- result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
+ return SMUM_WAIT_FIELD_UNEQUAL(smumgr,
SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
-
- if (result != 0)
- return result;
-
- return 0;
}
static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
@@ -106,12 +101,12 @@ static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
if (0 != (3 & smc_address)) {
printk(KERN_ERR "[ powerplay ] SMC address must be 4 byte aligned\n");
- return -1;
+ return -EINVAL;
}
if (limit <= (smc_address + 3)) {
printk(KERN_ERR "[ powerplay ] SMC address beyond the SMC RAM area\n");
- return -1;
+ return -EINVAL;
}
cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0,
@@ -129,9 +124,10 @@ static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr,
return -EINVAL;
result = cz_set_smc_sram_address(smumgr, smc_address, limit);
- cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
+ if (!result)
+ cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
- return 0;
+ return result;
}
static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
@@ -148,7 +144,6 @@ static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
{
struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
- int result = 0;
uint32_t smc_address;
if (!smumgr->reload_fw) {
@@ -177,11 +172,9 @@ static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_power_profiling_index);
- result = cz_send_msg_to_smc_with_parameter(smumgr,
+ return cz_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_initialize_index);
-
- return result;
}
static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
@@ -195,9 +188,6 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
if (smumgr == NULL || smumgr->device == NULL)
return -EINVAL;
- return cgs_read_register(smumgr->device,
- mmSMU_MP1_SRBM2P_ARG_0);
-
cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);
for (i = 0; i < smumgr->usec_timeout; i++) {
@@ -275,7 +265,10 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
if (smumgr->chip_id == CHIP_STONEY)
fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
- cz_request_smu_load_fw(smumgr);
+ ret = cz_request_smu_load_fw(smumgr);
+ if (ret)
+		printk(KERN_ERR "[ powerplay ] SMU firmware load failed\n");
+
cz_check_fw_load_finish(smumgr, fw_to_check);
ret = cz_load_mec_firmware(smumgr);
@@ -566,10 +559,7 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
- if (smumgr->chip_id == CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
- else
+ if (smumgr->chip_id != CHIP_STONEY)
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
cz_smu_populate_single_ucode_load_task(smumgr,
@@ -580,10 +570,7 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- if (smumgr->chip_id == CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- else
+ if (smumgr->chip_id != CHIP_STONEY)
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
cz_smu_populate_single_ucode_load_task(smumgr,
@@ -610,19 +597,12 @@ static int cz_smu_construct_toc(struct pp_smumgr *smumgr)
struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
cz_smu->toc_entry_used_count = 0;
-
cz_smu_initialize_toc_empty_job_list(smumgr);
-
cz_smu_construct_toc_for_rlc_aram_save(smumgr);
-
cz_smu_construct_toc_for_vddgfx_enter(smumgr);
-
cz_smu_construct_toc_for_vddgfx_exit(smumgr);
-
cz_smu_construct_toc_for_power_profiling(smumgr);
-
cz_smu_construct_toc_for_bootup(smumgr);
-
cz_smu_construct_toc_for_clock_table(smumgr);
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
new file mode 100644
index 000000000000..76310ac7ef0d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
@@ -0,0 +1,2374 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "fiji_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "fiji_smumgr.h"
+#include "pppcielanes.h"
+#include "smu7_ppsmc.h"
+#include "smu73.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "smu7_smumgr.h"
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define VDDC_VDDCI_DELTA 300
+#define MC_CG_ARB_FREQ_F1 0x0b
+
+/* [2.5%, ~2.5%] whether the clock stretch amount is a multiple of 2.5%
+ * or not; entries are [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
+ */
+static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = {
+ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
+
+/* [FF, SS] type, [] 4 voltage ranges, and
+ * [Floor Freq, Boundary Freq, VID min , VID max]
+ */
+static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = {
+ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+ { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
+ * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
+ */
+static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = {
+ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
+
+static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+ /*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
+ {1, 0xF, 0xFD,
+ /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
+ 0x19, 5, 45}
+};
+
+/* Look up the SMC-encoded VDDC/VDDCI voltage (and MVDD) that corresponds
+ * to the requested clock in a clock-voltage dependency table.
+ */
+static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+ uint32_t clock, uint32_t *voltage, uint32_t *mvdd)
+{
+ uint32_t i;
+ uint16_t vddci;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ *voltage = *mvdd = 0;
+
+	/* the clock-voltage dependency table is empty */
+ if (dep_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < dep_table->count; i++) {
+		/* find the first sclk at or above the request */
+ if (dep_table->entries[i].clk >= clock) {
+ *voltage |= (dep_table->entries[i].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i].vddci)
+ *voltage |= (dep_table->entries[i].vddci *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i].vddc -
+ VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+ VOLTAGE_SCALE;
+
+ *voltage |= 1 << PHASES_SHIFT;
+ return 0;
+ }
+ }
+
+	/* sclk is bigger than the max sclk in the dependency table */
+ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+	else if (dep_table->entries[i-1].vddci) {
+		vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+				(dep_table->entries[i-1].vddc -
+						VDDC_VDDCI_DELTA));
+		*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+	}
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+	else if (dep_table->entries[i - 1].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+ return 0;
+}
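+
+/* Illustrative encoding (values not from this patch): with VOLTAGE_SCALE = 4,
+ * a 1150 mV VDDC entry is packed as 1150 * 4 = 4600 into the VDDC field of
+ * *voltage, VDDCI is packed the same way into its own field, and the phase
+ * count is set to 1 via PHASES_SHIFT.
+ */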
+
+
+/* PPGen generates the fan gain setting in units of x * 100.
+ * Convert it to the x * 4096 (0x1000) unit expected by the SMC firmware.
+ */
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+	uint32_t tmp;
+
+	tmp = raw_setting * 4096 / 100;
+	return (uint16_t)tmp;
+}
+
+static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
+{
+ switch (line) {
+ case SMU7_I2CLineID_DDC1:
+ *scl = SMU7_I2C_DDC1CLK;
+ *sda = SMU7_I2C_DDC1DATA;
+ break;
+ case SMU7_I2CLineID_DDC2:
+ *scl = SMU7_I2C_DDC2CLK;
+ *sda = SMU7_I2C_DDC2DATA;
+ break;
+ case SMU7_I2CLineID_DDC3:
+ *scl = SMU7_I2C_DDC3CLK;
+ *sda = SMU7_I2C_DDC3DATA;
+ break;
+ case SMU7_I2CLineID_DDC4:
+ *scl = SMU7_I2C_DDC4CLK;
+ *sda = SMU7_I2C_DDC4DATA;
+ break;
+ case SMU7_I2CLineID_DDC5:
+ *scl = SMU7_I2C_DDC5CLK;
+ *sda = SMU7_I2C_DDC5DATA;
+ break;
+ case SMU7_I2CLineID_DDC6:
+ *scl = SMU7_I2C_DDC6CLK;
+ *sda = SMU7_I2C_DDC6DATA;
+ break;
+ case SMU7_I2CLineID_SCLSDA:
+ *scl = SMU7_I2C_SCL;
+ *sda = SMU7_I2C_SDA;
+ break;
+ case SMU7_I2CLineID_DDCVGA:
+ *scl = SMU7_I2C_DDCVGACLK;
+ *sda = SMU7_I2C_DDCVGADATA;
+ break;
+ default:
+ *scl = 0;
+ *sda = 0;
+ break;
+ }
+}
+
+static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &fiji_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0];
+}
+
+static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ struct pp_advance_fan_control_parameters *fan_table =
+ &hwmgr->thermal_controller.advanceFanControlParameters;
+ uint8_t uc_scl, uc_sda;
+
+ /* TDP number of fraction bits are changed from 8 to 7 for Fiji
+ * as requested by SMC team
+ */
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usTDP * 128));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usTDP * 128));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range!",
+ );
+
+ dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
+
+ /* The following are for new Fiji Multi-input fan/thermal control */
+ dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTargetOperatingTemp * 256);
+ dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitHotspot * 256);
+ dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitLiquid1 * 256);
+ dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitLiquid2 * 256);
+ dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitVrVddc * 256);
+ dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitVrMvdd * 256);
+ dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitPlx * 256);
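+	/* The limits above are 8.8 fixed point; e.g. an illustrative 90 C
+	 * target would be encoded as 90 * 256 = 0x5A00. */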
+
+ dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainEdge));
+ dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHotspot));
+ dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainLiquid));
+ dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainVrVddc));
+ dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
+ dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainPlx));
+ dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHbm));
+
+ dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
+ dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
+ dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
+ dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
+
+ get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
+ dpm_table->Liquid_I2C_LineSCL = uc_scl;
+ dpm_table->Liquid_I2C_LineSDA = uc_sda;
+
+ get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
+ dpm_table->Vr_I2C_LineSCL = uc_scl;
+ dpm_table->Vr_I2C_LineSDA = uc_sda;
+
+ get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
+ dpm_table->Plx_I2C_LineSCL = uc_scl;
+ dpm_table->Plx_I2C_LineSDA = uc_sda;
+
+ return 0;
+}
+
+
+static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+
+static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ /* TDC number of fraction bits are changed from 8 to 7
+ * for Fiji as requested by SMC team
+ */
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+ return 0;
+}
+
+static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ fuse_table_offset +
+ offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+				"Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!",
+ return -EINVAL);
+ else {
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+ smu_data->power_tune_table.LPMLTemperatureMin =
+ (uint8_t)((temp >> 16) & 0xff);
+ smu_data->power_tune_table.LPMLTemperatureMax =
+ (uint8_t)((temp >> 8) & 0xff);
+ smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+ }
+ return 0;
+}
+
+static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity & (1 << 15)) ||
+ 0 == hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity)
+ hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity = hwmgr->thermal_controller.
+ advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+ PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+ advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint16_t HiSidd;
+	uint16_t LoSidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+ return 0;
+}
+
+static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ uint32_t pm_fuse_table_offset;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ /* DW6 */
+ if (fiji_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+ /* DW7 */
+ if (fiji_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+ /* DW8 */
+ if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ /* DW9-DW12 */
+ if (0 != fiji_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ /* DW13-DW14 */
+ if (fiji_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan Control parameters Failed!",
+ return -EINVAL);
+
+ /* DW15-DW18 */
+ if (fiji_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ /* DW19 */
+ if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Min and Max Vid Failed!",
+ return -EINVAL);
+
+ /* DW20 */
+ if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+ "Sidd Failed!", return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+/**
+* Preparation of vddc and vddgfx CAC tables for SMC.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+	/* The table is already byte-swapped, so to use a value from it
+	 * we need to swap it back.
+	 * We populate the vddc CAC data into the BapmVddc table
+	 * for both split and merged mode.
+	 */
+
+ for (count = 0; count < lookup_table->count; count++) {
+ index = phm_get_voltage_index(lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] =
+ convert_to_vid(lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] =
+ convert_to_vid(lookup_table->entries[index].us_cac_high);
+ }
+
+ return 0;
+}
+
+/**
+* Preparation of voltage tables for SMC.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+
+static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = fiji_populate_cac_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate CAC voltage tables to SMC",
+ return -EINVAL);
+
+ return 0;
+}
+
+static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_Ulv *state)
+{
+ int result = 0;
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ state->VddcPhase = 1;
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+ }
+ return result;
+}
+
+static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ return fiji_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ int i;
+
+ /* Index (dpm_table->pcie_speed_table.count)
+ * is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+ dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+
+/**
+* Calculates the SCLK dividers using the provided engine clock
+*
+* @param hwmgr the address of the hardware manager
+* @param clock the engine clock to use to populate the structure
+* @param sclk the SMC SCLK structure to be populated
+*/
+static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t ref_clock;
+ uint32_t ref_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
+ ref_clock = atomctrl_get_reference_clock(hwmgr);
+ ref_divider = 1 + dividers.uc_pll_ref_div;
+
+	/* the low 14 bits are the fraction and the high 12 bits the divider */
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup */
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ struct pp_atomctrl_internal_ss_info ssInfo;
+
+ uint32_t vco_freq = clock * dividers.uc_pll_post_div;
+ if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
+ vco_freq, &ssInfo)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ *
+ * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
+ */
+ uint32_t clk_s = ref_clock * 5 /
+ (ref_divider * ssInfo.speed_spectrum_rate);
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
+ fbdiv / (clk_s * 10000);
+
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
+ CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
+ }
+ }
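+
+	/* Worked example with illustrative numbers (not from this patch):
+	 * ref_clock = 2500, ref_divider = 3 and speed_spectrum_rate = 30 give
+	 * clk_s = 2500 * 5 / (3 * 30) = 138; with fbdiv = 44236 and a 0.25%
+	 * spread (speed_spectrum_percentage = 25),
+	 * clk_v = 4 * 25 * 44236 / (138 * 10000) = 3.
+	 */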
+
+ sclk->SclkFrequency = clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
+
+/**
+* Populates single SMC SCLK structure using the provided engine clock
+*
+* @param hwmgr the address of the hardware manager
+* @param clock the engine clock to use to populate the structure
+* @param sclk the SMC SCLK structure to be populated
+*/
+
+static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, uint16_t sclk_al_threshold,
+ struct SMU73_Discrete_GraphicsLevel *level)
+{
+ int result;
+ /* PP_Clocks minClocks; */
+ uint32_t threshold, mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	result = fiji_calculate_sclk_params(hwmgr, clock, level);
+	if (result)
+		return result;
+
+ /* populate graphics levels */
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk, clock,
+ (uint32_t *)(&level->MinVoltage), &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find VDDC voltage value for "
+ "VDDC engine clock dependency table",
+ return result);
+
+ level->SclkFrequency = clock;
+ level->ActivityLevel = sclk_al_threshold;
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ level->EnabledForThrottle = 1;
+ level->UpHyst = 10;
+ level->DownHyst = 0;
+ level->VoltageDownHyst = 0;
+ level->PowerThrottle = 0;
+
+ threshold = clock * data->fast_watermark_threshold / 100;
+
+ data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
+ hwmgr->display_config.min_core_set_clock_in_sr);
+
+
+ /* Default to slow, highest DPM level will be
+ * set to PPSMC_DISPLAY_WATERMARK_LOW later.
+ */
+ level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+
+ return 0;
+}
+
+/**
+* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+*
+* @param hwmgr the address of the hardware manager
+*/
+int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
+ int result = 0;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
+ SMU73_MAX_LEVELS_GRAPHICS;
+ struct SMU73_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t i, max_entry;
+ uint8_t hightest_pcie_level_enabled = 0,
+ lowest_pcie_level_enabled = 0,
+ mid_pcie_level_enabled = 0,
+ count = 0;
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = fiji_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &levels[i]);
+ if (result)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ levels[i].DeepSleepDivId = 0;
+ }
+
+	/* Only enable level 0 for now. */
+ levels[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_cnt - 1;
+ for (i = 0; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ } else {
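+		/* Illustrative walk-through (mask value not from this patch):
+		 * pcie_dpm_enable_mask = 0x6 (levels 1 and 2 enabled) makes the
+		 * loops below settle on lowest_pcie_level_enabled = 1,
+		 * hightest_pcie_level_enabled = 2 and mid_pcie_level_enabled = 2.
+		 */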
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (hightest_pcie_level_enabled + 1))) != 0))
+ hightest_pcie_level_enabled++;
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0))
+ lowest_pcie_level_enabled++;
+
+ while ((count < hightest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+ count++;
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+ hightest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled + 1 + count) :
+ hightest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to hightest_pcie_level_enabled */
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled */
+ levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled */
+ levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+	/* the level count is sent to the SMC once at table init and never changes */
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+
+/**
+ * MCLK Frequency Ratio
+ * SEQ_CG_RESP Bit[31:24] - 0x0
+ * Bit[27:24] - DDR3 Frequency ratio
+ * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
+ * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
+ * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
+ * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
+ * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
+ * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
+ * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
+ * 400 < 0x7 <= 450MHz, 800 < 0xF
+ */
+static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
+{
+ if (mem_clock <= 10000)
+ return 0x0;
+ if (mem_clock <= 15000)
+ return 0x1;
+ if (mem_clock <= 20000)
+ return 0x2;
+ if (mem_clock <= 25000)
+ return 0x3;
+ if (mem_clock <= 30000)
+ return 0x4;
+ if (mem_clock <= 35000)
+ return 0x5;
+ if (mem_clock <= 40000)
+ return 0x6;
+ if (mem_clock <= 45000)
+ return 0x7;
+ if (mem_clock <= 50000)
+ return 0x8;
+ if (mem_clock <= 55000)
+ return 0x9;
+ if (mem_clock <= 60000)
+ return 0xa;
+ if (mem_clock <= 65000)
+ return 0xb;
+ if (mem_clock <= 70000)
+ return 0xc;
+ if (mem_clock <= 75000)
+ return 0xd;
+ if (mem_clock <= 80000)
+ return 0xe;
+ /* mem_clock > 800MHz */
+ return 0xf;
+}
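+
+/* Example: mem_clock is in 10 kHz units, so an illustrative 475 MHz clock
+ * (mem_clock = 47500) falls in the 450-500 MHz bucket and returns 0x8.
+ */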
+
+/**
+* Populates the SMC MCLK structure using the provided memory clock
+*
+* @param hwmgr the address of the hardware manager
+* @param clock the memory clock to use to populate the structure
+* @param mclk the SMC MCLK structure to be populated
+*/
+static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
+{
+ struct pp_atomctrl_memory_clock_param mem_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to get Memory PLL Dividers.",
+ );
+
+	/* Save the result data to the output memory level structure */
+ mclk->MclkFrequency = clock;
+ mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
+ mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
+
+ return result;
+}
+
+static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ uint32_t mclk_stutter_mode_threshold = 60000;
+
+ if (table_info->vdd_dep_on_mclk) {
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk, clock,
+ (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory "
+ "VDDC voltage dependency table", return result);
+ }
+
+ mem_level->EnabledForThrottle = 1;
+ mem_level->EnabledForActivity = 0;
+ mem_level->UpHyst = 0;
+ mem_level->DownHyst = 100;
+ mem_level->VoltageDownHyst = 0;
+ mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ mem_level->StutterEnable = false;
+
+ mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	/* Enable stutter mode if all of the following conditions apply.
+	 * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
+	 * &(data->DisplayTiming.numExistingDisplays)) would normally supply
+	 * the display count; it is hard-coded to 1 below.
+	 */
+ data->display_timing.num_existing_displays = 1;
+
+ if (mclk_stutter_mode_threshold &&
+ (clock <= mclk_stutter_mode_threshold) &&
+ (!data->is_uvd_enabled) &&
+ (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE) & 0x1))
+ mem_level->StutterEnable = true;
+
+ result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+ }
+ return result;
+}
+
+/**
+* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+*
+* @param hwmgr the address of the hardware manager
+*/
+int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
+ uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
+ SMU73_MAX_LEVELS_MEMORY;
+ struct SMU73_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero",
+ return -EINVAL);
+ result = fiji_populate_single_memory_level(hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &levels[i]);
+ if (result)
+ return result;
+ }
+
+ /* Only enable level 0 for now. */
+ levels[0].EnabledForActivity = 1;
+
+	/* In order to prevent MC activity in stutter mode from pushing DPM up,
+	 * the UVD change complements this by putting the MCLK in a higher
+	 * state by default, so that we are not affected by the up threshold
+	 * or MCLK DPM latency.
+	 */
+ levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
+ CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount =
+ (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ /* set highest level watermark to high */
+ levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	/* the level count is sent to the SMC once at table init and never changes */
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+
+/**
+* Populates the SMC MVDD structure using the provided memory clock.
+*
+* @param hwmgr the address of the hardware manager
+* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
+* @param voltage the SMC VOLTAGE structure to be populated
+*/
+static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+		/* find the first mvdd entry whose clock is at least the request */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+	} else {
+		return -EINVAL;
+	}
+
+ return 0;
+}
+
+static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = 0;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ SMIO_Pattern vol_level;
+ uint32_t mvdd;
+ uint16_t us_mvdd;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (!data->sclk_dpm_key_disabled) {
+ /* Get MinVoltage and Frequency from DPM0,
+ * already converted to SMC_UL */
+ table->ACPILevel.SclkFrequency =
+ data->dpm_table.sclk_table.dpm_levels[0].value;
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk,
+ table->ACPILevel.SclkFrequency,
+ (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDC voltage value " \
+ "in Clock Dependency Table",
+ );
+ } else {
+ table->ACPILevel.SclkFrequency =
+ data->vbios_boot_state.sclk_bootup_value;
+ table->ACPILevel.MinVoltage =
+ data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
+ }
+
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
+ SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ if (!data->mclk_dpm_key_disabled) {
+ /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+ table->MemoryACPILevel.MclkFrequency =
+ data->dpm_table.mclk_table.dpm_levels[0].value;
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk,
+ table->MemoryACPILevel.MclkFrequency,
+ (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
+ );
+ } else {
+ table->MemoryACPILevel.MclkFrequency =
+ data->vbios_boot_state.mclk_bootup_value;
+ table->MemoryACPILevel.MinVoltage =
+ data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
+ }
+
+ us_mvdd = 0;
+	if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+			(data->mclk_dpm_key_disabled)) {
+		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+	} else {
+ if (!fiji_populate_mvdd_value(hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[0].value,
+ &vol_level))
+ us_mvdd = vol_level.Voltage;
+ }
+
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = false;
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+ return result;
+}
+
+static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->VceLevelCount = (uint8_t)(mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage = 0;
+ table->VceLevel[count].MinVoltage |=
+ (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->VceLevel[count].MinVoltage |=
+ ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->AcpLevelCount = (uint8_t)(mm_table->count);
+ table->AcpBootLevel = 0;
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+		table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
+		table->AcpLevel[count].MinVoltage = 0;
+		table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+ VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->AcpLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for engine clock", return result);
+
+ table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].MinVoltage = 0;
+ table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+ VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+ int32_t eng_clock, int32_t mem_clock,
+ struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ uint32_t dram_timing;
+ uint32_t dram_timing2;
+ uint32_t burstTime;
+	ULONG trrds, trrdl;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ eng_clock, mem_clock);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
+
+ trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
+ trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+ arb_regs->TRRDS = (uint8_t)trrds;
+ arb_regs->TRRDL = (uint8_t)trrdl;
+
+ return 0;
+}
+
+static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+ int result = 0;
+
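+ /* build one ARB timing entry for every (sclk, mclk) DPM level pair */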
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = fiji_populate_memory_timing_parameters(hwmgr,
+ data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (result)
+ break;
+ }
+ }
+
+ if (!result)
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU73_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END);
+ return result;
+}
+
+static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->UvdLevelCount = (uint8_t)(mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].MinVoltage = 0;
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+ VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+
+ }
+ return result;
+}
+
+static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table */
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(table->GraphicsBootLevel));
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(table->MemoryBootLevel));
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return 0;
+}
+
+static int fiji_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint8_t count, level;
+
+ count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+ data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+ data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
+ volt_with_cks, value;
+ uint16_t clock_freq_u16;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
+ volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+ /* Read the SMU efuse to calculate RO and determine whether
+ * the part is SS or FF. If RO >= 1660 MHz, the part is FF.
+ */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (146 * 4));
+ efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (148 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+ efuse2 &= 0xF;
+
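+ /* efuse2 selects the RO range; the 8-bit fuse value is mapped linearly into it */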
+ if (efuse2 == 1)
+ ro = (2300 - 1350) * efuse / 255 + 1350;
+ else
+ ro = (2500 - 1000) * efuse / 255 + 1000;
+
+ if (ro >= 1660)
+ type = 0;
+ else
+ type = 1;
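+ /* type 0 = FF part, type 1 = SS part (see the RO threshold above) */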
+
+ /* Populate Stretch amount */
+ smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ volt_without_cks = (uint32_t)((14041 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
+ (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
+ volt_with_cks = (uint32_t)((13946 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
+ (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ STRETCH_ENABLE, 0x0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ staticEnable, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x0);
+
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL);
+ value &= 0xFFC2FF87;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
+ fiji_clock_stretcher_lookup_table[stretch_amount2][0];
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
+ fiji_clock_stretcher_lookup_table[stretch_amount2][1];
+ clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
+ GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
+ SclkFrequency) / 100);
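+ /* highest sclk, converted back from SMC byte order and from 10 kHz units to MHz */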
+ if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
+ clock_freq_u16 &&
+ fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
+ clock_freq_u16) {
+ /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
+ value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
+ /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
+ value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
+ /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
+ value |= (fiji_clock_stretch_amount_conversion
+ [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
+ [stretch_amount]) << 3;
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].minFreq);
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].maxFreq);
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
+ fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
+ (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL, value);
+
+ /* Populate DDT Lookup Table */
+ for (i = 0; i < 4; i++) {
+ /* Assign the minimum and maximum VID stored
+ * in the last row of Clock Stretcher Voltage Table.
+ */
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].minVID =
+ (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].maxVID =
+ (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
+ /* Loop through each SCLK and check the frequency
+ * to see if it lies within the frequency for clock stretcher.
+ */
+ for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
+ cks_setting = 0;
+ clock_freq = PP_SMC_TO_HOST_UL(
+ smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
+ /* Check the allowed frequency against the sclk level[j].
+ * Sclk's endianness has already been converted,
+ * and it is in 10 kHz units,
+ * as opposed to the data table, which is in MHz units.
+ */
+ if (clock_freq >=
+ (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
+ cks_setting |= 0x2;
+ if (clock_freq <
+ (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
+ cks_setting |= 0x1;
+ }
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
+ ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
+/**
+* Populates the SMC VRConfig field in DPM table.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint16_t config;
+
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ PP_ASSERT_WITH_CODE(false,
+ "VDDC should be on SVI2 control in merged mode!",
+ );
+ }
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ }
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ config = VR_SMIO_PATTERN_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+static int fiji_init_arb_table_index(struct pp_smumgr *smumgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend);
+ uint32_t tmp;
+ int result;
+
+ /* This is a read-modify-write on the first byte of the ARB table.
+ * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
+ * is the field 'current'.
+ * This solution is ugly, but we never write the whole table,
+ * only individual fields in it.
+ * In reality this field should not be in that structure
+ * but in a soft register.
+ */
+ result = smu7_read_smc_sram_dword(smumgr,
+ smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+ if (result)
+ return result;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return smu7_write_smc_sram_dword(smumgr,
+ smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+/**
+* Initializes the SMC table and uploads it
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return 0 on success; the error code of the first failing step otherwise
+*/
+int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ uint8_t i;
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+
+ fiji_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+ fiji_populate_smc_voltage_tables(hwmgr, table);
+
+ table->SystemFlags = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = fiji_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = fiji_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result);
+
+ result = fiji_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result);
+
+ result = fiji_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result);
+
+ result = fiji_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result);
+
+ result = fiji_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result);
+
+ result = fiji_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACP Level!", return result);
+
+ result = fiji_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result);
+
+ /* Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = fiji_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result);
+
+ result = fiji_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result);
+
+ result = fiji_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result);
+
+ result = fiji_populate_smc_initial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot State!", return result);
+
+ result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate BAPM Parameters!", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = fiji_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate Clock Stretcher Data Table!",
+ return result);
+ }
+
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
+ table->PCIeGenInterval = 1;
+ table->VRConfig = 0;
+
+ result = fiji_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate VRConfig setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin)) {
+ table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ /* Thermal Output GPIO */
+ if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+ &gpio_pin)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+ /* For polarity, read GPIOPAD_A at the assigned GPIO pin:
+ * the VBIOS programs this register to the 'inactive state',
+ * so the driver can derive the 'active state' from it and
+ * program the SMU with the correct polarity.
+ */
+ table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+ (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1 : 0;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot) &&
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CombinePCCWithThermalSignal))
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ } else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
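+ /* convert the SMIO entries and the remaining scalar fields to SMC (big-endian) byte order */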
+ for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ /* Upload all dpm data to SMC memory (dpm level, dpm level count, etc.).
+ * The copy size excludes the three PID controller structures,
+ * which are not uploaded here.
+ */
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
+ SMC_RAM_END);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result);
+
+ result = fiji_init_arb_table_index(hwmgr->smumgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload arb data to SMC memory!", return result);
+
+ result = fiji_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+ return 0;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0; on failure the MicrocodeFanControl cap is cleared instead
+*/
+int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+ SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (smu_data->smu7_data.fan_table_start == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (duty100 == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+ usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
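+ /* usPWMMin is in hundredths of a percent, hence the scale by duty100 and divide by 10000 */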
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
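+ /* the +50 before each divide by 100 rounds the slope to the nearest unit */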
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->
+ thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+ thermal_controller.advanceFanControlParameters.ulCycleDelay *
+ reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+ hwmgr->device, CGS_IND_REG__SMC,
+ CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
+ (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+ SMC_RAM_END);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanMinPwm,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanSclkTarget,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+ if (res)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ return 0;
+}
+
+int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return fiji_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+ result = fiji_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+ return result;
+}
+
+uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU73_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU73_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU73_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
+ }
+ break;
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ break;
+ }
+ printk("cant't get the offset of type %x member %x \n", type, member);
+ return 0;
+}
+
+uint32_t fiji_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU73_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU73_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU73_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU73_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU73_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU73_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU73_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU73_MAX_LEVELS_MVDD;
+ }
+
+ printk("cant't get the mac of %x \n", value);
+ return 0;
+}
+
+static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
+ UvdBootLevel);
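+ /* align the byte offset down to its containing dword; UvdBootLevel occupies bits 24-31 */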
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
+
+static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smu_data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ else
+ smu_data->smc_state_table.VceBootLevel = 0;
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
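+ /* align down to the containing dword; VceBootLevel occupies bits 16-23 */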
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+
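+ /* align down to the containing dword; SamuBootLevel occupies the low byte */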
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ fiji_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ fiji_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ fiji_update_samu_smc_table(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/**
+* Get the location of various tables inside the FW image.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result) {
+ data->soft_regs_start = tmp;
+ smu_data->smu7_data.soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.mc_reg_table_start = tmp;
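+ /* unlike the other header reads, a failure here is not folded into 'error' */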
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (0 != result);
+
+ return error ? -1 : 0;
+}
+
+int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+
+ /* Program additional LP registers
+ * that are no longer programmed by VBIOS
+ */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+
+ return 0;
+}
+
+bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return 1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
new file mode 100644
index 000000000000..d30d150f9ca6
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef FIJI_SMC_H
+#define FIJI_SMC_H
+
+#include "smumgr.h"
+#include "smu73.h"
+
+struct fiji_pt_defaults {
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+ uint8_t TdcWaterfallCtl;
+ uint8_t DTEAmbientTempBase;
+};
+
+int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int fiji_init_smc_table(struct pp_hwmgr *hwmgr);
+int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t fiji_get_offsetof(uint32_t type, uint32_t member);
+uint32_t fiji_get_mac_definition(uint32_t value);
+int fiji_process_firmware_header(struct pp_hwmgr *hwmgr);
+int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 8e52a2e82db5..02fe1df855a9 100644..100755
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -38,6 +38,7 @@
#include "bif/bif_5_0_sh_mask.h"
#include "pp_debug.h"
#include "fiji_pwrvirus.h"
+#include "fiji_smc.h"
#define AVFS_EN_MSB 1568
#define AVFS_EN_LSB 1568
@@ -57,509 +58,6 @@ static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
{ 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }
};
-static enum cgs_ucode_id fiji_convert_fw_type_to_cgs(uint32_t fw_type)
-{
- enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
-
- switch (fw_type) {
- case UCODE_ID_SMU:
- result = CGS_UCODE_ID_SMU;
- break;
- case UCODE_ID_SDMA0:
- result = CGS_UCODE_ID_SDMA0;
- break;
- case UCODE_ID_SDMA1:
- result = CGS_UCODE_ID_SDMA1;
- break;
- case UCODE_ID_CP_CE:
- result = CGS_UCODE_ID_CP_CE;
- break;
- case UCODE_ID_CP_PFP:
- result = CGS_UCODE_ID_CP_PFP;
- break;
- case UCODE_ID_CP_ME:
- result = CGS_UCODE_ID_CP_ME;
- break;
- case UCODE_ID_CP_MEC:
- result = CGS_UCODE_ID_CP_MEC;
- break;
- case UCODE_ID_CP_MEC_JT1:
- result = CGS_UCODE_ID_CP_MEC_JT1;
- break;
- case UCODE_ID_CP_MEC_JT2:
- result = CGS_UCODE_ID_CP_MEC_JT2;
- break;
- case UCODE_ID_RLC_G:
- result = CGS_UCODE_ID_RLC_G;
- break;
- default:
- break;
- }
-
- return result;
-}
-/**
-* Set the address for reading/writing the SMC SRAM space.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smc_addr the address in the SMC RAM to access.
-*/
-static int fiji_set_smc_sram_address(struct pp_smumgr *smumgr,
- uint32_t smc_addr, uint32_t limit)
-{
- PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)),
- "SMC address must be 4 byte aligned.", return -EINVAL;);
- PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)),
- "SMC address is beyond the SMC RAM area.", return -EINVAL;);
-
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smc_addr);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-
- return 0;
-}
-
-/**
-* Copy bytes from an array into the SMC RAM space.
-*
-* @param smumgr the address of the powerplay SMU manager.
-* @param smcStartAddress the start address in the SMC RAM to copy bytes to.
-* @param src the byte array to copy the bytes from.
-* @param byteCount the number of bytes to copy.
-*/
-int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr,
- uint32_t smcStartAddress, const uint8_t *src,
- uint32_t byteCount, uint32_t limit)
-{
- int result;
- uint32_t data, originalData;
- uint32_t addr, extraShift;
-
- PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
- "SMC address must be 4 byte aligned.", return -EINVAL;);
- PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
- "SMC address is beyond the SMC RAM area.", return -EINVAL;);
-
- addr = smcStartAddress;
-
- while (byteCount >= 4) {
- /* Bytes are written into the SMC addres space with the MSB first. */
- data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
-
- result = fiji_set_smc_sram_address(smumgr, addr, limit);
- if (result)
- return result;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
-
- src += 4;
- byteCount -= 4;
- addr += 4;
- }
-
- if (byteCount) {
- /* Now write the odd bytes left.
- * Do a read modify write cycle.
- */
- data = 0;
-
- result = fiji_set_smc_sram_address(smumgr, addr, limit);
- if (result)
- return result;
-
- originalData = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
- extraShift = 8 * (4 - byteCount);
-
- while (byteCount > 0) {
- /* Bytes are written into the SMC addres
- * space with the MSB first.
- */
- data = (0x100 * data) + *src++;
- byteCount--;
- }
- data <<= extraShift;
- data |= (originalData & ~((~0UL) << extraShift));
-
- result = fiji_set_smc_sram_address(smumgr, addr, limit);
- if (!result)
- return result;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
- }
- return 0;
-}
-
-int fiji_program_jump_on_start(struct pp_smumgr *smumgr)
-{
- static const unsigned char data[] = { 0xE0, 0x00, 0x80, 0x40 };
-
- fiji_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data) + 1);
-
- return 0;
-}
-
-/**
-* Return if the SMC is currently running.
-*
-* @param smumgr the address of the powerplay hardware manager.
-*/
-bool fiji_is_smc_ram_running(struct pp_smumgr *smumgr)
-{
- return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
- CGS_IND_REG__SMC,
- SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
- && (0x20100 <= cgs_read_ind_register(smumgr->device,
- CGS_IND_REG__SMC, ixSMC_PC_C)));
-}
-
-/**
-* Send a message to the SMC, and wait for its response.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param msg the message to send.
-* @return The response that came from the SMC.
-*/
-int fiji_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
-{
- if (!fiji_is_smc_ram_running(smumgr))
- return -1;
-
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
- printk(KERN_ERR "Failed to send Previous Message.");
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- }
-
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- return 0;
-}
-
-/**
- * Send a message to the SMC with parameter
- * @param smumgr: the address of the powerplay hardware manager.
- * @param msg: the message to send.
- * @param parameter: the parameter to send
- * @return The response that came from the SMC.
- */
-int fiji_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
- uint16_t msg, uint32_t parameter)
-{
- if (!fiji_is_smc_ram_running(smumgr))
- return -1;
-
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
- printk(KERN_ERR "Failed to send Previous Message.");
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- }
-
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- return 0;
-}
-
-
-/**
-* Send a message to the SMC with parameter, do not wait for response
-*
-* @param smumgr: the address of the powerplay hardware manager.
-* @param msg: the message to send.
-* @param parameter: the parameter to send
-* @return The response that came from the SMC.
-*/
-int fiji_send_msg_to_smc_with_parameter_without_waiting(
- struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
-{
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
- printk(KERN_ERR "Failed to send Previous Message.");
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- }
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
-/**
-* Uploads the SMU firmware from .hex file
-*
-* @param smumgr the address of the powerplay SMU manager.
-* @return 0 or -1.
-*/
-
-static int fiji_upload_smu_firmware_image(struct pp_smumgr *smumgr)
-{
- const uint8_t *src;
- uint32_t byte_count;
- uint32_t *data;
- struct cgs_firmware_info info = {0};
-
- cgs_get_firmware_info(smumgr->device,
- fiji_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
-
- if (info.image_size & 3) {
- printk(KERN_ERR "SMC ucode is not 4 bytes aligned\n");
- return -EINVAL;
- }
-
- if (info.image_size > FIJI_SMC_SIZE) {
- printk(KERN_ERR "SMC address is beyond the SMC RAM area\n");
- return -EINVAL;
- }
-
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
-
- byte_count = info.image_size;
- src = (const uint8_t *)info.kptr;
-
- data = (uint32_t *)src;
- for (; byte_count >= 4; data++, byte_count -= 4)
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]);
-
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- return 0;
-}
-
-/**
-* Read a 32bit value from the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smc_addr the address in the SMC RAM to access.
-* @param value and output parameter for the data read from the SMC SRAM.
-*/
-int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
- uint32_t *value, uint32_t limit)
-{
- int result = fiji_set_smc_sram_address(smumgr, smc_addr, limit);
-
- if (result)
- return result;
-
- *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
- return 0;
-}
-
-/**
-* Write a 32bit value to the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smc_addr the address in the SMC RAM to access.
-* @param value to write to the SMC SRAM.
-*/
-int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
- uint32_t value, uint32_t limit)
-{
- int result;
-
- result = fiji_set_smc_sram_address(smumgr, smc_addr, limit);
-
- if (result)
- return result;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
- return 0;
-}
-
-static uint32_t fiji_get_mask_for_firmware_type(uint32_t fw_type)
-{
- uint32_t result = 0;
-
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- result = UCODE_ID_SDMA0_MASK;
- break;
- case UCODE_ID_SDMA1:
- result = UCODE_ID_SDMA1_MASK;
- break;
- case UCODE_ID_CP_CE:
- result = UCODE_ID_CP_CE_MASK;
- break;
- case UCODE_ID_CP_PFP:
- result = UCODE_ID_CP_PFP_MASK;
- break;
- case UCODE_ID_CP_ME:
- result = UCODE_ID_CP_ME_MASK;
- break;
- case UCODE_ID_CP_MEC_JT1:
- result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
- break;
- case UCODE_ID_CP_MEC_JT2:
- result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT2_MASK;
- break;
- case UCODE_ID_RLC_G:
- result = UCODE_ID_RLC_G_MASK;
- break;
- default:
- printk(KERN_ERR "UCode type is out of range!");
- result = 0;
- }
-
- return result;
-}
-
-/* Populate one firmware image to the data structure */
-static int fiji_populate_single_firmware_entry(struct pp_smumgr *smumgr,
- uint32_t fw_type, struct SMU_Entry *entry)
-{
- int result;
- struct cgs_firmware_info info = {0};
-
- result = cgs_get_firmware_info(
- smumgr->device,
- fiji_convert_fw_type_to_cgs(fw_type),
- &info);
-
- if (!result) {
- entry->version = 0;
- entry->id = (uint16_t)fw_type;
- entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
- entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
- entry->meta_data_addr_high = 0;
- entry->meta_data_addr_low = 0;
- entry->data_size_byte = info.image_size;
- entry->num_register_entries = 0;
-
- if (fw_type == UCODE_ID_RLC_G)
- entry->flags = 1;
- else
- entry->flags = 0;
- }
-
- return result;
-}
-
-static int fiji_request_smu_load_fw(struct pp_smumgr *smumgr)
-{
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
- uint32_t fw_to_load;
- struct SMU_DRAMData_TOC *toc;
-
- if (priv->soft_regs_start)
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
- priv->soft_regs_start +
- offsetof(SMU73_SoftRegisters, UcodeLoadStatus),
- 0x0);
-
- toc = (struct SMU_DRAMData_TOC *)priv->header;
- toc->num_entries = 0;
- toc->structure_version = 1;
-
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
-
- fiji_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI,
- priv->header_buffer.mc_addr_high);
- fiji_send_msg_to_smc_with_parameter(smumgr,PPSMC_MSG_DRV_DRAM_ADDR_LO,
- priv->header_buffer.mc_addr_low);
-
- fw_to_load = UCODE_ID_RLC_G_MASK
- + UCODE_ID_SDMA0_MASK
- + UCODE_ID_SDMA1_MASK
- + UCODE_ID_CP_CE_MASK
- + UCODE_ID_CP_ME_MASK
- + UCODE_ID_CP_PFP_MASK
- + UCODE_ID_CP_MEC_MASK
- + UCODE_ID_CP_MEC_JT1_MASK
- + UCODE_ID_CP_MEC_JT2_MASK;
-
- if (fiji_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_LoadUcodes, fw_to_load))
- printk(KERN_ERR "Fail to Request SMU Load uCode");
-
- return 0;
-}
-
-
-/* Check if the FW has been loaded, SMU will not return
- * if loading has not finished.
- */
-static int fiji_check_fw_load_finish(struct pp_smumgr *smumgr,
- uint32_t fw_type)
-{
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
- uint32_t mask = fiji_get_mask_for_firmware_type(fw_type);
-
- /* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */
- if (smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX,
- priv->soft_regs_start +
- offsetof(SMU73_SoftRegisters, UcodeLoadStatus),
- mask, mask)) {
- printk(KERN_ERR "check firmware loading failed\n");
- return -EINVAL;
- }
- return 0;
-}
-
-
-static int fiji_reload_firmware(struct pp_smumgr *smumgr)
-{
- return smumgr->smumgr_funcs->start_smu(smumgr);
-}
-
-static bool fiji_is_hw_virtualization_enabled(struct pp_smumgr *smumgr)
-{
- uint32_t value;
-
- value = cgs_read_register(smumgr->device, mmBIF_IOV_FUNC_IDENTIFIER);
- if (value & BIF_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK) {
- /* driver reads on SR-IOV enabled PF: 0x80000000
- * driver reads on SR-IOV enabled VF: 0x80000001
- * driver reads on SR-IOV disabled: 0x00000000
- */
- return true;
- }
- return false;
-}
-
-static int fiji_request_smu_specific_fw_load(struct pp_smumgr *smumgr, uint32_t fw_type)
-{
- if (fiji_is_hw_virtualization_enabled(smumgr)) {
- uint32_t masks = fiji_get_mask_for_firmware_type(fw_type);
- if (fiji_send_msg_to_smc_with_parameter_without_waiting(smumgr,
- PPSMC_MSG_LoadUcodes, masks))
- printk(KERN_ERR "Fail to Request SMU Load uCode");
- }
- /* For non-virtualization cases,
- * SMU loads all FWs at once in fiji_request_smu_load_fw.
- */
- return 0;
-}
-
static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
{
int result = 0;
@@ -571,7 +69,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = fiji_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(smumgr);
if (result)
return result;
@@ -610,8 +108,8 @@ static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
SMU_STATUS, SMU_DONE, 0);
/* Check pass/failed indicator */
- if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
- SMU_STATUS, SMU_PASS)) {
+ if (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMU_STATUS, SMU_PASS) != 1) {
PP_ASSERT_WITH_CODE(false,
"SMU Firmware start failed!", return -1);
}
@@ -639,12 +137,12 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = fiji_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(smumgr);
if (result)
return result;
/* Set smc instruct start point at 0x0 */
- fiji_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(smumgr);
/* Enable clock */
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
@@ -698,15 +196,15 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED;
if (priv->avfs.AvfsBtcParam) {
- if (!fiji_send_msg_to_smc_with_parameter(smumgr,
+ if (!smum_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) {
- if (!fiji_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) {
+ if (!smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) {
priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED;
result = 0;
} else {
printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] Attempt"
" to Enable AVFS Failed!");
- fiji_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs);
+ smum_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs);
result = -1;
}
} else {
@@ -736,7 +234,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */
inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */
- PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header,
PmFuseTable), &table_start, 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate "
@@ -748,13 +246,13 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
inversion_voltage_addr = table_start +
offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage);
- result = fiji_copy_bytes_to_smc(smumgr, charz_freq_addr,
+ result = smu7_copy_bytes_to_smc(smumgr, charz_freq_addr,
(uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000);
PP_ASSERT_WITH_CODE(0 == result,
"[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not "
"be populated.", return -1;);
- result = fiji_copy_bytes_to_smc(smumgr, inversion_voltage_addr,
+ result = smu7_copy_bytes_to_smc(smumgr, inversion_voltage_addr,
(uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000);
PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] "
"charz_freq could not be populated.", return -1;);
@@ -769,7 +267,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
uint32_t level_addr, vr_config_addr;
uint32_t level_size = sizeof(avfs_graphics_level);
- PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, DpmTable),
&table_start, 0x40000),
@@ -784,7 +282,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
vr_config_addr = table_start +
offsetof(SMU73_Discrete_DpmTable, VRConfig);
- PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, vr_config_addr,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_addr,
(uint8_t *)&vr_config, sizeof(int32_t), 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] Problems copying "
"vr_config value over to SMC",
@@ -792,7 +290,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, level_addr,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, level_addr,
(uint8_t *)(&avfs_graphics_level), level_size, 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!",
return -1;);
@@ -839,13 +337,13 @@ int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
break;
case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/
priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
- PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr,
- PPSMC_MSG_VftTableIsValid),
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
+ 0x666),
"[AVFS][fiji_avfs_event_mgr] SMU did not respond "
"correctly to VftTableIsValid Msg",
return -1;);
priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
- PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
PPSMC_MSG_EnableAvfs),
"[AVFS][fiji_avfs_event_mgr] SMU did not respond "
"correctly to EnableAvfs Message Msg",
@@ -898,7 +396,7 @@ static int fiji_start_smu(struct pp_smumgr *smumgr)
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
/* Only start SMC if SMC RAM is not running */
- if (!fiji_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(smumgr)) {
fiji_avfs_event_mgr(smumgr, false);
/* Check if SMU is running in protected mode */
@@ -929,12 +427,12 @@ static int fiji_start_smu(struct pp_smumgr *smumgr)
/* Setup SoftRegsStart here for register lookup in case
* DummyBackEnd is used and ProcessFirmwareHeader is not executed
*/
- fiji_read_smc_sram_dword(smumgr,
+ smu7_read_smc_sram_dword(smumgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, SoftRegisters),
- &(priv->soft_regs_start), 0x40000);
+ &(priv->smu7_data.soft_regs_start), 0x40000);
- result = fiji_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(smumgr);
return result;
}
@@ -963,28 +461,10 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
static int fiji_smu_init(struct pp_smumgr *smumgr)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
- uint64_t mc_addr;
-
- priv->header_buffer.data_size =
- ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- smu_allocate_memory(smumgr->device,
- priv->header_buffer.data_size,
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
- PAGE_SIZE,
- &mc_addr,
- &priv->header_buffer.kaddr,
- &priv->header_buffer.handle);
-
- priv->header = priv->header_buffer.kaddr;
- priv->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
- priv->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
- PP_ASSERT_WITH_CODE((NULL != priv->header),
- "Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
- (cgs_handle_t)priv->header_buffer.handle);
- return -1);
+ int i;
+
+ if (smu7_init(smumgr))
+ return -EINVAL;
priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT;
if (fiji_is_hw_avfs_present(smumgr))
@@ -999,37 +479,35 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
else
priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED;
- priv->acpi_optimization = 1;
+ for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
+ priv->activity_target[i] = 30;
return 0;
}
-static int fiji_smu_fini(struct pp_smumgr *smumgr)
-{
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-
- smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
-
- if (smumgr->backend) {
- kfree(smumgr->backend);
- smumgr->backend = NULL;
- }
-
- cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
- return 0;
-}
static const struct pp_smumgr_func fiji_smu_funcs = {
.smu_init = &fiji_smu_init,
- .smu_fini = &fiji_smu_fini,
+ .smu_fini = &smu7_smu_fini,
.start_smu = &fiji_start_smu,
- .check_fw_load_finish = &fiji_check_fw_load_finish,
- .request_smu_load_fw = &fiji_reload_firmware,
- .request_smu_load_specific_fw = &fiji_request_smu_specific_fw_load,
- .send_msg_to_smc = &fiji_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = &fiji_send_msg_to_smc_with_parameter,
+ .check_fw_load_finish = &smu7_check_fw_load_finish,
+ .request_smu_load_fw = &smu7_reload_firmware,
+ .request_smu_load_specific_fw = NULL,
+ .send_msg_to_smc = &smu7_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
+ .update_smc_table = fiji_update_smc_table,
+ .get_offsetof = fiji_get_offsetof,
+ .process_firmware_header = fiji_process_firmware_header,
+ .init_smc_table = fiji_init_smc_table,
+ .update_sclk_threshold = fiji_update_sclk_threshold,
+ .thermal_setup_fan_table = fiji_thermal_setup_fan_table,
+ .populate_all_graphic_levels = fiji_populate_all_graphic_levels,
+ .populate_all_memory_levels = fiji_populate_all_memory_levels,
+ .get_mac_definition = fiji_get_mac_definition,
+ .initialize_mc_reg_table = fiji_initialize_mc_reg_table,
+ .is_dpm_running = fiji_is_dpm_running,
};
int fiji_smum_init(struct pp_smumgr *smumgr)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
index b4eb483215b1..adcbdfb209be 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -23,37 +23,31 @@
#ifndef _FIJI_SMUMANAGER_H_
#define _FIJI_SMUMANAGER_H_
+#include "smu73_discrete.h"
+#include <pp_endian.h>
+#include "smu7_smumgr.h"
+
+
struct fiji_smu_avfs {
enum AVFS_BTC_STATUS AvfsBtcStatus;
uint32_t AvfsBtcParam;
};
-struct fiji_buffer_entry {
- uint32_t data_size;
- uint32_t mc_addr_low;
- uint32_t mc_addr_high;
- void *kaddr;
- unsigned long handle;
-};
struct fiji_smumgr {
- uint8_t *header;
- uint8_t *mec_image;
- uint32_t soft_regs_start;
+ struct smu7_smumgr smu7_data;
+
struct fiji_smu_avfs avfs;
- uint32_t acpi_optimization;
+ struct SMU73_Discrete_DpmTable smc_state_table;
+ struct SMU73_Discrete_Ulv ulv_setting;
+ struct SMU73_Discrete_PmFuses power_tune_table;
+ const struct fiji_pt_defaults *power_tune_defaults;
+ uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS];
- struct fiji_buffer_entry header_buffer;
};
-int fiji_smum_init(struct pp_smumgr *smumgr);
-int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
- uint32_t *value, uint32_t limit);
-int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
- uint32_t value, uint32_t limit);
-int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smcStartAddress,
- const uint8_t *src, uint32_t byteCount, uint32_t limit);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
new file mode 100644
index 000000000000..eda802bc63c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -0,0 +1,2576 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ *
+ */
+
+#include "iceland_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+#include "pp_endian.h"
+#include "smu7_ppsmc.h"
+
+#include "smu71_discrete.h"
+
+#include "smu/smu_7_1_1_d.h"
+#include "smu/smu_7_1_1_sh_mask.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "processpptables.h"
+
+#include "iceland_smumgr.h"
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define VDDC_VDDCI_DELTA 200
+
+#define DEVICE_ID_VI_ICELAND_M_6900 0x6900
+#define DEVICE_ID_VI_ICELAND_M_6901 0x6901
+#define DEVICE_ID_VI_ICELAND_M_6902 0x6902
+#define DEVICE_ID_VI_ICELAND_M_6903 0x6903
+
+static const struct iceland_pt_defaults defaults_iceland = {
+ /*
+ * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
+ * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
+ */
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+/* 35W - XT, XTL */
+static const struct iceland_pt_defaults defaults_icelandxt = {
+ /*
+ * sviLoadLIneEn, SviLoadLineVddC,
+ * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+ * BAPM_TEMP_GRADIENT
+ */
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+ { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0},
+ { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+/* 25W - PRO, LE */
+static const struct iceland_pt_defaults defaults_icelandpro = {
+ /*
+ * sviLoadLIneEn, SviLoadLineVddC,
+ * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+ * BAPM_TEMP_GRADIENT
+ */
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+ { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0},
+ { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct cgs_system_info sys_info = {0};
+ uint32_t dev_id;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ switch (dev_id) {
+ case DEVICE_ID_VI_ICELAND_M_6900:
+ case DEVICE_ID_VI_ICELAND_M_6903:
+ smu_data->power_tune_defaults = &defaults_icelandxt;
+ break;
+
+ case DEVICE_ID_VI_ICELAND_M_6901:
+ case DEVICE_ID_VI_ICELAND_M_6902:
+ smu_data->power_tune_defaults = &defaults_icelandpro;
+ break;
+ default:
+ smu_data->power_tune_defaults = &defaults_iceland;
+ pr_warning("Unknown V.I. Device ID.\n");
+ break;
+ }
+ return;
+}
+
+static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->tdc_vddc_throttle_release_limit_perc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+ return 0;
+}
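+
+/*
+ * Note on the encoding above (example value assumed for illustration): the
+ * SMC expects the TDC package limit as an unsigned Q8.8 fixed-point number,
+ * so usTDC = 100 becomes 100 * 256 = 25600 (0x6400) before the byte swap
+ * done by CONVERT_FROM_HOST_TO_SMC_US().
+ */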
+
+static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ fuse_table_offset +
+ offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+ return -EINVAL);
+ else
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+ return 0;
+}
+
+static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 8; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+
+ HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+ return 0;
+}
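+
+/*
+ * The divide-by-100 above happens before the Q8.8 scaling, so the leakage
+ * values are truncated to whole percent first. For example (value assumed
+ * for illustration), usHighCACLeakage = 250 gives 250 / 100 * 256 =
+ * 2 * 256 = 512, not 640.
+ */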
+
+static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
+ uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+
+ PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
+ "The CAC Leakage table does not exist!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
+ "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+ "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
+ for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+ lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
+ hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
+ }
+ } else {
+ PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
+ }
+
+ return 0;
+}
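+
+/*
+ * convert_to_vid() maps a voltage in mV to an SVI2 VID code. A minimal
+ * sketch of the encoding, assuming the usual 6.25 mV-per-step SVI2 scheme
+ * (the helper is defined elsewhere, not in this file):
+ *
+ *	vid = (6200 - mV * VOLTAGE_SCALE) / 25;
+ *
+ * e.g. 900 mV -> (6200 - 3600) / 25 = 104.
+ */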
+
+static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ uint8_t *vid = smu_data->power_tune_table.VddCVid;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
+ "There should never be more than 8 entries for VddcVid!!!",
+ return -EINVAL);
+
+ for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
+ vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
+ }
+
+ return 0;
+}
+
+
+
+static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ /* DW0 - DW3 */
+ if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate bapm vddc vid Failed!",
+ return -EINVAL);
+
+ /* DW4 - DW5 */
+ if (iceland_populate_vddc_vid(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate vddc vid Failed!",
+ return -EINVAL);
+
+ /* DW6 */
+ if (iceland_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+ /* DW7 */
+ if (iceland_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+ /* DW8 */
+ if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ /* DW9-DW12 */
+ if (0 != iceland_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ /* DW13-DW16 */
+ if (iceland_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ /* DW17 */
+ if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Min and Max Vid Failed!",
+ return -EINVAL);
+
+ /* DW18 */
+ if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
+ return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ uint32_t clock, uint32_t *vol)
+{
+ uint32_t i = 0;
+
+ /* the clock/voltage dependency table is empty */
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ /* find first sclk bigger than request */
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ *vol = allowed_clock_voltage_table->entries[i].v;
+ return 0;
+ }
+ }
+
+ /* sclk is bigger than the max sclk in the dependency table */
+ *vol = allowed_clock_voltage_table->entries[i - 1].v;
+
+ return 0;
+}
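+
+/*
+ * The lookup above returns the voltage of the first entry whose clock is
+ * >= the requested clock, and clamps to the last entry when the request
+ * exceeds the table. E.g. (table assumed for illustration) with entries
+ * {30000:900mV, 60000:1000mV, 80000:1100mV}, a request of 65000 yields
+ * 1100 mV, and a request of 90000 also yields 1100 mV.
+ */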
+
+static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
+ pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
+ uint16_t *lo)
+{
+ uint16_t v_index;
+ bool vol_found = false;
+ *hi = tab->value * VOLTAGE_SCALE;
+ *lo = tab->value * VOLTAGE_SCALE;
+
+ /* SCLK/VDDC Dependency Table has to exist. */
+ PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
+ "The SCLK/VDDC Dependency Table does not exist.\n",
+ return -EINVAL);
+
+ if (NULL == hwmgr->dyn_state.cac_leakage_table) {
+ pr_warning("CAC Leakage Table does not exist, using vddc.\n");
+ return 0;
+ }
+
+ /*
+ * Since voltage in the sclk/vddc dependency table is not
+ * necessarily in ascending order because of ELB voltage
+ * patching, loop through entire list to find exact voltage.
+ */
+ for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+ if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+ vol_found = true;
+ if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
+ } else {
+ pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+ }
+ break;
+ }
+ }
+
+ /*
+ * If voltage is not found in the first pass, loop again to
+ * find the best match, equal or higher value.
+ */
+ if (!vol_found) {
+ for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+ if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+ vol_found = true;
+ if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
+ } else {
+ pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+ }
+ break;
+ }
+ }
+
+ if (!vol_found)
+ pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
+ }
+
+ return 0;
+}
+
+static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
+ pp_atomctrl_voltage_table_entry *tab,
+ SMU71_Discrete_VoltageLevel *smc_voltage_tab)
+{
+ int result;
+
+ result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
+ &smc_voltage_tab->StdVoltageHiSidd,
+ &smc_voltage_tab->StdVoltageLoSidd);
+ if (0 != result) {
+ smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
+ smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
+ }
+
+ smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+ CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+
+ return 0;
+}
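+
+/*
+ * The SMC is big-endian, so every multi-byte field is byte-swapped before
+ * upload; PP_HOST_TO_SMC_US()/CONVERT_FROM_HOST_TO_SMC_US() are expected
+ * to wrap cpu_to_be16(). E.g. a 1000 mV entry becomes 1000 * VOLTAGE_SCALE
+ * = 4000 = 0x0FA0 and is held as 0xA00F on a little-endian host.
+ */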
+
+static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->VddcLevelCount = data->vddc_voltage_table.count;
+ for (count = 0; count < table->VddcLevelCount; count++) {
+ result = iceland_populate_smc_voltage_table(hwmgr,
+ &(data->vddc_voltage_table.entries[count]),
+ &(table->VddcLevel[count]));
+ PP_ASSERT_WITH_CODE(0 == result, "failed to populate SMC VDDC voltage table", return -EINVAL);
+
+ /* GPIO voltage control */
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
+ table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ table->VddcLevel[count].Smio = 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+
+ return 0;
+}
+
+static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+ int result;
+
+ table->VddciLevelCount = data->vddci_voltage_table.count;
+
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ result = iceland_populate_smc_voltage_table(hwmgr,
+ &(data->vddci_voltage_table.entries[count]),
+ &(table->VddciLevel[count]));
+ PP_ASSERT_WITH_CODE(result == 0, "failed to populate SMC VDDCI voltage table", return -EINVAL);
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
+ else
+ table->VddciLevel[count].Smio |= 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+ return 0;
+}
+
+static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+ int result;
+
+ table->MvddLevelCount = data->mvdd_voltage_table.count;
+
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ result = iceland_populate_smc_voltage_table(hwmgr,
+ &(data->mvdd_voltage_table.entries[count]),
+ &table->MvddLevel[count]);
+ PP_ASSERT_WITH_CODE(result == 0, "failed to populate SMC MVDD voltage table", return -EINVAL);
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
+ table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
+ else
+ table->MvddLevel[count].Smio |= 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+
+ return 0;
+}
+
+
+static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = iceland_populate_smc_vddc_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate VDDC voltage table to SMC", return -EINVAL);
+
+ result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate VDDCI voltage table to SMC", return -EINVAL);
+
+ result = iceland_populate_smc_mvdd_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate MVDD voltage table to SMC", return -EINVAL);
+
+ return 0;
+}
+
+static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU71_Discrete_Ulv *state)
+{
+ uint32_t voltage_response_time, ulv_voltage;
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
+ PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
+
+ if (ulv_voltage == 0) {
+ data->ulv_supported = false;
+ return 0;
+ }
+
+ if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+ if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+ state->VddcOffset = 0;
+ else
+ /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
+ state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
+ } else {
+ /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+ if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+ state->VddcOffsetVid = 0;
+ else /* used in SVI2 Mode */
+ state->VddcOffsetVid = (uint8_t)(
+ (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
+ * VOLTAGE_VID_OFFSET_SCALE2
+ / VOLTAGE_VID_OFFSET_SCALE1);
+ }
+ state->VddcPhase = 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
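+
+/*
+ * The SVI2 branch above converts the mV offset into 6.25 mV VID steps via
+ * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 = 100 / 625. For
+ * example (offset assumed for illustration), a 25 mV offset gives
+ * 25 * 100 / 625 = 4 VID steps.
+ */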
+
+static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_Ulv *ulv_level)
+{
+ return iceland_populate_ulv_level(hwmgr, ulv_level);
+}
+
+static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t i;
+
+ /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity =
+ 1;
+ table->LinkLevel[i].SPC =
+ (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold =
+ PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold =
+ PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
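+
+/*
+ * Note that DownThreshold/UpThreshold are stored pre-swapped with
+ * PP_HOST_TO_SMC_UL(), which is expected to be cpu_to_be32(); on a
+ * little-endian host the constant 5 is therefore held as 0x05000000 in
+ * the table, matching the SMC's big-endian view.
+ */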
+
+/**
+ * Calculates the SCLK dividers using the provided engine clock
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param engine_clock the engine clock to use to populate the structure
+ * @param sclk the SMC SCLK structure to be populated
+ */
+static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t reference_clock;
+ uint32_t reference_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+ reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+ reference_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* low 14 bits is fraction and high 12 bits is divider*/
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup*/
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ pp_atomctrl_internal_ss_info ss_info;
+
+ uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+ if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ */
+ /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+ uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 =
+ PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+ }
+ }
+
+ sclk->SclkFrequency = engine_clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
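+
+/*
+ * Worked example of the feedback divider math above (numbers assumed for
+ * illustration): with a 27 MHz reference, reference divider 1 and a
+ * 600 MHz target SCLK with post divider 2, the VCO runs at 1200 MHz, so
+ * fbdiv = 1200 / 27 * 16384 ~= 728177 in the 12.14 fixed-point format
+ * (high 12 bits divider, low 14 bits fraction).
+ */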
+
+static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
+ const struct phm_phase_shedding_limits_table *pl,
+ uint32_t sclk, uint32_t *p_shed)
+{
+ unsigned int i;
+
+ /* use the minimum phase shedding */
+ *p_shed = 1;
+
+ for (i = 0; i < pl->count; i++) {
+ if (sclk < pl->entries[i].Sclk) {
+ *p_shed = i;
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Populates a single SMC graphics level structure using the provided engine clock
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param engine_clock the engine clock to use to populate the structure
+ * @param sclk_activity_level_threshold the activity threshold for this level
+ * @param graphic_level the SMC graphics level structure to be populated
+ */
+static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint16_t sclk_activity_level_threshold,
+ SMU71_Discrete_GraphicsLevel *graphic_level)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+ /* populate graphics levels*/
+ result = iceland_get_dependecy_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
+ &graphic_level->MinVddc);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find VDDC voltage value for VDDC \
+ engine clock dependency table", return result);
+
+ /* SCLK frequency in units of 10KHz*/
+ graphic_level->SclkFrequency = engine_clock;
+ graphic_level->MinVddcPhases = 1;
+
+ if (data->vddc_phase_shed_control)
+ iceland_populate_phase_value_based_on_sclk(hwmgr,
+ hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ engine_clock,
+ &graphic_level->MinVddcPhases);
+
+ /* Indicates maximum activity level for this performance level. 50% for now*/
+ graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+ graphic_level->CcPwrDynRm = 0;
+ graphic_level->CcPwrDynRm1 = 0;
+ /* this level can be used if activity is high enough.*/
+ graphic_level->EnabledForActivity = 0;
+ /* this level can be used for throttling.*/
+ graphic_level->EnabledForThrottle = 1;
+ graphic_level->UpHyst = 0;
+ graphic_level->DownHyst = 100;
+ graphic_level->VoltageDownHyst = 0;
+ graphic_level->PowerThrottle = 0;
+
+ data->display_timing.min_clock_in_sr =
+ hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep))
+ graphic_level->DeepSleepDivId =
+ smu7_get_sleep_divider_id_from_clock(engine_clock,
+ data->display_timing.min_clock_in_sr);
+
+ /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+ graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (0 == result) {
+ graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+ }
+
+ return result;
+}
+
+/**
+ * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+ *
+ * @param hwmgr the address of the hardware manager
+ */
+int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
+
+ uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
+ SMU71_MAX_LEVELS_GRAPHICS;
+
+ SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+ uint32_t i;
+ uint8_t highest_pcie_level_enabled = 0;
+ uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+ uint8_t count = 0;
+ int result = 0;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = iceland_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result != 0)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+ }
+
+ /* Only enable level 0 for now. */
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ if (dpm_table->sclk_table.count > 1)
+ smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (highest_pcie_level_enabled + 1))) != 0) {
+ highest_pcie_level_enabled++;
+ }
+
+ while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0) {
+ lowest_pcie_level_enabled++;
+ }
+
+ while ((count < highest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
+ count++;
+ }
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
+
+
+ /* set pcieDpmLevel to highest_pcie_level_enabled*/
+ for (i = 2; i < dpm_table->sclk_table.count; i++) {
+ smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+ }
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress,
+ (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
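+
+/*
+ * Example of the PCIe mask scan above (mask assumed for illustration):
+ * with pcie_dpm_enable_mask = 0b0110, the loops yield
+ * lowest_pcie_level_enabled = 1, highest_pcie_level_enabled = 2 and
+ * mid_pcie_level_enabled = 2; graphics levels 2 and up then point at PCIe
+ * level 2, level 0 at 1 and level 1 at 2.
+ */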
+
+/**
+ * Populates the SMC MCLK structure using the provided memory clock
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param memory_clock the memory clock to use to populate the structure
+ * @param mclk the SMC memory level structure to be populated
+ * @param strobe_mode whether strobe mode is in use for this level
+ * @param dllStateOn whether the memory DLL should stay powered
+ */
+static int iceland_calculate_mclk_params(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU71_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dllStateOn
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+ uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+ uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+ uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+ uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+ uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+ uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
+ uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+ pp_atomctrl_memory_clock_param mpll_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+ memory_clock, &mpll_param, strobe_mode);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Error retrieving Memory Clock Parameters from VBIOS.", return result);
+
+ /* MPLL_FUNC_CNTL setup*/
+ mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
+
+ /* MPLL_FUNC_CNTL_1 setup*/
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
+
+ /* MPLL_AD_FUNC_CNTL setup*/
+ mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+ MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+
+ if (data->is_memory_gddr5) {
+ /* MPLL_DQ_FUNC_CNTL setup*/
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+ /*
+ ************************************
+ Fref = Reference Frequency
+ NF = Feedback divider ratio
+ NR = Reference divider ratio
+ Fnom = Nominal VCO output frequency = Fref * NF / NR
+ Fs = Spreading Rate
+ D = Percentage down-spread / 2
+ Fint = Reference input frequency to PFD = Fref / NR
+ NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+ CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+ NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+ CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+ *************************************
+ */
+ pp_atomctrl_internal_ss_info ss_info;
+ uint32_t freq_nom;
+ uint32_t tmp;
+ uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+ /* for GDDR5 for all modes and DDR3 */
+ if (1 == mpll_param.qdr)
+ freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+ else
+ freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+ /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
+ tmp = (freq_nom / reference_clock);
+ tmp = tmp * tmp;
+
+ if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+ /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+ /* ss.Info.speed_spectrum_rate -- in unit of khz */
+ /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+ /* = reference_clock * 5 / speed_spectrum_rate */
+ uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+ /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+ /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+ uint32_t clkv =
+ (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+ ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+ mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+ }
+ }
+
+ /* MCLK_PWRMGT_CNTL setup */
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+
+ /* Save the result data to the output memory level structure */
+ mclk->MclkFrequency = memory_clock;
+ mclk->MpllFuncCntl = mpll_func_cntl;
+ mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+ mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+ mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+ mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+ mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+ mclk->DllCntl = dll_cntl;
+ mclk->MpllSs1 = mpll_ss1;
+ mclk->MpllSs2 = mpll_ss2;
+
+ return 0;
+}
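+
+/*
+ * Worked example of the CLKS term above (numbers assumed for
+ * illustration): with a 27 MHz MPLL reference (2700 in 10 kHz units) and
+ * a 30 kHz spread spectrum rate, clks = 2700 * 5 / 30 = 450, i.e. NS - 1
+ * per the derivation in the comment block.
+ */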
+
+static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
+ bool strobe_mode)
+{
+ uint8_t mc_para_index;
+
+ if (strobe_mode) {
+ if (memory_clock < 12500) {
+ mc_para_index = 0x00;
+ } else if (memory_clock > 47500) {
+ mc_para_index = 0x0f;
+ } else {
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+ }
+ } else {
+ if (memory_clock < 65000) {
+ mc_para_index = 0x00;
+ } else if (memory_clock > 135000) {
+ mc_para_index = 0x0f;
+ } else {
+ mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+ }
+ }
+
+ return mc_para_index;
+}
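+
+/*
+ * Example (clocks assumed for illustration): in strobe mode a 300 MHz
+ * MCLK (30000 in 10 kHz units) maps to (30000 - 10000) / 2500 = 8;
+ * outside strobe mode a 1000 MHz MCLK maps to (100000 - 60000) / 5000 = 8.
+ */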
+
+static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+ uint8_t mc_para_index;
+
+ if (memory_clock < 10000) {
+ mc_para_index = 0;
+ } else if (memory_clock >= 80000) {
+ mc_para_index = 0x0f;
+ } else {
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+ }
+
+ return mc_para_index;
+}
+
+static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
+ uint32_t memory_clock, uint32_t *p_shed)
+{
+ unsigned int i;
+
+ *p_shed = 1;
+
+ for (i = 0; i < pl->count; i++) {
+ if (memory_clock < pl->entries[i].Mclk) {
+ *p_shed = i;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int iceland_populate_single_memory_level(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU71_Discrete_MemoryLevel *memory_level
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int result = 0;
+ bool dll_state_on;
+ struct cgs_display_info info = {0};
+ uint32_t mclk_edc_wr_enable_threshold = 40000;
+ uint32_t mclk_edc_enable_threshold = 40000;
+ uint32_t mclk_strobe_mode_threshold = 40000;
+
+ if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
+ result = iceland_get_dependecy_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
+ }
+
+ if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
+ memory_level->MinVddci = memory_level->MinVddc;
+ } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
+ result = iceland_get_dependecy_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddci_dependency_on_mclk,
+ memory_clock,
+ &memory_level->MinVddci);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
+ }
+
+ memory_level->MinVddcPhases = 1;
+
+ if (data->vddc_phase_shed_control) {
+ iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ memory_clock, &memory_level->MinVddcPhases);
+ }
+
+ memory_level->EnabledForThrottle = 1;
+ memory_level->EnabledForActivity = 0;
+ memory_level->UpHyst = 0;
+ memory_level->DownHyst = 100;
+ memory_level->VoltageDownHyst = 0;
+
+ /* Indicates maximum activity level for this performance level.*/
+ memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ memory_level->StutterEnable = 0;
+ memory_level->StrobeEnable = 0;
+ memory_level->EdcReadEnable = 0;
+ memory_level->EdcWriteEnable = 0;
+ memory_level->RttEnable = 0;
+
+ /* default set to low watermark. Highest level will be set to high later.*/
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ data->display_timing.num_existing_displays = info.display_count;
+
+ /* stutter mode is not supported on iceland */
+
+ /* decide strobe mode*/
+ memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+ (memory_clock <= mclk_strobe_mode_threshold);
+
+ /* decide EDC mode and memory clock ratio*/
+ if (data->is_memory_gddr5) {
+ memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
+ memory_level->StrobeEnable);
+
+ if ((mclk_edc_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_enable_threshold)) {
+ memory_level->EdcReadEnable = 1;
+ }
+
+ if ((mclk_edc_wr_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_wr_enable_threshold)) {
+ memory_level->EdcWriteEnable = 1;
+ }
+
+ if (memory_level->StrobeEnable) {
+ if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
+ ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ else
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+ } else
+ dll_state_on = data->dll_default_on;
+ } else {
+ memory_level->StrobeRatio =
+ iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ result = iceland_calculate_mclk_params(hwmgr,
+ memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+ if (0 == result) {
+ memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
+ memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
+ memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
+ /* MCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+ /* Indicates maximum activity level for this performance level.*/
+ CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+ }
+
+ return result;
+}
+
+/**
+ * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+ *
+ * @param hwmgr the address of the hardware manager
+ */
+
+int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
+ uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
+ SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero", return -EINVAL);
+ result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
+ &(smu_data->smc_state_table.MemoryLevel[i]));
+ if (0 != result) {
+ return result;
+ }
+ }
+
+ /* Only enable level 0 for now.*/
+ smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+ /*
+ * To prevent MC activity in stutter mode from pushing DPM up, the UVD
+ * change complements this by putting the MCLK in a higher state by
+ * default, so that we are not affected by the up threshold or MCLK DPM latency.
+ */
+ smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ /* set highest level watermark to high*/
+ smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
+
+static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
+ SMU71_Discrete_VoltageLevel *voltage)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find the first mvdd value whose clock is >= the request */
+ for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
+ if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
+ /* Always round to higher voltage. */
+ voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
+ "MVDD Voltage is outside the supported range.", return -EINVAL);
+
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ int result = 0;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t vddc_phase_shed_control = 0;
+
+ SMU71_Discrete_VoltageLevel voltage_level;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+
+ /* The ACPI state should not do DPM on DC (or ever).*/
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (data->acpi_vddc)
+ table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
+ else
+ table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
+
+ table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
+ /* assign zero for now*/
+ table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* divider ID for required SCLK*/
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
+ CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+ /* For various features to be enabled/disabled while this level is active.*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ /* SCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+ table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+ table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
+ else {
+ if (data->acpi_vddci != 0)
+ table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
+ }
+
+ if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ /* Force reset on DLL*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+ /* Disable DLL in ACPIState*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+ /* Enable DLL bypass signal*/
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK0_BYPASS, 0);
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK1_BYPASS, 0);
+
+ table->MemoryACPILevel.DllCntl =
+ PP_HOST_TO_SMC_UL(dll_cntl);
+ table->MemoryACPILevel.MclkPwrmgtCntl =
+ PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+ table->MemoryACPILevel.MpllAdFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+ table->MemoryACPILevel.MpllDqFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl_1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+ table->MemoryACPILevel.MpllFuncCntl_2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+ table->MemoryACPILevel.MpllSs1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+ table->MemoryACPILevel.MpllSs2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ /* Indicates maximum activity level for this performance level.*/
+ table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = 0;
+ table->MemoryACPILevel.StrobeEnable = 0;
+ table->MemoryACPILevel.EdcReadEnable = 0;
+ table->MemoryACPILevel.EdcWriteEnable = 0;
+ table->MemoryACPILevel.RttEnable = 0;
+
+ return result;
+}
+
+static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_memory_timing_parameters(
+ struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint32_t memory_clock,
+ struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
+ )
+{
+ uint32_t dramTiming;
+ uint32_t dramTiming2;
+ uint32_t burstTime;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ engine_clock, memory_clock);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+ return 0;
+}
+
+/**
+ * Setup parameters for the MC ARB.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return 0 on success, an error code otherwise.
+ * This function is to be called from the SetPowerState table.
+ */
+static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ int result = 0;
+ SMU71_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+
+ memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
+
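+	/* Build one ARB timing entry for every (sclk, mclk) pair in the DPM tables. */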
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = iceland_populate_memory_timing_parameters
+ (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+
+ if (0 != result) {
+ break;
+ }
+ }
+ }
+
+ if (0 == result) {
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU71_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END
+ );
+ }
+
+ return result;
+}
+
+static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table*/
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+ if (0 != result) {
+ smu_data->smc_state_table.GraphicsBootLevel = 0;
+		printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value "
+			"in dependency table. Using Graphics DPM level 0!");
+ result = 0;
+ }
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+ if (0 != result) {
+ smu_data->smc_state_table.MemoryBootLevel = 0;
+		printk(KERN_ERR "[ powerplay ] VBIOS did not find boot memory clock value "
+			"in dependency table. Using Memory DPM level 0!");
+ result = 0;
+ }
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ table->BootVddci = table->BootVddc;
+ else
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
+
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+ return result;
+}
+
+static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr,
+ SMU71_Discrete_MCRegisters *mc_reg_table)
+{
+ const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)smumgr->backend;
+
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+ if (smu_data->mc_reg_table.validflag & 1<<j) {
+ PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+ "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
+ mc_reg_table->address[i].s0 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+
+ mc_reg_table->last = (uint8_t)i;
+
+ return 0;
+}
+
+/* convert register values from driver to SMC format */
+static void iceland_convert_mc_registers(
+ const struct iceland_mc_reg_entry *entry,
+ SMU71_Discrete_MCRegisterSet *data,
+ uint32_t num_entries, uint32_t valid_flag)
+{
+ uint32_t i, j;
+
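+	/* Copy only the registers whose bit is set in valid_flag, compacting them into data->value[]. */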
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & 1<<j) {
+ data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
+
+static int iceland_convert_mc_reg_table_entry_to_smc(
+ struct pp_smumgr *smumgr,
+ const uint32_t memory_clock,
+ SMU71_Discrete_MCRegisterSet *mc_reg_table_data
+ )
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ uint32_t i = 0;
+
+ for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+ if (memory_clock <=
+ smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+ break;
+ }
+ }
+
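+	/* No entry covers this clock: fall back to the highest-clock entry. */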
+ if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, smu_data->mc_reg_table.last,
+ smu_data->mc_reg_table.validflag);
+
+ return 0;
+}
+
+static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_MCRegisters *mc_regs)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int res;
+ uint32_t i;
+
+ for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+ res = iceland_convert_mc_reg_table_entry_to_smc(
+ hwmgr->smumgr,
+ data->dpm_table.mclk_table.dpm_levels[i].value,
+ &mc_regs->data[i]
+ );
+
+ if (0 != res)
+ result = res;
+ }
+
+ return result;
+}
+
+static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t address;
+ int32_t result;
+
+ if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+ return 0;
+
+
+ memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));
+
+ result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+ if (result != 0)
+ return result;
+
+
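+	/* Upload only the per-level data[] payload; the register address list was written at init time. */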
+ address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
+
+ return smu7_copy_bytes_to_smc(hwmgr->smumgr, address,
+ (uint8_t *)&smu_data->mc_regs.data[0],
+ sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
+ SMC_RAM_END);
+}
+
+static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+
+ memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
+ result = iceland_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize MCRegTable for the MC register addresses!", return result;);
+
+ result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize MCRegTable for driver state!", return result;);
+
+ return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
+ (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ uint8_t count, level;
+
+ count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
+
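+	/* Boot level: the first DPM level at or above the VBIOS bootup clock. */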
+ for (level = 0; level < count; level++) {
+ if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
+ >= data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
+ >= data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+ struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
+ struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
+ const uint16_t *def1, *def2;
+ int i, j, k;
+
+
+ /*
+ * TDP number of fraction bits are changed from 8 to 7 for Iceland
+ * as requested by SMC team
+ */
+
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+
+ dpm_table->DTETjOffset = 0;
+
+ dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+ /* The following are for new Iceland Multi-input fan/thermal control */
+ if (NULL != ppm) {
+ dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
+ dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
+ } else {
+ dpm_table->PPM_PkgPwrLimit = 0;
+ dpm_table->PPM_TemperatureLimit = 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
+
+ dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
+ def1 = defaults->bapmti_r;
+ def2 = defaults->bapmti_rc;
+
+ for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU71_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU71_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
+ dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
+ def1++;
+ def2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *tab)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ tab->SVI2Enable |= VDDC_ON_SVI2;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ tab->SVI2Enable |= VDDCI_ON_SVI2;
+ else
+ tab->MergedVddci = 1;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
+ tab->SVI2Enable |= MVDD_ON_SVI2;
+
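+	/*
+	 * Sanity check: VDDC must be SVI2-controlled, and not all three
+	 * domains may be on SVI2 at once (presumably at least one of
+	 * VDDCI/MVDD has to stay merged or GPIO-controlled).
+	 */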
+ PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
+ (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
+
+ return 0;
+}
+
+/**
+ * Initializes the SMC table and uploads it
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return 0 on success, an error code otherwise.
+ */
+int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+
+
+ iceland_initialize_power_tune_defaults(hwmgr);
+ memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
+ iceland_populate_smc_voltage_tables(hwmgr, table);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+
+ if (data->ulv_supported) {
+ result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result;);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = iceland_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result;);
+
+ result = iceland_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result;);
+
+ result = iceland_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result;);
+
+ result = iceland_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result;);
+
+ result = iceland_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result;);
+
+ result = iceland_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACP Level!", return result;);
+
+ result = iceland_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result;);
+
+	/*
+	 * Since only the initial state is completely set up at this point
+	 * (the other states are just copies of the boot state), we only
+	 * need to populate the ARB settings for the initial state.
+	 */
+ result = iceland_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result;);
+
+ result = iceland_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result;);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ result = iceland_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result;);
+
+ result = iceland_populate_smc_initial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
+
+ result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
+
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+
+ table->TemperatureLimitHigh =
+ (data->thermal_temp_setting.temperature_high *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ table->TemperatureLimitLow =
+ (data->thermal_temp_setting.temperature_low *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0;
+ table->PCIeGenInterval = 1;
+
+ result = iceland_populate_smc_svi2_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate SVI2 setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
+ table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
+ table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
+
+ /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU71_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
+ SMC_RAM_END);
+
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result;);
+
+ /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ smu_data->smu7_data.ulv_setting_starts,
+ (uint8_t *)&(smu_data->ulv_setting),
+ sizeof(SMU71_Discrete_Ulv),
+ SMC_RAM_END);
+
+
+ result = iceland_populate_initial_mc_reg_table(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result),
+		"Failed to populate initial MC Reg table!", return result);
+
+ result = iceland_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+
+ return 0;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param hwmgr the address of the powerplay hardware manager.
+* @return 0 on success, an error code otherwise.
+*/
+int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
+ SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
+ return 0;
+
+ if (0 == smu7_data->fan_table_start) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (0 == duty100) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
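+	/* usPWMMin is assumed to be in 0.01% units; scale it to FDO duty counts. */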
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
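+	/*
+	 * Convert each PWM-vs-temperature segment into a fixed-point slope;
+	 * temperatures are in 0.01 degC units, the factor 16 is assumed to be
+	 * the SMC's fixed-point scale, and the +50 ... /100 terms round to
+	 * the nearest unit.
+	 */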
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
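+	/*
+	 * Scale the fan-control cycle delay into reference-clock ticks; the
+	 * 1600 divisor is assumed to fold together the delay units and the
+	 * 10 kHz reference-clock granularity.
+	 */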
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ /* fan_table.FanControl_GL_Flag = 1; */
+
+	res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
+
+	return res;
+}
+
+
+static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return iceland_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU71_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+
+ result = iceland_update_and_upload_mc_reg_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
+
+ result = iceland_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+
+ return result;
+}
+
+uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU71_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU71_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU71_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
+		}
+		break;
+	case SMU_Discrete_DpmTable:
+ switch (member) {
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+	printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+uint32_t iceland_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU71_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU71_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU71_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU71_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU71_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU71_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU71_MAX_LEVELS_MVDD;
+ }
+
+	printk(KERN_WARNING "can't get the mac definition of %x\n", value);
+ return 0;
+}
+
+/**
+ * Get the location of various tables inside the FW image.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return 0 on success, 1 otherwise.
+ */
+int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
+
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->dpm_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ data->soft_regs_start = tmp;
+ smu7_data->soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->mc_reg_table_start = tmp;
+	}
+
+	error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->fan_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->arb_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ hwmgr->microcode_version_info.SMC = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, UlvSettings),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->ulv_setting_starts = tmp;
+ }
+
+ error |= (0 != result);
+
+ return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
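+/* The memory module index is assumed to live in bits [23:16] of BIOS_SCRATCH_4. */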
+static uint8_t iceland_get_memory_module_index(struct pp_hwmgr *hwmgr)
+{
+ return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
+static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+ bool result = true;
+
+ switch (in_reg) {
+ case mmMC_SEQ_RAS_TIMING:
+ *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_DLL_STBY:
+ *out_reg = mmMC_SEQ_DLL_STBY_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD0:
+ *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD1:
+ *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CTRL:
+ *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+ break;
+
+ case mmMC_SEQ_CAS_TIMING:
+ *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING:
+ *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING2:
+ *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CMD:
+ *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CTL:
+ *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D0:
+ *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D1:
+ *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D0:
+ *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D1:
+ *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+ break;
+
+ case mmMC_PMG_CMD_EMRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS1:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ break;
+
+ case mmMC_SEQ_PMG_TIMING:
+ *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS2:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_2:
+ *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+ break;
+
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
+{
+ uint32_t i;
+ uint16_t address;
+
+ for (i = 0; i < table->last; i++) {
+ table->mc_reg_address[i].s0 =
+ iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
+ ? address : table->mc_reg_address[i].s1;
+ }
+ return 0;
+}
+
+static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+ struct iceland_mc_reg_table *ni_table)
+{
+ uint8_t i, j;
+
+ PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ for (i = 0; i < table->last; i++) {
+ ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+ }
+ ni_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ ni_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++) {
+ ni_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ }
+
+ ni_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
+/**
+ * VBIOS omits some information to reduce size; we need to recover it here.
+ * 1. when we see mmMC_SEQ_MISC1, bits [31:16] (EMRS1) need to be written to
+ *    mmMC_PMG_CMD_EMRS/_LP[15:0], and bits [15:0] (MRS) need to be written
+ *    to mmMC_PMG_CMD_MRS/_LP[15:0].
+ * 2. when we see mmMC_SEQ_RESERVE_M, bits [15:0] (EMRS2) need to be written
+ *    to mmMC_PMG_CMD_MRS1/_LP[15:0].
+ * 3. these data need to be set for each clock range.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param table the address of MCRegTable
+ * @return 0 on success, an error code otherwise.
+ */
+static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+ struct iceland_mc_reg_table *table)
+{
+ uint8_t i, j, k;
+ uint32_t temp_reg;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
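+	/*
+	 * Walk the VBIOS-provided registers; j tracks the append slot where
+	 * the EMRS/MRS/MRS1 values recovered from MISC1/RESERVE_M are added
+	 * after the last original entry.
+	 */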
+ for (i = 0, j = table->last; i < table->last; i++) {
+ PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ switch (table->mc_reg_address[i].s1) {
+
+ case mmMC_SEQ_MISC1:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) |
+ ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+ if (!data->is_memory_gddr5) {
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ if (!data->is_memory_gddr5) {
+ table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ }
+
+ break;
+
+ case mmMC_SEQ_RESERVE_M:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
+static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
+{
+ uint8_t i, j;
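+	/* A register is valid only if its value differs between at least two clock entries. */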
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->validflag |= (1<<i);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ pp_atomctrl_mc_reg_table *table;
+ struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+	uint8_t module_index = iceland_get_memory_module_index(hwmgr);
+
+ table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+ if (NULL == table)
+ return -ENOMEM;
+
+ /* Program additional LP registers that are no longer programmed by VBIOS */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+ memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+ result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+ if (0 == result)
+ result = iceland_copy_vbios_smc_reg_table(table, ni_table);
+
+ if (0 == result) {
+ iceland_set_s0_mc_reg_index(ni_table);
+ result = iceland_set_mc_special_registers(hwmgr, ni_table);
+ }
+
+ if (0 == result)
+ iceland_set_valid_flag(ni_table);
+
+ kfree(table);
+
+ return result;
+}
+
+bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+ ? true : false;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
index 8bc38cb17b7f..13c8dbbccaf2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
@@ -20,17 +20,21 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#ifndef _ICELAND_SMC_H
+#define _ICELAND_SMC_H
-#ifndef _TONGA_CLOCK_POWER_GATING_H_
-#define _TONGA_CLOCK_POWER_GATING_H_
+#include "smumgr.h"
-#include "tonga_hwmgr.h"
-#include "pp_asicblocks.h"
-extern int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
-extern int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-extern int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
-extern int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
-extern int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id);
-#endif /* _TONGA_CLOCK_POWER_GATING_H_ */
+int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int iceland_init_smc_table(struct pp_hwmgr *hwmgr);
+int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t iceland_get_offsetof(uint32_t type, uint32_t member);
+uint32_t iceland_get_mac_definition(uint32_t value);
+int iceland_process_firmware_header(struct pp_hwmgr *hwmgr);
+int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr);
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
new file mode 100644
index 000000000000..eeafefc4acba
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+
+#include "smumgr.h"
+#include "iceland_smumgr.h"
+#include "pp_debug.h"
+#include "smu_ucode_xfer_vi.h"
+#include "ppsmc.h"
+#include "smu/smu_7_1_1_d.h"
+#include "smu/smu_7_1_1_sh_mask.h"
+#include "cgs_common.h"
+#include "iceland_smc.h"
+
+#define ICELAND_SMC_SIZE 0x20000
+
+static int iceland_start_smc(struct pp_smumgr *smumgr)
+{
+ SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+ return 0;
+}
+
+static void iceland_reset_smc(struct pp_smumgr *smumgr)
+{
+ SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL,
+ rst_reg, 1);
+}
+
+
+static void iceland_stop_smc_clock(struct pp_smumgr *smumgr)
+{
+ SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_CLOCK_CNTL_0,
+ ck_disable, 1);
+}
+
+static void iceland_start_smc_clock(struct pp_smumgr *smumgr)
+{
+ SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_CLOCK_CNTL_0,
+ ck_disable, 0);
+}
+
+static int iceland_smu_start_smc(struct pp_smumgr *smumgr)
+{
+	/* set SMC instruction start point to 0x0 */
+ smu7_program_jump_on_start(smumgr);
+
+ /* enable smc clock */
+ iceland_start_smc_clock(smumgr);
+
+ /* de-assert reset */
+ iceland_start_smc(smumgr);
+
+ SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS,
+ INTERRUPTS_ENABLED, 1);
+
+ return 0;
+}
+
+
+static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr,
+ uint32_t length, const uint8_t *src,
+ uint32_t limit, uint32_t start_addr)
+{
+ uint32_t byte_count = length;
+ uint32_t data;
+
+ PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
+
+ cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr);
+ SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+
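+	/* Pack the image big-endian, one 32-bit word at a time, through the auto-incrementing data port. */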
+ while (byte_count >= 4) {
+ data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
+ cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
+ src += 4;
+ byte_count -= 4;
+ }
+
+ SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+
+	PP_ASSERT_WITH_CODE((0 == byte_count), "SMC firmware size must be divisible by 4.", return -EINVAL);
+
+ return 0;
+}
+
+
+static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
+{
+ uint32_t val;
+ struct cgs_firmware_info info = {0};
+
+ if (smumgr == NULL || smumgr->device == NULL)
+ return -EINVAL;
+
+ /* load SMC firmware */
+ cgs_get_firmware_info(smumgr->device,
+ smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
+
+ if (info.image_size & 3) {
+		pr_err("[ powerplay ] SMC ucode size is not 4-byte aligned\n");
+ return -EINVAL;
+ }
+
+ if (info.image_size > ICELAND_SMC_SIZE) {
+ pr_err("[ powerplay ] SMC address is beyond the SMC RAM area\n");
+ return -EINVAL;
+ }
+
+ /* wait for smc boot up */
+ SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ RCU_UC_EVENTS, boot_seq_done, 0);
+
+ /* clear firmware interrupt enable flag */
+ val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ ixSMC_SYSCON_MISC_CNTL);
+ cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ ixSMC_SYSCON_MISC_CNTL, val | 1);
+
+ /* stop smc clock */
+ iceland_stop_smc_clock(smumgr);
+
+ /* reset smc */
+ iceland_reset_smc(smumgr);
+ iceland_upload_smc_firmware_data(smumgr, info.image_size,
+ (uint8_t *)info.kptr, ICELAND_SMC_SIZE,
+ info.ucode_start_address);
+
+ return 0;
+}
+
+static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
+ uint32_t firmwareType)
+{
+ return 0;
+}
+
+static int iceland_start_smu(struct pp_smumgr *smumgr)
+{
+ int result;
+
+ result = iceland_smu_upload_firmware_image(smumgr);
+ if (result)
+ return result;
+ result = iceland_smu_start_smc(smumgr);
+ if (result)
+ return result;
+
+ if (!smu7_is_smc_ram_running(smumgr)) {
+		printk(KERN_INFO "[ powerplay ] SMU not running, uploading firmware again\n");
+ result = iceland_smu_upload_firmware_image(smumgr);
+ if (result)
+ return result;
+
+ result = iceland_smu_start_smc(smumgr);
+ if (result)
+ return result;
+ }
+
+ result = smu7_request_smu_load_fw(smumgr);
+
+ return result;
+}
+
+/**
+ * Initialize the Iceland SMU manager backend.
+ *
+ * @param smumgr the address of the powerplay SMU manager.
+ * @return 0 on success, an error code otherwise.
+ */
+static int iceland_smu_init(struct pp_smumgr *smumgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ if (smu7_init(smumgr))
+ return -EINVAL;
+
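+	/* Default every graphics DPM level to a 30% activity target. */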
+ for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++)
+ smu_data->activity_target[i] = 30;
+
+ return 0;
+}
+
+static const struct pp_smumgr_func iceland_smu_funcs = {
+ .smu_init = &iceland_smu_init,
+ .smu_fini = &smu7_smu_fini,
+ .start_smu = &iceland_start_smu,
+ .check_fw_load_finish = &smu7_check_fw_load_finish,
+ .request_smu_load_fw = &smu7_reload_firmware,
+ .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw,
+ .send_msg_to_smc = &smu7_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+ .download_pptable_settings = NULL,
+ .upload_pptable_settings = NULL,
+ .get_offsetof = iceland_get_offsetof,
+ .process_firmware_header = iceland_process_firmware_header,
+ .init_smc_table = iceland_init_smc_table,
+ .update_sclk_threshold = iceland_update_sclk_threshold,
+ .thermal_setup_fan_table = iceland_thermal_setup_fan_table,
+ .populate_all_graphic_levels = iceland_populate_all_graphic_levels,
+ .populate_all_memory_levels = iceland_populate_all_memory_levels,
+ .get_mac_definition = iceland_get_mac_definition,
+ .initialize_mc_reg_table = iceland_initialize_mc_reg_table,
+ .is_dpm_running = iceland_is_dpm_running,
+};
+
+int iceland_smum_init(struct pp_smumgr *smumgr)
+{
+ struct iceland_smumgr *iceland_smu = NULL;
+
+ iceland_smu = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL);
+
+ if (iceland_smu == NULL)
+ return -ENOMEM;
+
+ smumgr->backend = iceland_smu;
+ smumgr->smumgr_funcs = &iceland_smu_funcs;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
new file mode 100644
index 000000000000..8eae01b37c40
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+
+#ifndef _ICELAND_SMUMGR_H_
+#define _ICELAND_SMUMGR_H_
+
+
+#include "smu7_smumgr.h"
+#include "pp_endian.h"
+#include "smu71_discrete.h"
+
+struct iceland_pt_defaults {
+ uint8_t svi_load_line_en;
+ uint8_t svi_load_line_vddc;
+ uint8_t tdc_vddc_throttle_release_limit_perc;
+ uint8_t tdc_mawt;
+ uint8_t tdc_waterfall_ctl;
+ uint8_t dte_ambient_temp_base;
+ uint32_t display_cac;
+ uint32_t bamp_temp_gradient;
+ uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
+ uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
+};
+
+struct iceland_mc_reg_entry {
+ uint32_t mclk_max;
+ uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct iceland_mc_reg_table {
+ uint8_t last; /* number of registers*/
+ uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/
+ uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
+ struct iceland_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct iceland_smumgr {
+ struct smu7_smumgr smu7_data;
+ struct SMU71_Discrete_DpmTable smc_state_table;
+ struct SMU71_Discrete_PmFuses power_tune_table;
+ struct SMU71_Discrete_Ulv ulv_setting;
+ const struct iceland_pt_defaults *power_tune_defaults;
+ SMU71_Discrete_MCRegisters mc_regs;
+ struct iceland_mc_reg_table mc_reg_table;
+ uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS];
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
new file mode 100644
index 000000000000..4ccc0b72324d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -0,0 +1,2287 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "polaris10_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "polaris10_smumgr.h"
+#include "pppcielanes.h"
+
+#include "smu_ucode_xfer_vi.h"
+#include "smu74_discrete.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gca/gfx_8_0_d.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "polaris10_pwrvirus.h"
+#include "smu7_ppsmc.h"
+#include "smu7_smumgr.h"
+
+#define POLARIS10_SMC_SIZE 0x20000
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VDDC_VDDCI_DELTA 200
+#define MC_CG_ARB_FREQ_F1 0x0b
+
+static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+ /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
+ { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
+};
+
+static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
+ {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
+ {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
+
+static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+ uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+ uint32_t i;
+ uint16_t vddci;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ *voltage = *mvdd = 0;
+
+ /* clock - voltage dependency table is empty table */
+ if (dep_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < dep_table->count; i++) {
+ /* find first sclk bigger than request */
+ if (dep_table->entries[i].clk >= clock) {
+ *voltage |= (dep_table->entries[i].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i].vddci)
+ *voltage |= (dep_table->entries[i].vddci *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+ VOLTAGE_SCALE;
+
+ *voltage |= 1 << PHASES_SHIFT;
+ return 0;
+ }
+ }
+
+ /* sclk is bigger than max sclk in the dependence table */
+ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i-1].vddci) {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+				(dep_table->entries[i - 1].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+ return 0;
+}
+
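+/* Scale a fan-gain percentage (0..100) to a Q12 fraction of full scale
+ * (value * 4096 / 100) for the SMC fan-gain fields.
+ */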
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+ uint32_t tmp;
+
+ tmp = raw_setting * 4096 / 100;
+ return (uint16_t)tmp;
+}
+
+static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ struct pp_advance_fan_control_parameters *fan_table =
+ &hwmgr->thermal_controller.advanceFanControlParameters;
+ int i, j, k;
+ const uint16_t *pdef1;
+ const uint16_t *pdef2;
+
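+ /* usTDP is given in watts; the scaling by 128 below converts it to
+ * the fixed-point format (7 fractional bits) the SMU74 firmware
+ * appears to expect.
+ */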
+ table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+ table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range!",
+ );
+
+ table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTargetOperatingTemp * 256);
+ table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitHotspot * 256);
+ table->FanGainEdge = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainEdge));
+ table->FanGainHotspot = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHotspot));
+
+ pdef1 = defaults->BAPMTI_R;
+ pdef2 = defaults->BAPMTI_RC;
+
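+ /* Flatten the 1-D defaults into the SMC's 3-D
+ * [iterations][sources][sinks] BAPMTI_R/BAPMTI_RC tables.
+ */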
+ for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU74_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU74_DTE_SINKS; k++) {
+ table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
+ table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
+ pdef1++;
+ pdef2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+ return 0;
+}
+
+static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ fuse_table_offset +
+ offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+ return -EINVAL);
+ else {
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+ smu_data->power_tune_table.LPMLTemperatureMin =
+ (uint8_t)((temp >> 16) & 0xff);
+ smu_data->power_tune_table.LPMLTemperatureMax =
+ (uint8_t)((temp >> 8) & 0xff);
+ smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+ }
+ return 0;
+}
+
+static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+/* TO DO move to hwmgr */
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+ || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint16_t hi_sidd, lo_sidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+ return 0;
+}
+
+static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+
+ if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ if (0 != polaris10_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan Control parameters Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Min and Max Vid Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+ "Sidd Failed!", return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+/**
+ * Mvdd table preparation for SMC.
+ *
+ * @param *hwmgr The address of the hardware manager.
+ * @param *table The SMC DPM table structure to be populated.
+ * @return 0
+ */
+static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count, level;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ count = data->mvdd_voltage_table.count;
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; level++) {
+ table->SmioTable2.Pattern[level].Voltage =
+ PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+ table->SmioTable2.Pattern[level].Smio =
+ (uint8_t) level;
+ table->Smio[level] |=
+ data->mvdd_voltage_table.entries[level].smio_low;
+ }
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+ table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ uint32_t count, level;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ count = data->vddci_voltage_table.count;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; ++level) {
+ table->SmioTable1.Pattern[level].Voltage =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
+ table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
+
+ table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
+ }
+ }
+
+ table->SmioMask1 = data->vddci_voltage_table.mask_low;
+
+ return 0;
+}
+
+/**
+* Preparation of vddc and vddgfx CAC tables for SMC.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+ /* The lookup table is already byte-swapped, so to use a value
+ * from it we need to swap it back.
+ * We populate the vddc CAC data into the BapmVddc table
+ * for both split and merged mode.
+ */
+ for (count = 0; count < lookup_table->count; count++) {
+ index = phm_get_voltage_index(lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
+ table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
+ }
+
+ return 0;
+}
+
+/**
+* Preparation of voltage tables for SMC.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+
+static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ polaris10_populate_smc_vddci_table(hwmgr, table);
+ polaris10_populate_smc_mvdd_table(hwmgr, table);
+ polaris10_populate_cac_table(hwmgr, table);
+
+ return 0;
+}
+
+static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_Ulv *state)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
+
+static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int i;
+
+ /* Index (dpm_table->pcie_speed_table.count)
+ * is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+ dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+
+/* To Do move to hwmgr */
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+
+static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ uint32_t i, ref_clk;
+
+ struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
+
+ ref_clk = smu7_get_xclk(hwmgr);
+
+ if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
+ table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
+ table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
+
+ table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
+ table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+ }
+ return;
+ }
+
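+ /* No range table in the VBIOS: fall back to the static Range_Table,
+ * deriving each transition frequency as ref_clk * fcw >> postdiv.
+ */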
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
+ smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
+
+ table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
+ table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
+ table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
+
+ table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
+ table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+ }
+}
+
+/**
+* Calculates the SCLK dividers using the provided engine clock
+*
+* @param hwmgr the address of the hardware manager
+* @param clock the engine clock to use to populate the structure
+* @param sclk the SMC SCLK structure to be populated
+*/
+static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, SMU_SclkSetting *sclk_setting)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct pp_atomctrl_clock_dividers_ai dividers;
+ uint32_t ref_clock;
+ uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
+ uint8_t i;
+ int result;
+ uint64_t temp;
+
+ sclk_setting->SclkFrequency = clock;
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
+ if (result == 0) {
+ sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
+ sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
+ sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
+ sclk_setting->PllRange = dividers.ucSclkPllRange;
+ sclk_setting->Sclk_slew_rate = 0x400;
+ sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
+ sclk_setting->Pcc_down_slew_rate = 0xffff;
+ sclk_setting->SSc_En = dividers.ucSscEnable;
+ sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
+ sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
+ sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
+ return result;
+ }
+
+ ref_clock = smu7_get_xclk(hwmgr);
+
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ if (clock > smu_data->range_table[i].trans_lower_frequency
+ && clock <= smu_data->range_table[i].trans_upper_frequency) {
+ sclk_setting->PllRange = i;
+ break;
+ }
+ }
+
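+ /* FCW = (clock << postdiv) / ref_clock, split into a 16-bit integer
+ * part and a 16-bit fraction (the << 0x10 below produces the
+ * fractional bits).
+ */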
+ sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+ temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+ temp <<= 0x10;
+ do_div(temp, ref_clock);
+ sclk_setting->Fcw_frac = temp & 0xffff;
+
+ pcc_target_percent = 10; /* Hardcode 10% for now. */
+ pcc_target_freq = clock - (clock * pcc_target_percent / 100);
+ sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+
+ ss_target_percent = 2; /* Hardcode 2% for now. */
+ sclk_setting->SSc_En = 0;
+ if (ss_target_percent) {
+ sclk_setting->SSc_En = 1;
+ ss_target_freq = clock - (clock * ss_target_percent / 100);
+ sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+ temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+ temp <<= 0x10;
+ do_div(temp, ref_clock);
+ sclk_setting->Fcw1_frac = temp & 0xffff;
+ }
+
+ return 0;
+}
+
+/**
+* Populates single SMC SCLK structure using the provided engine clock
+*
+* @param hwmgr the address of the hardware manager
+* @param clock the engine clock to use to populate the structure
+* @param sclk the SMC SCLK structure to be populated
+*/
+
+static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, uint16_t sclk_al_threshold,
+ struct SMU74_Discrete_GraphicsLevel *level)
+{
+ int result;
+ /* PP_Clocks minClocks; */
+ uint32_t mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ SMU_SclkSetting curr_sclk_setting = { 0 };
+
+ result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
+
+ /* populate graphics levels */
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk, clock,
+ &level->MinVoltage, &mvdd);
+
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find VDDC voltage value for "
+ "VDDC engine clock dependency table",
+ return result);
+ level->ActivityLevel = sclk_al_threshold;
+
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ level->EnabledForThrottle = 1;
+ level->UpHyst = 10;
+ level->DownHyst = 0;
+ level->VoltageDownHyst = 0;
+ level->PowerThrottle = 0;
+ data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
+ hwmgr->display_config.min_core_set_clock_in_sr);
+
+ /* Default to slow, highest DPM level will be
+ * set to PPSMC_DISPLAY_WATERMARK_LOW later.
+ */
+ if (data->update_up_hyst)
+ level->UpHyst = (uint8_t)data->up_hyst;
+ if (data->update_down_hyst)
+ level->DownHyst = (uint8_t)data->down_hyst;
+
+ level->SclkSetting = curr_sclk_setting;
+
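+ /* the SMC consumes multi-byte fields in big-endian order;
+ * swap everything from host order before upload
+ */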
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
+ return 0;
+}
+
+/**
+* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+*
+* @param hwmgr the address of the hardware manager
+*/
+int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
+ int result = 0;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
+ SMU74_MAX_LEVELS_GRAPHICS;
+ struct SMU74_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t i, max_entry;
+ uint8_t highest_pcie_level_enabled = 0,
+ lowest_pcie_level_enabled = 0,
+ mid_pcie_level_enabled = 0,
+ count = 0;
+
+ polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+
+ result = polaris10_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result)
+ return result;
+
+ /* Make sure only DPM levels 0 and 1 have a Deep Sleep Div ID populated. */
+ if (i > 1)
+ levels[i].DeepSleepDivId = 0;
+ }
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SPLLShutdownSupport))
+ smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
+
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_cnt - 1;
+ for (i = 0; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ } else {
+ while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (highest_pcie_level_enabled + 1))) != 0))
+ highest_pcie_level_enabled++;
+
+ while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0))
+ lowest_pcie_level_enabled++;
+
+ while ((count < highest_pcie_level_enabled) &&
+ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+ count++;
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+ highest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled + 1 + count) :
+ highest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to highest_pcie_level_enabled */
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel = highest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled */
+ levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled */
+ levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+ /* the level count is sent to the SMC once at SMC-table init and never changes */
+ result = smu7_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+
+static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ struct cgs_display_info info = {0, 0, NULL};
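+ /* clock values here are in 10 kHz units, so this threshold is 400 MHz */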
+ uint32_t mclk_stutter_mode_threshold = 40000;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (table_info->vdd_dep_on_mclk) {
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk, clock,
+ &mem_level->MinVoltage, &mem_level->MinMvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory "
+ "VDDC voltage dependency table", return result);
+ }
+
+ mem_level->MclkFrequency = clock;
+ mem_level->EnabledForThrottle = 1;
+ mem_level->EnabledForActivity = 0;
+ mem_level->UpHyst = 0;
+ mem_level->DownHyst = 100;
+ mem_level->VoltageDownHyst = 0;
+ mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ mem_level->StutterEnable = false;
+ mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ data->display_timing.num_existing_displays = info.display_count;
+
+ if (mclk_stutter_mode_threshold &&
+ (clock <= mclk_stutter_mode_threshold) &&
+ (SMUM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE) & 0x1))
+ mem_level->StutterEnable = true;
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+ }
+ return result;
+}
+
+/**
+* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+*
+* @param hwmgr the address of the hardware manager
+*/
+int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+ int result;
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
+ uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
+ SMU74_MAX_LEVELS_MEMORY;
+ struct SMU74_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero",
+ return -EINVAL);
+ result = polaris10_populate_single_memory_level(hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &levels[i]);
+ if (i == dpm_table->mclk_table.count - 1) {
+ levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+ levels[i].EnabledForActivity = 1;
+ }
+ if (result)
+ return result;
+ }
+
+ /* To prevent MC activity in stutter mode from pushing DPM up,
+ * the UVD change complements this by keeping the MCLK in
+ * a higher state by default, so that we are not affected by
+ * the up threshold or MCLK DPM latency.
+ */
+ levels[0].ActivityLevel = 0x1f;
+ CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount =
+ (uint8_t)dpm_table->mclk_table.count;
+ hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+ /* the level count is sent to the SMC once at SMC-table init and never changes */
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+/**
+* Populates the SMC MVDD structure using the provided memory clock.
+*
+* @param hwmgr the address of the hardware manager
+* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
+* @param voltage the SMC VOLTAGE structure to be populated
+*/
+static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find the first mvdd entry whose clock is >= the requested mclk */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint32_t sclk_frequency;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ SMIO_Pattern vol_level;
+ uint32_t mvdd;
+ uint16_t us_mvdd;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ /* Get MinVoltage and Frequency from DPM0,
+ * already converted to SMC_UL */
+ sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk,
+ sclk_frequency,
+ &table->ACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDC voltage value "
+ "in Clock Dependency Table",
+ );
+
+ result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
+ PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ table->ACPILevel.DeepSleepDivId = 0;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
+
+
+ /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+ table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk,
+ table->MemoryACPILevel.MclkFrequency,
+ &table->MemoryACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDCI voltage value "
+ "in Clock Dependency Table",
+ );
+
+ us_mvdd = 0;
+ if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+ (data->mclk_dpm_key_disabled))
+ us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+ else {
+ if (!polaris10_populate_mvdd_value(hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[0].value,
+ &vol_level))
+ us_mvdd = vol_level.Voltage;
+ }
+
+ if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
+ table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ table->MemoryACPILevel.StutterEnable = false;
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+ return result;
+}
+
+static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->VceLevelCount = (uint8_t)(mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage = 0;
+ table->VceLevel[count].MinVoltage |=
+ (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = data->vbios_boot_state.vddci_bootup_value;
+
+
+ table->VceLevel[count].MinVoltage |=
+ (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+
+static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].MinVoltage = 0;
+ table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = data->vbios_boot_state.vddci_bootup_value;
+
+ table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+ int32_t eng_clock, int32_t mem_clock,
+ SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ uint32_t dram_timing;
+ uint32_t dram_timing2;
+ uint32_t burst_time;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ eng_clock, mem_clock);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+ arb_regs->McArbBurstTime = (uint8_t)burst_time;
+
+ return 0;
+}
+
+static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+ int result = 0;
+
+ for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
+ result = polaris10_populate_memory_timing_parameters(hwmgr,
+ hw_data->dpm_table.sclk_table.dpm_levels[i].value,
+ hw_data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (result == 0)
+ result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
+ if (result != 0)
+ return result;
+ }
+ }
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU74_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END);
+ return result;
+}
+
+static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->UvdLevelCount = (uint8_t)(mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].MinVoltage = 0;
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = data->vbios_boot_state.vddci_bootup_value;
+
+ table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+ }
+
+ return result;
+}
+
+static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table */
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(table->GraphicsBootLevel));
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(table->MemoryBootLevel));
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return 0;
+}
+
+static int polaris10_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint8_t count, level;
+
+ count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+ hw_data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+ hw_data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+ uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+ /* Read the SMU efuse to calculate RO and determine
+ * whether the part is SS or FF; if RO >= 1660MHz, the part is FF.
+ */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (67 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ min = 1000;
+ max = 2300;
+ } else {
+ min = 1100;
+ max = 2100;
+ }
+
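+ /* linearly map the 8-bit fuse value onto the per-ASIC RO range */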
+ ro = efuse * (max - min) / 255 + min;
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
+ (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+ volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+ (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
+ } else {
+ volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
+ (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+ volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+ (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
+ }
+
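+ /* the offset is programmed in 6.25 mV VID steps; the "+ 624" rounds up */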
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
+
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
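+ /* clear bit 0 of PWR_CKS_CNTL (presumably the clock-stretcher enable) */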
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
+/**
+* Populates the SMC VRConfig field in DPM table.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ uint16_t config;
+
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ PP_ASSERT_WITH_CODE(false,
+ "VDDC should be on SVI2 control in merged mode!",
+ );
+ }
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ }
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
+ offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+
+static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+ SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ int result = 0;
+ struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+ AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+ AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+ uint32_t tmp, i;
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+
+ if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return result;
+
+ result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+ if (0 == result) {
+ table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+ table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+ table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+ table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+ table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+ table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+ table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+ table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+ table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+ table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
+ table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+ table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+ table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+ table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
+ table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+ AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+ AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+ AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+ AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+ AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+ AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+ AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
+ for (i = 0; i < NUM_VFT_COLUMNS; i++) {
+ AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+ AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
+ }
+
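+ /* The firmware header holds SMC-RAM pointers to the AVFS tables;
+ * read each pointer, then copy the populated table to that address.
+ */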
+ result = smu7_read_smc_sram_dword(smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
+ &tmp, SMC_RAM_END);
+
+ smu7_copy_bytes_to_smc(smumgr,
+ tmp,
+ (uint8_t *)&AVFS_meanNsigma,
+ sizeof(AVFS_meanNsigma_t),
+ SMC_RAM_END);
+
+ result = smu7_read_smc_sram_dword(smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
+ &tmp, SMC_RAM_END);
+ smu7_copy_bytes_to_smc(smumgr,
+ tmp,
+ (uint8_t *)&AVFS_SclkOffset,
+ sizeof(AVFS_Sclk_Offset_t),
+ SMC_RAM_END);
+
+ data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+ data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
+ }
+ return result;
+}
+
+
+/**
+* Initialize the ARB DRAM timing table's index field.
+*
+* @param smumgr the address of the SMC manager.
+* @return 0 on success; error code otherwise.
+*/
+static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ uint32_t tmp;
+ int result;
+
+ /* This is a read-modify-write on the first byte of the ARB table.
+ * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
+ * is the field 'current'.
+ * This solution is ugly, but we never write the whole table only
+ * individual fields in it.
+ * In reality this field should not be in that structure
+ * but in a soft register.
+ */
+ result = smu7_read_smc_sram_dword(smumgr,
+ smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+ if (result)
+ return result;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return smu7_write_smc_sram_dword(smumgr,
+ smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &polaris10_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
+
+}
+
+/**
+* Initializes the SMC table and uploads it
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return 0 on success; error code otherwise.
+*/
+int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ uint8_t i;
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+ pp_atomctrl_clock_dividers_vi dividers;
+
+ polaris10_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
+ polaris10_populate_smc_voltage_tables(hwmgr, table);
+
+ table->SystemFlags = 0;
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (hw_data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = polaris10_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
+ }
+
+ result = polaris10_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result);
+
+ result = polaris10_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result);
+
+ result = polaris10_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result);
+
+ result = polaris10_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result);
+
+ result = polaris10_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result);
+
+ result = polaris10_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result);
+
+ /* Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = polaris10_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result);
+
+ result = polaris10_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result);
+
+ result = polaris10_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result);
+
+ result = polaris10_populate_smc_initailial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot State!", return result);
+
+ result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate BAPM Parameters!", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = polaris10_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate Clock Stretcher Data Table!",
+ return result);
+ }
+
+ result = polaris10_populate_avfs_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result);
+
+ table->CurrSclkPllRange = 0xff;
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0;
+ table->PCIeGenInterval = 1;
+ table->VRConfig = 0;
+
+ result = polaris10_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate VRConfig setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin)) {
+ table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ /* Thermal Output GPIO */
+ if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+ &gpio_pin)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+ /* For polarity, read GPIOPAD_A with the assigned GPIO pin,
+ * since VBIOS will program this register to set the 'inactive state';
+ * the driver can then determine the 'active state' from this and
+ * program the SMU with the correct polarity.
+ */
+ table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
+ & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1 : 0;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
+ && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ } else {
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ /* Populate BIF_SCLK levels into SMC DPM table */
+ for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
+ PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
+
+ if (i == 0)
+ table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+ else
+ table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+ }
+
+ for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ /* Upload all DPM data to SMC memory (dpm level, dpm level count, etc.). */
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
+ SMC_RAM_END);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result);
+
+ result = polaris10_init_arb_table_index(hwmgr->smumgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload arb data to SMC memory!", return result);
+
+ result = polaris10_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+ return 0;
+}
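All the CONVERT_FROM_HOST_TO_SMC_* calls near the end exist because the SMC consumes the table in big-endian order while the host is little-endian. A minimal sketch of the idea, assuming the kernel's cpu_to_be helpers (the real macros live in pp_endian.h):

	#include <asm/byteorder.h>

	/* Swap one 32-bit field in place just before upload. */
	#define HOST_TO_SMC_UL(field)  ((field) = cpu_to_be32(field))
	/* 16-bit variant for the temperature and response-time fields. */
	#define HOST_TO_SMC_US(field)  ((field) = cpu_to_be16(field))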
+
+static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
+ return polaris10_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
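DPMTABLE_OD_UPDATE_SCLK and DPMTABLE_OD_UPDATE_MCLK are distinct single-bit flags, so OR-ing them builds a mask and a single & tests whether either overdrive clock changed. A standalone illustration with made-up flag values:

	#include <stdint.h>

	#define OD_UPDATE_SCLK (1 << 0) /* illustrative; the real defines live elsewhere */
	#define OD_UPDATE_MCLK (1 << 1)

	/* True when at least one overdrive clock changed and memory
	 * timings must be reprogrammed. */
	static int needs_timing_reprogram(uint32_t update_flags)
	{
		return (update_flags & (OD_UPDATE_SCLK | OD_UPDATE_MCLK)) != 0;
	}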
+
+int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+ struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return 0;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+ if (ret)
+ return ret;
+
+ ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs);
+
+ if (!ret)
+ /* If this param is not changed, this function could fire unnecessarily */
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+ return ret;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0; on failure the MicrocodeFanControl cap is cleared instead
+*/
+int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (smu_data->smu7_data.fan_table_start == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (duty100 == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+ usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->
+ thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+ thermal_controller.advanceFanControlParameters.ulCycleDelay *
+ reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+ hwmgr->device, CGS_IND_REG__SMC,
+ CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
+ (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+ SMC_RAM_END);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanMinPwm,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanSclkTarget,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+ if (res)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ return 0;
+}
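The slope math is integer fixed point: PWM deltas are scaled by 16, and the +50 before the /100 rounds half up instead of truncating. With illustrative inputs duty100 = 255, pwm_diff = 36 and t_diff = 2000 (hundredths of a degree), slope = (50 + 16*255*36/2000)/100 = (50 + 73)/100 = 1. A sketch of the same computation:

	#include <stdint.h>

	/* Fan-curve slope in the SMC's fixed-point format; mirrors the
	 * slope1/slope2 expressions above (+50 rounds the final /100). */
	static uint16_t fan_slope(uint32_t duty100, uint32_t pwm_diff, uint32_t t_diff)
	{
		return (uint16_t)((50 + ((16 * duty100 * pwm_diff) / t_diff)) / 100);
	}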
+
+static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
+ UvdBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
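The offset arithmetic (divide by 4, multiply by 4) rounds the byte offset of UvdBootLevel down to its containing dword, and the mask/shift then patches just that byte: UVD sits in bits 31:24, while VCE uses bits 23:16 and SAMU bits 7:0 in the functions that follow. A generic sketch of the byte-patch step:

	#include <stdint.h>

	/* Replace one byte of a dword read through a 32-bit register window.
	 * byte_in_dword: 0 selects bits 7:0, 3 selects bits 31:24. */
	static uint32_t patch_byte_in_dword(uint32_t dword, unsigned int byte_in_dword,
					    uint8_t value)
	{
		uint32_t shift = 8 * byte_in_dword;

		dword &= ~(0xFFu << shift);
		dword |= (uint32_t)value << shift;
		return dword;
	}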
+
+static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smu_data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ else
+ smu_data->smc_state_table.VceBootLevel = 0;
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+
+static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ int max_entry, i;
+
+ max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
+ SMU74_MAX_LEVELS_LINK :
+ pcie_table->count;
+ /* Setup BIF_SCLK levels */
+ for (i = 0; i < max_entry; i++)
+ smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
+ return 0;
+}
+
+int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ polaris10_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ polaris10_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ polaris10_update_samu_smc_table(hwmgr);
+ break;
+ case SMU_BIF_TABLE:
+ polaris10_update_bif_smc_table(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to update SCLK threshold!", return result);
+
+ result = polaris10_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+
+ return result;
+}
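Writing a single live field this way (byte-swap, then smu7_copy_bytes_to_smc at the field's offsetof) is the common idiom in this file. A hypothetical helper capturing the pattern; write_dpm_dword is not part of this series, it only condenses the calls above:

	/* Hypothetical convenience wrapper around the pattern above. */
	static int write_dpm_dword(struct pp_hwmgr *hwmgr, uint32_t table_start,
				   uint32_t field_offset, uint32_t value)
	{
		uint32_t be_value = cpu_to_be32(value);

		return smu7_copy_bytes_to_smc(hwmgr->smumgr,
				table_start + field_offset,
				(uint8_t *)&be_value, sizeof(be_value),
				SMC_RAM_END);
	}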
+
+uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU74_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU74_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU74_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
+ }
+ break;
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ break;
+ }
+ printk("cant't get the offset of type %x member %x \n", type, member);
+ return 0;
+}
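This lookup is what lets the chip-independent smu7 layer address fields of the chip-specific SMU74 structures. An illustrative call, using the enum names from the case labels above:

	uint32_t off = polaris10_get_offsetof(SMU_Discrete_DpmTable,
					      LowSclkInterruptThreshold);
	/* off can then be added to dpm_table_start for an SRAM write. */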
+
+uint32_t polaris10_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU74_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU74_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU74_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU74_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU74_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU74_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU74_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU74_MAX_LEVELS_MVDD;
+ case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
+ return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
+ }
+
+ printk("cant't get the mac of %x \n", value);
+ return 0;
+}
+
+/**
+* Get the location of various tables inside the FW image.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return 0 on success, -1 if any required table offset could not be read
+*/
+int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.soft_regs_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.mc_reg_table_start = tmp;
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (0 != result);
+
+ return error ? -1 : 0;
+}
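Note the error-accumulation style: instead of returning on the first failed read, every offset that does read back is still latched, and the error |= (0 != result) lines fold the outcomes into one pass/fail flag. A condensed sketch with a hypothetical read_u32():

	#include <stdbool.h>
	#include <stdint.h>

	extern int read_u32(uint32_t offset, uint32_t *out); /* hypothetical reader */

	static int read_two_offsets(uint32_t *a, uint32_t *b)
	{
		bool error = false;
		uint32_t tmp;
		int result;

		result = read_u32(0x10, &tmp);
		if (!result)
			*a = tmp;     /* keep what we could read */
		error |= (result != 0);

		result = read_u32(0x14, &tmp);
		if (!result)
			*b = tmp;
		error |= (result != 0);

		return error ? -1 : 0;
	}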
+
+bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON));
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
new file mode 100644
index 000000000000..5ade3cea8bb7
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef POLARIS10_SMC_H
+#define POLARIS10_SMC_H
+
+#include "smumgr.h"
+
+
+int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int polaris10_init_smc_table(struct pp_hwmgr *hwmgr);
+int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
+int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member);
+uint32_t polaris10_get_mac_definition(uint32_t value);
+int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr);
+bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 5dba7c509710..5c3598ab7dae 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -38,16 +38,11 @@
#include "ppatomctrl.h"
#include "pp_debug.h"
#include "cgs_common.h"
+#include "polaris10_smc.h"
+#include "smu7_ppsmc.h"
+#include "smu7_smumgr.h"
-#define POLARIS10_SMC_SIZE 0x20000
-#define VOLTAGE_SCALE 4
-
-/* Microcode file is stored in this buffer */
-#define BUFFER_SIZE 80000
-#define MAX_STRING_SIZE 15
-#define BUFFER_SIZETWO 131072 /* 128 *1024 */
-
-#define SMC_RAM_END 0x40000
+#define PPPOLARIS10_TARGETACTIVITY_DFLT 50
static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
/* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */
@@ -62,572 +57,9 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
{ 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }
};
-static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 =
- {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
-
-/**
-* Set the address for reading/writing the SMC SRAM space.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smcAddress the address in the SMC RAM to access.
-*/
-static int polaris10_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
-{
- PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
- PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
-
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
-
- return 0;
-}
-
-/**
-* Copy bytes from SMC RAM space into driver memory.
-*
-* @param smumgr the address of the powerplay SMU manager.
-* @param smc_start_address the start address in the SMC RAM to copy bytes from
-* @param src the byte array to copy the bytes to.
-* @param byte_count the number of bytes to copy.
-*/
-int polaris10_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
-{
- uint32_t data;
- uint32_t addr;
- uint8_t *dest_byte;
- uint8_t i, data_byte[4] = {0};
- uint32_t *pdata = (uint32_t *)&data_byte;
-
- PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1;);
- PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
-
- addr = smc_start_address;
-
- while (byte_count >= 4) {
- polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
-
- *dest = PP_SMC_TO_HOST_UL(data);
-
- dest += 1;
- byte_count -= 4;
- addr += 4;
- }
-
- if (byte_count) {
- polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
- *pdata = PP_SMC_TO_HOST_UL(data);
- /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
- dest_byte = (uint8_t *)dest;
- for (i = 0; i < byte_count; i++)
- dest_byte[i] = data_byte[i];
- }
-
- return 0;
-}
-
-/**
-* Copy bytes from an array into the SMC RAM space.
-*
-* @param pSmuMgr the address of the powerplay SMU manager.
-* @param smc_start_address the start address in the SMC RAM to copy bytes to.
-* @param src the byte array to copy the bytes from.
-* @param byte_count the number of bytes to copy.
-*/
-int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
- const uint8_t *src, uint32_t byte_count, uint32_t limit)
-{
- int result;
- uint32_t data = 0;
- uint32_t original_data;
- uint32_t addr = 0;
- uint32_t extra_shift;
-
- PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1);
- PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
-
- addr = smc_start_address;
-
- while (byte_count >= 4) {
- /* Bytes are written into the SMC addres space with the MSB first. */
- data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
-
- result = polaris10_set_smc_sram_address(smumgr, addr, limit);
-
- if (0 != result)
- return result;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- if (0 != byte_count) {
-
- data = 0;
-
- result = polaris10_set_smc_sram_address(smumgr, addr, limit);
-
- if (0 != result)
- return result;
-
-
- original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
-
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- /* Bytes are written into the SMC addres space with the MSB first. */
- data = (0x100 * data) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
-
- data |= (original_data & ~((~0UL) << extra_shift));
-
- result = polaris10_set_smc_sram_address(smumgr, addr, limit);
-
- if (0 != result)
- return result;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
- }
-
- return 0;
-}
-
-
-static int polaris10_program_jump_on_start(struct pp_smumgr *smumgr)
-{
- static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
-
- polaris10_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
-
- return 0;
-}
-
-/**
-* Return if the SMC is currently running.
-*
-* @param smumgr the address of the powerplay hardware manager.
-*/
-bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
-{
- return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
- && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
-}
-
-static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
-{
- uint32_t efuse;
-
- efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
- efuse &= 0x00000001;
- if (efuse)
- return true;
-
- return false;
-}
-
-/**
-* Send a message to the SMC, and wait for its response.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param msg the message to send.
-* @return The response that came from the SMC.
-*/
-int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
-{
- int ret;
-
- if (!polaris10_is_smc_ram_running(smumgr))
- return -1;
-
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
-
- if (ret != 1)
- printk("\n failed to send pre message %x ret is %d \n", msg, ret);
-
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
-
- if (ret != 1)
- printk("\n failed to send message %x ret is %d \n", msg, ret);
-
- return 0;
-}
-
-
-/**
-* Send a message to the SMC, and do not wait for its response.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param msg the message to send.
-* @return Always return 0.
-*/
-int polaris10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
-{
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
-/**
-* Send a message to the SMC with parameter
-*
-* @param smumgr: the address of the powerplay hardware manager.
-* @param msg: the message to send.
-* @param parameter: the parameter to send
-* @return The response that came from the SMC.
-*/
-int polaris10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
-{
- if (!polaris10_is_smc_ram_running(smumgr)) {
- return -1;
- }
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
- return polaris10_send_msg_to_smc(smumgr, msg);
-}
-
-
-/**
-* Send a message to the SMC with parameter, do not wait for response
-*
-* @param smumgr: the address of the powerplay hardware manager.
-* @param msg: the message to send.
-* @param parameter: the parameter to send
-* @return The response that came from the SMC.
-*/
-int polaris10_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
-{
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
- return polaris10_send_msg_to_smc_without_waiting(smumgr, msg);
-}
-
-int polaris10_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
-{
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
-
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
- printk("Failed to send Message.\n");
-
- return 0;
-}
-
-/**
-* Wait until the SMC is doing nithing. Doing nothing means that the SMC is either turned off or it is sitting on the STOP instruction.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param msg the message to send.
-* @return The response that came from the SMC.
-*/
-int polaris10_wait_for_smc_inactive(struct pp_smumgr *smumgr)
-{
- /* If the SMC is not even on it qualifies as inactive. */
- if (!polaris10_is_smc_ram_running(smumgr))
- return -1;
-
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
- return 0;
-}
-
-
-/**
-* Upload the SMC firmware to the SMC microcontroller.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param pFirmware the data structure containing the various sections of the firmware.
-*/
-static int polaris10_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
-{
- uint32_t byte_count = length;
-
- PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -1);
+static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
+ 0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
-
- for (; byte_count >= 4; byte_count -= 4)
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
-
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
-
- PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -1);
-
- return 0;
-}
-
-static enum cgs_ucode_id polaris10_convert_fw_type_to_cgs(uint32_t fw_type)
-{
- enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
-
- switch (fw_type) {
- case UCODE_ID_SMU:
- result = CGS_UCODE_ID_SMU;
- break;
- case UCODE_ID_SMU_SK:
- result = CGS_UCODE_ID_SMU_SK;
- break;
- case UCODE_ID_SDMA0:
- result = CGS_UCODE_ID_SDMA0;
- break;
- case UCODE_ID_SDMA1:
- result = CGS_UCODE_ID_SDMA1;
- break;
- case UCODE_ID_CP_CE:
- result = CGS_UCODE_ID_CP_CE;
- break;
- case UCODE_ID_CP_PFP:
- result = CGS_UCODE_ID_CP_PFP;
- break;
- case UCODE_ID_CP_ME:
- result = CGS_UCODE_ID_CP_ME;
- break;
- case UCODE_ID_CP_MEC:
- result = CGS_UCODE_ID_CP_MEC;
- break;
- case UCODE_ID_CP_MEC_JT1:
- result = CGS_UCODE_ID_CP_MEC_JT1;
- break;
- case UCODE_ID_CP_MEC_JT2:
- result = CGS_UCODE_ID_CP_MEC_JT2;
- break;
- case UCODE_ID_RLC_G:
- result = CGS_UCODE_ID_RLC_G;
- break;
- default:
- break;
- }
-
- return result;
-}
-
-static int polaris10_upload_smu_firmware_image(struct pp_smumgr *smumgr)
-{
- int result = 0;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
- struct cgs_firmware_info info = {0};
-
- if (smu_data->security_hard_key == 1)
- cgs_get_firmware_info(smumgr->device,
- polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
- else
- cgs_get_firmware_info(smumgr->device,
- polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
-
- /* TO DO cgs_init_samu_load_smu(smumgr->device, (uint32_t *)info.kptr, info.image_size, smu_data->post_initial_boot);*/
- result = polaris10_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, POLARIS10_SMC_SIZE);
-
- return result;
-}
-
-/**
-* Read a 32bit value from the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smcAddress the address in the SMC RAM to access.
-* @param value and output parameter for the data read from the SMC SRAM.
-*/
-int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
-{
- int result;
-
- result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
-
- if (result)
- return result;
-
- *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
- return 0;
-}
-
-/**
-* Write a 32bit value to the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smc_addr the address in the SMC RAM to access.
-* @param value to write to the SMC SRAM.
-*/
-int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
-{
- int result;
-
- result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
-
- if (result)
- return result;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
-
- return 0;
-}
-
-
-int polaris10_smu_fini(struct pp_smumgr *smumgr)
-{
- if (smumgr->backend) {
- kfree(smumgr->backend);
- smumgr->backend = NULL;
- }
- cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
- return 0;
-}
-
-/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
-static uint32_t polaris10_get_mask_for_firmware_type(uint32_t fw_type)
-{
- uint32_t result = 0;
-
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- result = UCODE_ID_SDMA0_MASK;
- break;
- case UCODE_ID_SDMA1:
- result = UCODE_ID_SDMA1_MASK;
- break;
- case UCODE_ID_CP_CE:
- result = UCODE_ID_CP_CE_MASK;
- break;
- case UCODE_ID_CP_PFP:
- result = UCODE_ID_CP_PFP_MASK;
- break;
- case UCODE_ID_CP_ME:
- result = UCODE_ID_CP_ME_MASK;
- break;
- case UCODE_ID_CP_MEC_JT1:
- case UCODE_ID_CP_MEC_JT2:
- result = UCODE_ID_CP_MEC_MASK;
- break;
- case UCODE_ID_RLC_G:
- result = UCODE_ID_RLC_G_MASK;
- break;
- default:
- printk("UCode type is out of range! \n");
- result = 0;
- }
-
- return result;
-}
-
-/* Populate one firmware image to the data structure */
-
-static int polaris10_populate_single_firmware_entry(struct pp_smumgr *smumgr,
- uint32_t fw_type,
- struct SMU_Entry *entry)
-{
- int result = 0;
- struct cgs_firmware_info info = {0};
-
- result = cgs_get_firmware_info(smumgr->device,
- polaris10_convert_fw_type_to_cgs(fw_type),
- &info);
-
- if (!result) {
- entry->version = info.version;
- entry->id = (uint16_t)fw_type;
- entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
- entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
- entry->meta_data_addr_high = 0;
- entry->meta_data_addr_low = 0;
- entry->data_size_byte = info.image_size;
- entry->num_register_entries = 0;
- }
-
- if (fw_type == UCODE_ID_RLC_G)
- entry->flags = 1;
- else
- entry->flags = 0;
-
- return 0;
-}
-
-static int polaris10_request_smu_load_fw(struct pp_smumgr *smumgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- uint32_t fw_to_load;
-
- int result = 0;
- struct SMU_DRAMData_TOC *toc;
-
- if (!smumgr->reload_fw) {
- printk(KERN_INFO "[ powerplay ] skip reloading...\n");
- return 0;
- }
-
- if (smu_data->soft_regs_start)
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
- smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
- 0x0);
-
- polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
- polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
-
- toc = (struct SMU_DRAMData_TOC *)smu_data->header;
- toc->num_entries = 0;
- toc->structure_version = 1;
-
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-
- polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
- polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
-
- fw_to_load = UCODE_ID_RLC_G_MASK
- + UCODE_ID_SDMA0_MASK
- + UCODE_ID_SDMA1_MASK
- + UCODE_ID_CP_CE_MASK
- + UCODE_ID_CP_ME_MASK
- + UCODE_ID_CP_PFP_MASK
- + UCODE_ID_CP_MEC_MASK;
-
- if (polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
- printk(KERN_ERR "Fail to Request SMU Load uCode");
-
- return result;
-}
-
-/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
-static int polaris10_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- uint32_t fw_mask = polaris10_get_mask_for_firmware_type(fw_type);
- uint32_t ret;
- /* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */
- ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
- smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
- fw_mask, fw_mask);
-
- return ret;
-}
-
-static int polaris10_reload_firmware(struct pp_smumgr *smumgr)
-{
- return smumgr->smumgr_funcs->start_smu(smumgr);
-}
static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
{
@@ -669,7 +101,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
if (0 != smu_data->avfs.avfs_btc_param) {
- if (0 != polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+ if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
printk("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
result = -1;
}
@@ -697,7 +129,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_size = sizeof(avfs_graphics_level_polaris10);
u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE);
- PP_ASSERT_WITH_CODE(0 == polaris10_read_smc_sram_dword(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable),
&dpm_table_start, 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table",
@@ -708,14 +140,14 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig);
- PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, vr_config_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_address,
(uint8_t *)&vr_config, sizeof(uint32_t), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC",
return -1);
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
- PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
(uint8_t *)(&avfs_graphics_level_polaris10),
graphics_level_size, 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!",
@@ -723,7 +155,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
- PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
(uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!",
return -1);
@@ -732,7 +164,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd);
- PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
(uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!",
return -1);
@@ -793,7 +225,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = polaris10_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(smumgr);
if (result != 0)
return result;
@@ -812,7 +244,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
/* Call Test SMU message with 0x20000 offset to trigger SMU start */
- polaris10_send_msg_to_smc_offset(smumgr);
+ smu7_send_msg_to_smc_offset(smumgr);
/* Wait done bit to be set */
/* Check pass/failed indicator */
@@ -853,12 +285,12 @@ static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
SMC_SYSCON_RESET_CNTL,
rst_reg, 1);
- result = polaris10_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(smumgr);
if (result != 0)
return result;
/* Set smc instruct start point at 0x0 */
- polaris10_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(smumgr);
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
@@ -881,10 +313,10 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
bool SMU_VFT_INTACT;
/* Only start SMC if SMC RAM is not running */
- if (!polaris10_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(smumgr)) {
SMU_VFT_INTACT = false;
smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
- smu_data->security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
+ smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
/* Check if SMU is running in protected mode */
if (smu_data->protected_mode == 0) {
@@ -894,7 +326,7 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
/* If failed, try with different security Key. */
if (result != 0) {
- smu_data->security_hard_key ^= 1;
+ smu_data->smu7_data.security_hard_key ^= 1;
result = polaris10_start_smu_in_protection_mode(smumgr);
}
}
@@ -906,89 +338,69 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
} else
SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */
- smu_data->post_initial_boot = true;
polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT);
/* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
- polaris10_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
- &(smu_data->soft_regs_start), 0x40000);
+ smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
+ &(smu_data->smu7_data.soft_regs_start), 0x40000);
- result = polaris10_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(smumgr);
return result;
}
+static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
+{
+ uint32_t efuse;
+
+ efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
+ efuse &= 0x00000001;
+ if (efuse)
+ return true;
+
+ return false;
+}
+
static int polaris10_smu_init(struct pp_smumgr *smumgr)
{
- struct polaris10_smumgr *smu_data;
- uint8_t *internal_buf;
- uint64_t mc_addr = 0;
- /* Allocate memory for backend private data */
- smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- smu_data->header_buffer.data_size =
- ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- smu_data->smu_buffer.data_size = 200*4096;
- smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
-/* Allocate FW image data structure and header buffer and
- * send the header buffer address to SMU */
- smu_allocate_memory(smumgr->device,
- smu_data->header_buffer.data_size,
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
- PAGE_SIZE,
- &mc_addr,
- &smu_data->header_buffer.kaddr,
- &smu_data->header_buffer.handle);
-
- smu_data->header = smu_data->header_buffer.kaddr;
- smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
- smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
- PP_ASSERT_WITH_CODE((NULL != smu_data->header),
- "Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
- (cgs_handle_t)smu_data->header_buffer.handle);
- return -1);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ int i;
-/* Allocate buffer for SMU internal buffer and send the address to SMU.
- * Iceland SMU does not need internal buffer.*/
- smu_allocate_memory(smumgr->device,
- smu_data->smu_buffer.data_size,
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
- PAGE_SIZE,
- &mc_addr,
- &smu_data->smu_buffer.kaddr,
- &smu_data->smu_buffer.handle);
-
- internal_buf = smu_data->smu_buffer.kaddr;
- smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
- smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
- PP_ASSERT_WITH_CODE((NULL != internal_buf),
- "Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
- (cgs_handle_t)smu_data->smu_buffer.handle);
- return -1;);
+ if (smu7_init(smumgr))
+ return -EINVAL;
if (polaris10_is_hw_avfs_present(smumgr))
smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
else
smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
+ for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++)
+ smu_data->activity_target[i] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+
return 0;
}
-static const struct pp_smumgr_func ellsemere_smu_funcs = {
+static const struct pp_smumgr_func polaris10_smu_funcs = {
.smu_init = polaris10_smu_init,
- .smu_fini = polaris10_smu_fini,
+ .smu_fini = smu7_smu_fini,
.start_smu = polaris10_start_smu,
- .check_fw_load_finish = polaris10_check_fw_load_finish,
- .request_smu_load_fw = polaris10_reload_firmware,
+ .check_fw_load_finish = smu7_check_fw_load_finish,
+ .request_smu_load_fw = smu7_reload_firmware,
.request_smu_load_specific_fw = NULL,
- .send_msg_to_smc = polaris10_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = polaris10_send_msg_to_smc_with_parameter,
+ .send_msg_to_smc = smu7_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
+ .update_smc_table = polaris10_update_smc_table,
+ .get_offsetof = polaris10_get_offsetof,
+ .process_firmware_header = polaris10_process_firmware_header,
+ .init_smc_table = polaris10_init_smc_table,
+ .update_sclk_threshold = polaris10_update_sclk_threshold,
+ .thermal_avfs_enable = polaris10_thermal_avfs_enable,
+ .thermal_setup_fan_table = polaris10_thermal_setup_fan_table,
+ .populate_all_graphic_levels = polaris10_populate_all_graphic_levels,
+ .populate_all_memory_levels = polaris10_populate_all_memory_levels,
+ .get_mac_definition = polaris10_get_mac_definition,
+ .is_dpm_running = polaris10_is_dpm_running,
};
int polaris10_smum_init(struct pp_smumgr *smumgr)
@@ -998,10 +410,10 @@ int polaris10_smum_init(struct pp_smumgr *smumgr)
polaris10_smu = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL);
if (polaris10_smu == NULL)
- return -1;
+ return -EINVAL;
smumgr->backend = polaris10_smu;
- smumgr->smumgr_funcs = &ellsemere_smu_funcs;
+ smumgr->smumgr_funcs = &polaris10_smu_funcs;
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
index e5377aec057f..49ebf1d5a53c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
@@ -24,45 +24,52 @@
#ifndef _POLARIS10_SMUMANAGER_H
#define _POLARIS10_SMUMANAGER_H
-#include <polaris10_ppsmc.h>
+
#include <pp_endian.h>
+#include "smu74.h"
+#include "smu74_discrete.h"
+#include "smu7_smumgr.h"
+
+#define SMC_RAM_END 0x40000
struct polaris10_avfs {
enum AVFS_BTC_STATUS avfs_btc_status;
uint32_t avfs_btc_param;
};
-struct polaris10_buffer_entry {
- uint32_t data_size;
- uint32_t mc_addr_low;
- uint32_t mc_addr_high;
- void *kaddr;
- unsigned long handle;
+struct polaris10_pt_defaults {
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+ uint8_t TdcWaterfallCtl;
+ uint8_t DTEAmbientTempBase;
+
+ uint32_t DisplayCac;
+ uint32_t BAPM_TEMP_GRADIENT;
+ uint16_t BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+ uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+};
+
+
+
+struct polaris10_range_table {
+ uint32_t trans_lower_frequency; /* in 10khz */
+ uint32_t trans_upper_frequency;
};
struct polaris10_smumgr {
- uint8_t *header;
- uint8_t *mec_image;
- struct polaris10_buffer_entry smu_buffer;
- struct polaris10_buffer_entry header_buffer;
- uint32_t soft_regs_start;
- uint8_t *read_rrm_straps;
- uint32_t read_drm_straps_mc_address_high;
- uint32_t read_drm_straps_mc_address_low;
- uint32_t acpi_optimization;
- bool post_initial_boot;
+ struct smu7_smumgr smu7_data;
uint8_t protected_mode;
- uint8_t security_hard_key;
struct polaris10_avfs avfs;
+ SMU74_Discrete_DpmTable smc_state_table;
+ struct SMU74_Discrete_Ulv ulv_setting;
+ struct SMU74_Discrete_PmFuses power_tune_table;
+ struct polaris10_range_table range_table[NUM_SCLK_RANGE];
+ const struct polaris10_pt_defaults *power_tune_defaults;
+ uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
+ uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
};
-int polaris10_smum_init(struct pp_smumgr *smumgr);
-
-int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit);
-int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit);
-int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
- const uint8_t *src, uint32_t byte_count, uint32_t limit);
-
#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
new file mode 100644
index 000000000000..6af744f42ec9
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -0,0 +1,589 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+
+#include "smumgr.h"
+#include "smu_ucode_xfer_vi.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "smu7_ppsmc.h"
+#include "smu7_smumgr.h"
+
+#define SMU7_SMC_SIZE 0x20000
+
+static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
+{
+ PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
+
+ cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
+ SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
+ return 0;
+}
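smu7_set_smc_sram_address() programs the classic index/data register pair: the target SRAM address is written to an index register (mmSMC_IND_INDEX_11 here), and subsequent accesses to the matching data register hit that address. A minimal user-space model of the pattern, with a fake SRAM array and illustrative names:

/* Toy model of index/data pair register access, as used by
 * smu7_set_smc_sram_address(); names and sizes are illustrative. */
#include <stdint.h>
#include <stdio.h>

static uint32_t sram[0x40000 / 4];	/* fake SMC RAM (SMC_RAM_END = 0x40000) */
static uint32_t index_reg;		/* stands in for mmSMC_IND_INDEX_11 */

static int set_sram_address(uint32_t addr, uint32_t limit)
{
	if (addr & 3)
		return -1;		/* must be 4-byte aligned */
	if (limit <= addr + 3)
		return -1;		/* beyond the SMC RAM area */
	index_reg = addr;		/* program the index register */
	return 0;
}

static uint32_t read_data_reg(void)	/* stands in for mmSMC_IND_DATA_11 */
{
	return sram[index_reg / 4];
}

int main(void)
{
	sram[0x100 / 4] = 0xdeadbeef;
	if (!set_sram_address(0x100, 0x40000))
		printf("0x%08x\n", read_data_reg());	/* 0xdeadbeef */
	return 0;
}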
+
+
+int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
+{
+ uint32_t data;
+ uint32_t addr;
+ uint8_t *dest_byte;
+ uint8_t i, data_byte[4] = {0};
+ uint32_t *pdata = (uint32_t *)&data_byte;
+
+ PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
+
+ addr = smc_start_address;
+
+ while (byte_count >= 4) {
+ smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+
+ *dest = PP_SMC_TO_HOST_UL(data);
+
+ dest += 1;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ if (byte_count) {
+ smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+ *pdata = PP_SMC_TO_HOST_UL(data);
+ /* Copy the remaining bytes through a byte pointer so we do not write past the end of a destination buffer whose size is not a multiple of 4. */
+ dest_byte = (uint8_t *)dest;
+ for (i = 0; i < byte_count; i++)
+ dest_byte[i] = data_byte[i];
+ }
+
+ return 0;
+}
+
+
+int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+ const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+ int result;
+ uint32_t data = 0;
+ uint32_t original_data;
+ uint32_t addr = 0;
+ uint32_t extra_shift;
+
+ PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
+
+ addr = smc_start_address;
+
+ while (byte_count >= 4) {
+ /* Bytes are written into the SMC address space with the MSB first. */
+ data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
+
+ result = smu7_set_smc_sram_address(smumgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+ cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ if (0 != byte_count) {
+
+ data = 0;
+
+ result = smu7_set_smc_sram_address(smumgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+
+ original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+ /* Bytes are written into the SMC address space with the MSB first. */
+ data = (0x100 * data) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ result = smu7_set_smc_sram_address(smumgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+ cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+ }
+
+ return 0;
+}
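The two loops above pack the source bytes MSB-first and, for a trailing partial dword, merge the new bytes with whatever already sits at that SMC address via a read-modify-write. A stand-alone sketch of just that arithmetic:

/* Byte-packing used by smu7_copy_bytes_to_smc(): bytes go out MSB-first,
 * and a partial tail dword is merged with the original SMC contents. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_msb_first(const uint8_t *src)
{
	return src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
}

static uint32_t merge_tail(uint32_t original, const uint8_t *src,
			   uint32_t byte_count)	/* byte_count is 1..3 */
{
	uint32_t extra_shift = 8 * (4 - byte_count);
	uint32_t data = 0;

	while (byte_count--)
		data = (data << 8) | *src++;	/* MSB first */
	data <<= extra_shift;			/* left-justify the new bytes */
	/* keep the low (original) bytes that are not being overwritten */
	return data | (original & ~((~0UL) << extra_shift));
}

int main(void)
{
	const uint8_t src[7] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };

	printf("0x%08x\n", pack_msb_first(src));		/* 0x11223344 */
	printf("0x%08x\n", merge_tail(0xaabbccdd, src + 4, 3));	/* 0x556677dd */
	return 0;
}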
+
+
+int smu7_program_jump_on_start(struct pp_smumgr *smumgr)
+{
+ static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
+
+ smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
+
+ return 0;
+}
+
+bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr)
+{
+ return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+ && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
+}
+
+int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+{
+ int ret;
+
+ if (!smu7_is_smc_ram_running(smumgr))
+ return -EINVAL;
+
+
+ SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+ ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+
+ if (ret != 1)
+ printk("\n failed to send pre message %x ret is %d \n", msg, ret);
+
+ cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+
+ SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+ ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+
+ if (ret != 1)
+ printk("\n failed to send message %x ret is %d \n", msg, ret);
+
+ return 0;
+}
+
+int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
+{
+ cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+
+ return 0;
+}
+
+int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+{
+ if (!smu7_is_smc_ram_running(smumgr)) {
+ return -EINVAL;
+ }
+
+ SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+ cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+
+ return smu7_send_msg_to_smc(smumgr, msg);
+}
+
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+{
+ cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+
+ return smu7_send_msg_to_smc_without_waiting(smumgr, msg);
+}
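The mailbox protocol implemented above is: wait for SMC_RESP_0 to become non-zero (the previous message has been consumed), write the argument to SMC_MSG_ARG_0, write the message id to SMC_MESSAGE_0, then poll SMC_RESP_0 again; a response of 1 means success. A toy model with simulated registers; 0x145 is a made-up message id, not a real PPSMC_MSG_* value:

/* Simulated SMC mailbox handshake; real hardware sets the response
 * register asynchronously once the firmware has handled the message. */
#include <stdint.h>
#include <stdio.h>

static uint32_t msg_arg, message, resp;

static void fake_smc_step(void)		/* stands in for the SMC firmware */
{
	if (message) {
		resp = 1;		/* 1 == message accepted */
		message = 0;
	}
}

static int send_msg_with_parameter(uint16_t msg, uint32_t parameter)
{
	msg_arg = parameter;		/* mmSMC_MSG_ARG_0 */
	message = msg;			/* mmSMC_MESSAGE_0 */

	while (resp == 0)		/* SMUM_WAIT_FIELD_UNEQUAL(..., 0) */
		fake_smc_step();

	return resp == 1 ? 0 : -1;
}

int main(void)
{
	printf("%d\n", send_msg_with_parameter(0x145, 0x20000));	/* 0 */
	return 0;
}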
+
+int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
+{
+ cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
+
+ cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+
+ SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+ if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
+ printk("Failed to send Message.\n");
+
+ return 0;
+}
+
+int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr)
+{
+ if (!smu7_is_smc_ram_running(smumgr))
+ return -EINVAL;
+
+ SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
+ return 0;
+}
+
+
+enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
+{
+ enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
+
+ switch (fw_type) {
+ case UCODE_ID_SMU:
+ result = CGS_UCODE_ID_SMU;
+ break;
+ case UCODE_ID_SMU_SK:
+ result = CGS_UCODE_ID_SMU_SK;
+ break;
+ case UCODE_ID_SDMA0:
+ result = CGS_UCODE_ID_SDMA0;
+ break;
+ case UCODE_ID_SDMA1:
+ result = CGS_UCODE_ID_SDMA1;
+ break;
+ case UCODE_ID_CP_CE:
+ result = CGS_UCODE_ID_CP_CE;
+ break;
+ case UCODE_ID_CP_PFP:
+ result = CGS_UCODE_ID_CP_PFP;
+ break;
+ case UCODE_ID_CP_ME:
+ result = CGS_UCODE_ID_CP_ME;
+ break;
+ case UCODE_ID_CP_MEC:
+ result = CGS_UCODE_ID_CP_MEC;
+ break;
+ case UCODE_ID_CP_MEC_JT1:
+ result = CGS_UCODE_ID_CP_MEC_JT1;
+ break;
+ case UCODE_ID_CP_MEC_JT2:
+ result = CGS_UCODE_ID_CP_MEC_JT2;
+ break;
+ case UCODE_ID_RLC_G:
+ result = CGS_UCODE_ID_RLC_G;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+
+int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
+{
+ int result;
+
+ result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+
+ if (result)
+ return result;
+
+ *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+ return 0;
+}
+
+int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
+{
+ int result;
+
+ result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+
+ if (result)
+ return result;
+
+ cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
+
+ return 0;
+}
+
+/* Convert the firmware type to an SMU type mask. For MEC, we need to check all MEC-related types. */
+
+static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
+{
+ uint32_t result = 0;
+
+ switch (fw_type) {
+ case UCODE_ID_SDMA0:
+ result = UCODE_ID_SDMA0_MASK;
+ break;
+ case UCODE_ID_SDMA1:
+ result = UCODE_ID_SDMA1_MASK;
+ break;
+ case UCODE_ID_CP_CE:
+ result = UCODE_ID_CP_CE_MASK;
+ break;
+ case UCODE_ID_CP_PFP:
+ result = UCODE_ID_CP_PFP_MASK;
+ break;
+ case UCODE_ID_CP_ME:
+ result = UCODE_ID_CP_ME_MASK;
+ break;
+ case UCODE_ID_CP_MEC:
+ case UCODE_ID_CP_MEC_JT1:
+ case UCODE_ID_CP_MEC_JT2:
+ result = UCODE_ID_CP_MEC_MASK;
+ break;
+ case UCODE_ID_RLC_G:
+ result = UCODE_ID_RLC_G_MASK;
+ break;
+ default:
+ printk("UCode type is out of range! \n");
+ result = 0;
+ }
+
+ return result;
+}
+
+static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
+ uint32_t fw_type,
+ struct SMU_Entry *entry)
+{
+ int result = 0;
+ struct cgs_firmware_info info = {0};
+
+ result = cgs_get_firmware_info(smumgr->device,
+ smu7_convert_fw_type_to_cgs(fw_type),
+ &info);
+
+ if (!result) {
+ entry->version = info.version;
+ entry->id = (uint16_t)fw_type;
+ entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
+ entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
+ entry->meta_data_addr_high = 0;
+ entry->meta_data_addr_low = 0;
+ entry->data_size_byte = info.image_size;
+ entry->num_register_entries = 0;
+ }
+
+ if (fw_type == UCODE_ID_RLC_G)
+ entry->flags = 1;
+ else
+ entry->flags = 0;
+
+ return 0;
+}
+
+int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
+{
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ uint32_t fw_to_load;
+ int result = 0;
+ struct SMU_DRAMData_TOC *toc;
+
+ if (!smumgr->reload_fw) {
+ printk(KERN_INFO "[ powerplay ] skip reloading...\n");
+ return 0;
+ }
+
+ if (smu_data->soft_regs_start)
+ cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+ SMU_SoftRegisters, UcodeLoadStatus),
+ 0x0);
+
+ if (smumgr->chip_id > CHIP_TOPAZ) { /* Topaz takes the else branch below */
+ smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
+ smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
+ fw_to_load = UCODE_ID_RLC_G_MASK
+ + UCODE_ID_SDMA0_MASK
+ + UCODE_ID_SDMA1_MASK
+ + UCODE_ID_CP_CE_MASK
+ + UCODE_ID_CP_ME_MASK
+ + UCODE_ID_CP_PFP_MASK
+ + UCODE_ID_CP_MEC_MASK;
+ } else {
+ fw_to_load = UCODE_ID_RLC_G_MASK
+ + UCODE_ID_SDMA0_MASK
+ + UCODE_ID_SDMA1_MASK
+ + UCODE_ID_CP_CE_MASK
+ + UCODE_ID_CP_ME_MASK
+ + UCODE_ID_CP_PFP_MASK
+ + UCODE_ID_CP_MEC_MASK
+ + UCODE_ID_CP_MEC_JT1_MASK
+ + UCODE_ID_CP_MEC_JT2_MASK;
+ }
+
+ toc = (struct SMU_DRAMData_TOC *)smu_data->header;
+ toc->num_entries = 0;
+ toc->structure_version = 1;
+
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+
+ smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
+ smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
+
+ if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
+ printk(KERN_ERR "Fail to Request SMU Load uCode");
+
+ return result;
+}
+
+/* Check whether the firmware has been loaded; the wait will not return until the SMU has finished loading. */
+int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
+{
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
+ uint32_t ret;
+
+ ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
+ smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+ SMU_SoftRegisters, UcodeLoadStatus),
+ fw_mask, fw_mask);
+
+ return ret;
+}
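The load handshake is therefore: smu7_request_smu_load_fw() builds the TOC and sends PPSMC_MSG_LoadUcodes, and callers then poll the per-firmware bits of UcodeLoadStatus through smu7_check_fw_load_finish(). A sketch of the mask polling, with made-up bit assignments:

/* Model of the UcodeLoadStatus polling: the SMU sets one bit per firmware
 * and the driver waits until (status & fw_mask) == fw_mask. */
#include <stdint.h>
#include <stdio.h>

#define FAKE_SDMA0_MASK		0x01	/* illustrative bit positions only */
#define FAKE_CP_MEC_MASK	0x20

static uint32_t ucode_load_status;	/* SMU-owned soft register */

static int check_fw_load_finish(uint32_t fw_mask)
{
	while ((ucode_load_status & fw_mask) != fw_mask)
		ucode_load_status |= fw_mask;	/* stand-in for the SMU */
	return 0;
}

int main(void)
{
	check_fw_load_finish(FAKE_SDMA0_MASK);
	check_fw_load_finish(FAKE_CP_MEC_MASK);
	printf("status 0x%02x\n", ucode_load_status);	/* 0x21 */
	return 0;
}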
+
+int smu7_reload_firmware(struct pp_smumgr *smumgr)
+{
+ return smumgr->smumgr_funcs->start_smu(smumgr);
+}
+
+static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
+{
+ uint32_t byte_count = length;
+
+ PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
+
+ cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
+ SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
+
+ for (; byte_count >= 4; byte_count -= 4)
+ cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
+
+ SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
+
+ PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);
+
+ return 0;
+}
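Unlike smu7_set_smc_sram_address(), which clears AUTO_INCREMENT, the upload path enables it so that each write to the data register advances the index by one dword; that is what lets the loop stream a whole image through a single register. A user-space model:

/* Auto-increment burst as in smu7_upload_smc_firmware_data(): with
 * auto-increment on, every data-register write bumps the index by 4. */
#include <stdint.h>
#include <stdio.h>

static uint32_t sram[64], index_reg;
static int auto_inc;

static void write_data_reg(uint32_t v)
{
	sram[index_reg / 4] = v;
	if (auto_inc)
		index_reg += 4;
}

int main(void)
{
	const uint32_t image[4] = { 1, 2, 3, 4 };
	size_t i;

	index_reg = 0x20;	/* start offset, like the 0x20000 above */
	auto_inc = 1;
	for (i = 0; i < 4; i++)
		write_data_reg(image[i]);
	auto_inc = 0;

	printf("%u %u\n", sram[0x20 / 4], sram[0x2c / 4]);	/* 1 4 */
	return 0;
}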
+
+
+int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
+{
+ int result = 0;
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+
+ struct cgs_firmware_info info = {0};
+
+ if (smu_data->security_hard_key == 1)
+ cgs_get_firmware_info(smumgr->device,
+ smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
+ else
+ cgs_get_firmware_info(smumgr->device,
+ smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
+
+ result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
+
+ return result;
+}
+
+
+int smu7_init(struct pp_smumgr *smumgr)
+{
+ struct smu7_smumgr *smu_data;
+ uint8_t *internal_buf;
+ uint64_t mc_addr = 0;
+
+ /* Backend private data is allocated by the chip-specific smum init */
+ smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ smu_data->header_buffer.data_size =
+ ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
+ smu_data->smu_buffer.data_size = 200*4096;
+
+/* Allocate the FW image data structure and the header buffer, and
+ * send the header buffer address to the SMU */
+ smu_allocate_memory(smumgr->device,
+ smu_data->header_buffer.data_size,
+ CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+ PAGE_SIZE,
+ &mc_addr,
+ &smu_data->header_buffer.kaddr,
+ &smu_data->header_buffer.handle);
+
+ smu_data->header = smu_data->header_buffer.kaddr;
+ smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
+ smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+
+ PP_ASSERT_WITH_CODE((NULL != smu_data->header),
+ "Out of memory.",
+ kfree(smumgr->backend);
+ cgs_free_gpu_mem(smumgr->device,
+ (cgs_handle_t)smu_data->header_buffer.handle);
+ return -EINVAL);
+
+ smu_allocate_memory(smumgr->device,
+ smu_data->smu_buffer.data_size,
+ CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+ PAGE_SIZE,
+ &mc_addr,
+ &smu_data->smu_buffer.kaddr,
+ &smu_data->smu_buffer.handle);
+
+ internal_buf = smu_data->smu_buffer.kaddr;
+ smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
+ smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+
+ PP_ASSERT_WITH_CODE((NULL != internal_buf),
+ "Out of memory.",
+ kfree(smumgr->backend);
+ cgs_free_gpu_mem(smumgr->device,
+ (cgs_handle_t)smu_data->smu_buffer.handle);
+ return -EINVAL);
+
+ return 0;
+}
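Note the size rounding above: ((size / 4096) + 1) * 4096 always adds a full page, even when size is already a multiple of 4096. Whether that extra page is intentional slack or an off-by-one is not clear from the patch; for comparison, the conventional round-up (what the kernel's PAGE_ALIGN() computes) is:

/* Standard power-of-two round-up, shown in user space for comparison. */
#include <stdio.h>
#include <stddef.h>

#define MY_PAGE_SIZE 4096UL

static size_t page_align(size_t size)
{
	return (size + MY_PAGE_SIZE - 1) & ~(MY_PAGE_SIZE - 1);
}

int main(void)
{
	printf("%zu\n", page_align(100));	/* 4096 */
	printf("%zu\n", page_align(4096));	/* 4096, not 8192 */
	return 0;
}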
+
+
+int smu7_smu_fini(struct pp_smumgr *smumgr)
+{
+ if (smumgr->backend) {
+ kfree(smumgr->backend);
+ smumgr->backend = NULL;
+ }
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
new file mode 100644
index 000000000000..76352f2423ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SMU7_SMUMANAGER_H
+#define _SMU7_SMUMANAGER_H
+
+
+#include <pp_endian.h>
+
+#define SMC_RAM_END 0x40000
+#define mmSMC_IND_INDEX_11 0x01AC
+#define mmSMC_IND_DATA_11 0x01AD
+
+struct smu7_buffer_entry {
+ uint32_t data_size;
+ uint32_t mc_addr_low;
+ uint32_t mc_addr_high;
+ void *kaddr;
+ unsigned long handle;
+};
+
+struct smu7_smumgr {
+ uint8_t *header;
+ uint8_t *mec_image;
+ struct smu7_buffer_entry smu_buffer;
+ struct smu7_buffer_entry header_buffer;
+
+ uint32_t soft_regs_start;
+ uint32_t dpm_table_start;
+ uint32_t mc_reg_table_start;
+ uint32_t fan_table_start;
+ uint32_t arb_table_start;
+ uint32_t ulv_setting_starts;
+ uint8_t security_hard_key;
+ uint32_t acpi_optimization;
+};
+
+
+int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+ uint32_t *dest, uint32_t byte_count, uint32_t limit);
+int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+ const uint8_t *src, uint32_t byte_count, uint32_t limit);
+int smu7_program_jump_on_start(struct pp_smumgr *smumgr);
+bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr);
+int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);
+int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg);
+int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg,
+ uint32_t parameter);
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr,
+ uint16_t msg, uint32_t parameter);
+int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr);
+int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr);
+
+enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
+int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+ uint32_t *value, uint32_t limit);
+int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+ uint32_t value, uint32_t limit);
+
+int smu7_request_smu_load_fw(struct pp_smumgr *smumgr);
+int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type);
+int smu7_reload_firmware(struct pp_smumgr *smumgr);
+int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr);
+int smu7_init(struct pp_smumgr *smumgr);
+int smu7_smu_fini(struct pp_smumgr *smumgr);
+
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 7723473e51a0..e5812aa456f3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -28,10 +28,7 @@
#include "smumgr.h"
#include "cgs_common.h"
#include "linux/delay.h"
-#include "cz_smumgr.h"
-#include "tonga_smumgr.h"
-#include "fiji_smumgr.h"
-#include "polaris10_smumgr.h"
+
int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
{
@@ -47,7 +44,6 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
smumgr->device = pp_init->device;
smumgr->chip_family = pp_init->chip_family;
smumgr->chip_id = pp_init->chip_id;
- smumgr->hw_revision = pp_init->rev_id;
smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
smumgr->reload_fw = 1;
handle->smu_mgr = smumgr;
@@ -58,6 +54,9 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
break;
case AMDGPU_FAMILY_VI:
switch (smumgr->chip_id) {
+ case CHIP_TOPAZ:
+ iceland_smum_init(smumgr);
+ break;
case CHIP_TONGA:
tonga_smum_init(smumgr);
break;
@@ -87,6 +86,57 @@ int smum_fini(struct pp_smumgr *smumgr)
return 0;
}
+int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
+ void *input, void *output, void *storage, int result)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable)
+ return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
+
+ return 0;
+}
+
+int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+ void *input, void *output, void *storage, int result)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table)
+ return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
+
+ return 0;
+}
+
+int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold)
+ return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr);
+
+ return 0;
+}
+
+int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table)
+ return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type);
+
+ return 0;
+}
+
+uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member)
+{
+ if (NULL != smumgr->smumgr_funcs->get_offsetof)
+ return smumgr->smumgr_funcs->get_offsetof(type, member);
+
+ return 0;
+}
+
+int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header)
+ return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr);
+ return 0;
+}
+
int smum_get_argument(struct pp_smumgr *smumgr)
{
if (NULL != smumgr->smumgr_funcs->get_argument)
@@ -95,13 +145,20 @@ int smum_get_argument(struct pp_smumgr *smumgr)
return 0;
}
+uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value)
+{
+ if (NULL != smumgr->smumgr_funcs->get_mac_definition)
+ return smumgr->smumgr_funcs->get_mac_definition(value);
+
+ return 0;
+}
+
int smum_download_powerplay_table(struct pp_smumgr *smumgr,
void **table)
{
if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
return smumgr->smumgr_funcs->download_pptable_settings(smumgr,
table);
-
return 0;
}
@@ -268,3 +325,44 @@ int smu_free_memory(void *device, void *handle)
return 0;
}
+
+int smum_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table)
+ return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr);
+
+ return 0;
+}
+
+int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels)
+ return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
+
+ return 0;
+}
+
+int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels)
+ return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
+
+ return 0;
+}
+
+/* this interface is needed by the CI/VI islands */
+int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table)
+ return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
+
+ return 0;
+}
+
+bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running)
+ return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr);
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
new file mode 100644
index 000000000000..de2a24d85f48
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
@@ -0,0 +1,3206 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "tonga_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "tonga_smumgr.h"
+#include "pppcielanes.h"
+#include "pp_endian.h"
+#include "smu7_ppsmc.h"
+
+#include "smu72_discrete.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define VDDC_VDDCI_DELTA 200
+
+
+static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+/* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
+ */
+ {1, 0xF, 0xFD, 0x19,
+ 5, 45, 0, 0xB0000,
+ {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
+ 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+ {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
+ 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
+ },
+};
+
+/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
+static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
+ {600, 1050, 3, 0},
+ {600, 1050, 6, 1}
+};
+
+/* [FF, SS] type, [] 4 voltage ranges,
+ * and [Floor Freq, Boundary Freq, VID min , VID max]
+ */
+static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
+ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+ { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
+};
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */
+static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
+ {0, 1, 3, 2, 4, 5},
+ {0, 2, 4, 5, 6, 5}
+};
+
+/* PPGen generates the gain setting in units of x * 100;
+ * it has to be converted to the x * 4096 (0x1000) fixed-point
+ * unit that the SMC firmware expects.
+ */
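No conversion helper follows this comment in the hunk, but the arithmetic it describes is a straight rescale; a sketch, assuming the gain is carried as a uint16_t:

/* Rescale a gain from the x * 100 unit PPGen emits to the x * 4096
 * fixed-point unit the SMC expects; the function name is illustrative. */
#include <stdint.h>
#include <stdio.h>

static uint16_t scale_gain(uint16_t gain_x100)
{
	return (uint16_t)(((uint32_t)gain_x100 * 4096) / 100);
}

int main(void)
{
	printf("%u\n", scale_gain(100));	/* 1.00 -> 4096 (0x1000) */
	printf("%u\n", scale_gain(250));	/* 2.50 -> 10240 */
	return 0;
}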
+
+
+static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
+ phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+ uint32_t i = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ /* the clock-voltage dependency table is empty */
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ /* find first sclk bigger than request */
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ voltage->VddGfx = phm_get_voltage_index(
+ pptable_info->vddgfx_lookup_table,
+ allowed_clock_voltage_table->entries[i].vddgfx);
+ voltage->Vddc = phm_get_voltage_index(
+ pptable_info->vddc_lookup_table,
+ allowed_clock_voltage_table->entries[i].vddc);
+
+ if (allowed_clock_voltage_table->entries[i].vddci)
+ voltage->Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
+ else
+ voltage->Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA);
+
+
+ if (allowed_clock_voltage_table->entries[i].mvdd)
+ *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
+
+ voltage->Phases = 1;
+ return 0;
+ }
+ }
+
+ /* sclk is bigger than the max sclk in the dependency table */
+ voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ allowed_clock_voltage_table->entries[i-1].vddgfx);
+ voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ allowed_clock_voltage_table->entries[i-1].vddc);
+
+ if (allowed_clock_voltage_table->entries[i-1].vddci)
+ voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
+ allowed_clock_voltage_table->entries[i-1].vddci);
+
+ if (allowed_clock_voltage_table->entries[i-1].mvdd)
+ *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
+
+ return 0;
+}
+
+
+/**
+ * Vddc table preparation for SMC.
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param table the SMC DPM table structure to be populated
+ * @return always 0
+ */
+static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ table->VddcLevelCount = data->vddc_voltage_table.count;
+ for (count = 0; count < table->VddcLevelCount; count++) {
+ table->VddcTable[count] =
+ PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ }
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+ }
+ return 0;
+}
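VOLTAGE_SCALE is 4, so each VddcTable entry appears to carry the voltage in 0.25 mV units, byte-swapped into the SMC's big-endian layout by PP_HOST_TO_SMC_US(). A quick illustration of what one entry ends up holding on a little-endian host:

/* What a VddcTable entry holds: mV * VOLTAGE_SCALE, byte-swapped. */
#include <stdint.h>
#include <stdio.h>

#define VOLTAGE_SCALE 4

static uint16_t swap16(uint16_t v)	/* PP_HOST_TO_SMC_US on LE hosts */
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	uint16_t vddc_mv = 1150;			/* 1.15 V, illustrative */
	uint16_t raw = vddc_mv * VOLTAGE_SCALE;		/* 4600 = 0x11f8 */

	printf("0x%04x\n", swap16(raw));		/* 0xf811 */
	return 0;
}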
+
+/**
+ * VddGfx table preparation for SMC.
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param table the SMC DPM table structure to be populated
+ * @return always 0
+ */
+static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+ table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
+ for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
+ table->VddGfxTable[count] =
+ PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ }
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
+ }
+ return 0;
+}
+
+/**
+ * Vddci table preparation for SMC.
+ *
+ * @param *hwmgr The address of the hardware manager.
+ * @param *table The SMC DPM table structure to be populated.
+ * @return 0
+ */
+static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+
+ table->VddciLevelCount = data->vddci_voltage_table.count;
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ table->VddciTable[count] =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ table->SmioTable1.Pattern[count].Voltage =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
+ table->SmioTable1.Pattern[count].Smio =
+ (uint8_t) count;
+ table->Smio[count] |=
+ data->vddci_voltage_table.entries[count].smio_low;
+ table->VddciTable[count] =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ }
+ }
+
+ table->SmioMask1 = data->vddci_voltage_table.mask_low;
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+ return 0;
+}
+
+/**
+ * Mvdd table preparation for SMC.
+ *
+ * @param *hwmgr The address of the hardware manager.
+ * @param *table The SMC DPM table structure to be populated.
+ * @return 0
+ */
+static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ table->MvddLevelCount = data->mvdd_voltage_table.count;
+ for (count = 0; count < table->MvddLevelCount; count++) {
+ table->SmioTable2.Pattern[count].Voltage =
+ PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+ table->SmioTable2.Pattern[count].Smio =
+ (uint8_t) count;
+ table->Smio[count] |=
+ data->mvdd_voltage_table.entries[count].smio_low;
+ }
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+ }
+
+ return 0;
+}
+
+/**
+ * Preparation of vddc and vddgfx CAC tables for SMC.
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param table the SMC DPM table structure to be populated
+ * @return always 0
+ */
+static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
+ pptable_info->vddgfx_lookup_table;
+ struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
+ pptable_info->vddc_lookup_table;
+
+ /* table is already swapped, so in order to use the value from it
+ * we need to swap it back.
+ */
+ uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
+ uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
+
+ for (count = 0; count < vddc_level_count; count++) {
+ /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
+ index = phm_get_voltage_index(vddc_lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+ table->BapmVddcVidHiSidd2[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+ }
+
+ if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) {
+ /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
+ for (count = 0; count < vddgfx_level_count; count++) {
+ index = phm_get_voltage_index(vddgfx_lookup_table,
+ convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid));
+ table->BapmVddGfxVidHiSidd2[count] =
+ convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
+ }
+ } else {
+ for (count = 0; count < vddc_level_count; count++) {
+ index = phm_get_voltage_index(vddc_lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddGfxVidLoSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+ table->BapmVddGfxVidHiSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+ table->BapmVddGfxVidHiSidd2[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Preparation of voltage tables for SMC.
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param table the SMC DPM table structure to be populated
+ * @return always 0
+ */
+
+static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = tonga_populate_smc_vddc_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate VDDC voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate VDDCI voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate VDDGFX voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_smc_mvdd_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate MVDD voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_cac_tables(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate CAC voltage tables to SMC",
+ return -EINVAL);
+
+ return 0;
+}
+
+static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU72_Discrete_Ulv *state)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ state->VddcPhase = 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
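VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 is 100/625, i.e. one VID step per 6.25 mV, so VddcOffsetVid is the ULV offset expressed in VID steps. A quick check with an illustrative 50 mV offset:

/* mV-to-VID-step conversion used for VddcOffsetVid above. */
#include <stdint.h>
#include <stdio.h>

#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100

int main(void)
{
	uint16_t offset_mv = 50;	/* example offset, not from a pptable */
	uint8_t vid = (uint8_t)(offset_mv * VOLTAGE_VID_OFFSET_SCALE2 /
				VOLTAGE_VID_OFFSET_SCALE1);

	printf("%u\n", vid);		/* 50 mV / 6.25 mV = 8 steps */
	return 0;
}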
+
+static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU72_Discrete_DpmTable *table)
+{
+ return tonga_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t i;
+
+ /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity =
+ 1;
+ table->LinkLevel[i].SPC =
+ (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold =
+ PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold =
+ PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+/**
+ * Calculates the SCLK dividers using the provided engine clock
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param engine_clock the engine clock to use to populate the structure
+ * @param sclk the SMC SCLK structure to be populated
+ */
+static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t reference_clock;
+ uint32_t reference_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+ reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+ reference_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* low 14 bits is fraction and high 12 bits is divider*/
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup*/
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ pp_atomctrl_internal_ss_info ss_info;
+
+ uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+ if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ */
+ /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+ uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 =
+ PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+ }
+ }
+
+ sclk->SclkFrequency = engine_clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
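To make the spread-spectrum branch concrete, here is the CLKS/CLKV arithmetic with invented but plausible inputs (a 25 MHz reference in 10 kHz units, reference divider 2, a 30 kHz spreading rate and a 0.5% spread given in 0.01% units); none of these values come from a real VBIOS:

/* Worked CLKS/CLKV example for the engine SPLL spread spectrum. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reference_clock = 2500;	/* 25 MHz, 10 kHz units */
	uint32_t reference_divider = 2;		/* 1 + uc_pll_ref_div */
	uint32_t rate = 30;			/* speed_spectrum_rate, kHz */
	uint32_t percentage = 50;		/* 0.50% in 0.01% units */
	uint32_t fbdiv = 96 * 16384;		/* VCO/Fint scaled by 2^14 */

	uint32_t clk_s = reference_clock * 5 / (reference_divider * rate);
	uint32_t clk_v = 4 * percentage * fbdiv / (clk_s * 10000);

	printf("CLKS=%u CLKV=%u\n", clk_s, clk_v);	/* CLKS=208 CLKV=151 */
	return 0;
}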
+
+/**
+ * Populates single SMC SCLK structure using the provided engine clock
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param engine_clock the engine clock to use to populate the structure
+ * @param sclk the SMC SCLK structure to be populated
+ */
+static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint16_t sclk_activity_level_threshold,
+ SMU72_Discrete_GraphicsLevel *graphic_level)
+{
+ int result;
+ uint32_t mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+ /* populate graphics levels*/
+ result = tonga_get_dependecy_volt_by_clk(hwmgr,
+ pptable_info->vdd_dep_on_sclk, engine_clock,
+ &graphic_level->MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find VDDC voltage value for VDDC "
+ "engine clock dependency table", return result);
+
+ /* SCLK frequency in units of 10KHz*/
+ graphic_level->SclkFrequency = engine_clock;
+ /* Indicates maximum activity level for this performance level. 50% for now*/
+ graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+ graphic_level->CcPwrDynRm = 0;
+ graphic_level->CcPwrDynRm1 = 0;
+ /* this level can be used if activity is high enough.*/
+ graphic_level->EnabledForActivity = 0;
+ /* this level can be used for throttling.*/
+ graphic_level->EnabledForThrottle = 1;
+ graphic_level->UpHyst = 0;
+ graphic_level->DownHyst = 0;
+ graphic_level->VoltageDownHyst = 0;
+ graphic_level->PowerThrottle = 0;
+
+ data->display_timing.min_clock_in_sr =
+ hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep))
+ graphic_level->DeepSleepDivId =
+ smu7_get_sleep_divider_id_from_clock(engine_clock,
+ data->display_timing.min_clock_in_sr);
+
+ /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+ graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (!result) {
+ /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
+ /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+ }
+
+ return result;
+}
+
+/**
+ * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+ *
+ * @param hwmgr the address of the hardware manager
+ */
+int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
+ uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
+ uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
+
+ uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
+ SMU72_MAX_LEVELS_GRAPHICS;
+
+ SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+ uint32_t i, max_entry;
+ uint8_t highest_pcie_level_enabled = 0;
+ uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+ uint8_t count = 0;
+ int result = 0;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = tonga_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result != 0)
+ return result;
+
+ /* Make sure only DPM levels 0-1 have a Deep Sleep Div ID populated. */
+ if (i > 1)
+ smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+ }
+
+ /* Only enable level 0 for now. */
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ if (dpm_table->sclk_table.count > 1)
+ smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ }
+ } else {
+ if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
+ printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0 !");
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<(highest_pcie_level_enabled+1))) != 0)) {
+ highest_pcie_level_enabled++;
+ }
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<lowest_pcie_level_enabled)) == 0)) {
+ lowest_pcie_level_enabled++;
+ }
+
+ while ((count < highest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
+ count++;
+ }
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
+
+
+ /* set pcieDpmLevel to highest_pcie_level_enabled*/
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address,
+ (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
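The three while-loops in the else branch scan the PCIe DPM enable mask for the highest, lowest and an intermediate enabled level; note that they implicitly assume the enabled levels form one contiguous run of set bits. A stand-alone version with an illustrative mask:

/* Bit scans from tonga_populate_all_graphic_levels(), extracted. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0x0e;	/* levels 1..3 enabled (illustrative) */
	uint8_t highest = 0, lowest = 0, count = 0, mid;

	while (mask && (mask & (1 << (highest + 1))))
		highest++;
	while (mask && !(mask & (1 << lowest)))
		lowest++;
	while (count < highest && !(mask & (1 << (lowest + 1 + count))))
		count++;
	mid = (lowest + 1 + count) < highest ?
	      (uint8_t)(lowest + 1 + count) : highest;

	printf("low=%u mid=%u high=%u\n", lowest, mid, highest);  /* 1 2 3 */
	return 0;
}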
+
+/**
+ * Populates the SMC MCLK structure using the provided memory clock
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param memory_clock the memory clock to use to populate the structure
+ * @param sclk the SMC SCLK structure to be populated
+ */
+static int tonga_calculate_mclk_params(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU72_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dllStateOn
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+ uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+ uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+ uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+ uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+ uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+ uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
+ uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+ pp_atomctrl_memory_clock_param mpll_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+ memory_clock, &mpll_param, strobe_mode);
+ PP_ASSERT_WITH_CODE(
+ !result,
+ "Error retrieving Memory Clock Parameters from VBIOS.",
+ return result);
+
+ /* MPLL_FUNC_CNTL setup*/
+ mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,
+ mpll_param.bw_ctrl);
+
+ /* MPLL_FUNC_CNTL_1 setup*/
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKF,
+ mpll_param.mpll_fb_divider.cl_kf);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKFRAC,
+ mpll_param.mpll_fb_divider.clk_frac);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, VCO_MODE,
+ mpll_param.vco_mode);
+
+ /* MPLL_AD_FUNC_CNTL setup*/
+ mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+ MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,
+ mpll_param.mpll_post_divider);
+
+ if (data->is_memory_gddr5) {
+ /* MPLL_DQ_FUNC_CNTL setup*/
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_SEL,
+ mpll_param.yclk_sel);
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,
+ mpll_param.mpll_post_divider);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+ /*
+ ************************************
+ Fref = Reference Frequency
+ NF = Feedback divider ratio
+ NR = Reference divider ratio
+ Fnom = Nominal VCO output frequency = Fref * NF / NR
+ Fs = Spreading Rate
+ D = Percentage down-spread / 2
+ Fint = Reference input frequency to PFD = Fref / NR
+ NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+ CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+ NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+ CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+ *************************************
+ */
+ pp_atomctrl_internal_ss_info ss_info;
+ uint32_t freq_nom;
+ uint32_t tmp;
+ uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+ /* for GDDR5 for all modes and DDR3 */
+ if (1 == mpll_param.qdr)
+ freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+ else
+ freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+ /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
+ tmp = (freq_nom / reference_clock);
+ tmp = tmp * tmp;
+
+ if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+ /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+ /* ss.Info.speed_spectrum_rate -- in unit of khz */
+ /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+ /* = reference_clock * 5 / speed_spectrum_rate */
+ uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+ /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+ /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+ uint32_t clkv =
+ (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+ ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+ mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+ }
+ }
+
+ /* MCLK_PWRMGT_CNTL setup */
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+ /* Save the result data to the output memory level structure */
+ mclk->MclkFrequency = memory_clock;
+ mclk->MpllFuncCntl = mpll_func_cntl;
+ mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+ mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+ mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+ mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+ mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+ mclk->DllCntl = dll_cntl;
+ mclk->MpllSs1 = mpll_ss1;
+ mclk->MpllSs2 = mpll_ss2;
+
+ return 0;
+}
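Walking illustrative numbers through the CLKS/CLKV formulas from the comment block above (all inputs are invented, not read from a real board): a 25 MHz MPLL reference, a 1.5 GHz memory clock in QDR mode with a post-divider factor of 1, a 30 kHz spreading rate and a 0.5% down-spread:

/* Worked CLKS/CLKV example for the memory PLL spread spectrum. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reference_clock = 2500;	/* 25 MHz, 10 kHz units */
	uint32_t memory_clock = 150000;		/* 1.5 GHz, 10 kHz units */
	uint32_t freq_nom = memory_clock * 4;	/* QDR, post-divider factor 1 */
	uint32_t tmp = (freq_nom / reference_clock) *
		       (freq_nom / reference_clock);
	uint32_t rate = 30;			/* kHz */
	uint32_t pct = 50;			/* 0.50% in 0.01% units */

	uint32_t clks = reference_clock * 5 / rate;
	uint32_t clkv = (((131 * pct * rate) / 100) * tmp) / freq_nom;

	printf("CLKS=%u CLKV=%u\n", clks, clkv);	/* CLKS=416 CLKV=188 */
	return 0;
}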
+
+static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
+ bool strobe_mode)
+{
+ uint8_t mc_para_index;
+
+ if (strobe_mode) {
+ if (memory_clock < 12500)
+ mc_para_index = 0x00;
+ else if (memory_clock > 47500)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+ } else {
+ if (memory_clock < 65000)
+ mc_para_index = 0x00;
+ else if (memory_clock > 135000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+ }
+
+ return mc_para_index;
+}
+
+static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+ uint8_t mc_para_index;
+
+ if (memory_clock < 10000)
+ mc_para_index = 0;
+ else if (memory_clock >= 80000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+
+ return mc_para_index;
+}
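Both ratio helpers map a memory clock onto a 4-bit table index in fixed steps with clamped ends; in strobe mode the range 12500..47500 (10 kHz units) maps to 1..15 in 2500-unit steps. A quick boundary check:

/* Strobe-mode index mapping from tonga_get_mclk_frequency_ratio(). */
#include <stdint.h>
#include <stdio.h>

static uint8_t strobe_index(uint32_t memory_clock)
{
	if (memory_clock < 12500)
		return 0x00;
	if (memory_clock > 47500)
		return 0x0f;
	return (uint8_t)((memory_clock - 10000) / 2500);
}

int main(void)
{
	printf("%u %u %u\n", strobe_index(12000), strobe_index(25000),
	       strobe_index(47500));	/* 0 6 15 */
	return 0;
}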
+
+
+static int tonga_populate_single_memory_level(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU72_Discrete_MemoryLevel *memory_level
+ )
+{
+ uint32_t mvdd = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ bool dll_state_on;
+ struct cgs_display_info info = {0};
+ uint32_t mclk_edc_wr_enable_threshold = 40000;
+ uint32_t mclk_stutter_mode_threshold = 30000;
+ uint32_t mclk_edc_enable_threshold = 40000;
+ uint32_t mclk_strobe_mode_threshold = 40000;
+
+ if (NULL != pptable_info->vdd_dep_on_mclk) {
+ result = tonga_get_dependecy_volt_by_clk(hwmgr,
+ pptable_info->vdd_dep_on_mclk,
+ memory_clock,
+ &memory_level->MinVoltage, &mvdd);
+		PP_ASSERT_WITH_CODE(
+			!result,
+			"cannot find MinVddc voltage value from memory VDDC "
+			"voltage dependency table",
+			return result);
+ }
+
+ if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
+ memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
+ else
+ memory_level->MinMvdd = mvdd;
+
+ memory_level->EnabledForThrottle = 1;
+ memory_level->EnabledForActivity = 0;
+ memory_level->UpHyst = 0;
+ memory_level->DownHyst = 100;
+ memory_level->VoltageDownHyst = 0;
+
+ /* Indicates maximum activity level for this performance level.*/
+ memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ memory_level->StutterEnable = 0;
+ memory_level->StrobeEnable = 0;
+ memory_level->EdcReadEnable = 0;
+ memory_level->EdcWriteEnable = 0;
+ memory_level->RttEnable = 0;
+
+ /* default set to low watermark. Highest level will be set to high later.*/
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ data->display_timing.num_existing_displays = info.display_count;
+
+ if ((mclk_stutter_mode_threshold != 0) &&
+ (memory_clock <= mclk_stutter_mode_threshold) &&
+ (!data->is_uvd_enabled)
+ && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
+ && (data->display_timing.num_existing_displays <= 2)
+ && (data->display_timing.num_existing_displays != 0))
+ memory_level->StutterEnable = 1;
+
+ /* decide strobe mode*/
+ memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+ (memory_clock <= mclk_strobe_mode_threshold);
+
+ /* decide EDC mode and memory clock ratio*/
+ if (data->is_memory_gddr5) {
+ memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
+ memory_level->StrobeEnable);
+
+ if ((mclk_edc_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_enable_threshold)) {
+ memory_level->EdcReadEnable = 1;
+ }
+
+ if ((mclk_edc_wr_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_wr_enable_threshold)) {
+ memory_level->EdcWriteEnable = 1;
+ }
+
+ if (memory_level->StrobeEnable) {
+ if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
+ ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ } else {
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ } else {
+ dll_state_on = data->dll_default_on;
+ }
+ } else {
+ memory_level->StrobeRatio =
+ tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ result = tonga_calculate_mclk_params(hwmgr,
+ memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
+ /* MCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+ /* Indicates maximum activity level for this performance level.*/
+ CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+ }
+
+ return result;
+}
+
+int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t level_array_address =
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
+ uint32_t level_array_size =
+ sizeof(SMU72_Discrete_MemoryLevel) *
+ SMU72_MAX_LEVELS_MEMORY;
+ SMU72_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+			"cannot populate memory level as memory clock is zero",
+			return -EINVAL);
+ result = tonga_populate_single_memory_level(
+ hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &(smu_data->smc_state_table.MemoryLevel[i]));
+ if (result)
+ return result;
+ }
+
+ /* Only enable level 0 for now.*/
+ smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+	/*
+	 * Prevent MC activity in stutter mode from pushing DPM up:
+	 * the UVD handling complements this by keeping the MCLK in a
+	 * higher state by default, so we are not affected by the up
+	 * threshold or MCLK DPM latency.
+	 */
+ smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ /* set highest level watermark to high*/
+ smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
+
+static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pattern)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+		/* find the first MVDD entry whose clock is >= the request */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ /* Always round to higher voltage. */
+ smio_pattern->Voltage =
+ data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+
+ SMIO_Pattern voltage_level;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+ /* The ACPI state should not do DPM on DC (or ever).*/
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ table->ACPILevel.MinVoltage =
+ smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;
+
+	/* run the ACPI state at the reference clock */
+ table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ /* divider ID for required SCLK*/
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
+ SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+ /* For various features to be enabled/disabled while this level is active.*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ /* SCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+ table->MemoryACPILevel.MinVoltage =
+ smu_data->smc_state_table.MemoryLevel[0].MinVoltage;
+
+ /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
+
+ if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ /* Force reset on DLL*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+ /* Disable DLL in ACPIState*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+	/* Configure the DLL bypass signals (MRDCKx_BYPASS = 0) */
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK0_BYPASS, 0);
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK1_BYPASS, 0);
+
+ table->MemoryACPILevel.DllCntl =
+ PP_HOST_TO_SMC_UL(dll_cntl);
+ table->MemoryACPILevel.MclkPwrmgtCntl =
+ PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+ table->MemoryACPILevel.MpllAdFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+ table->MemoryACPILevel.MpllDqFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl_1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+ table->MemoryACPILevel.MpllFuncCntl_2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+ table->MemoryACPILevel.MpllSs1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+ table->MemoryACPILevel.MpllSs2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ /* Indicates maximum activity level for this performance level.*/
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = 0;
+ table->MemoryACPILevel.StrobeEnable = 0;
+ table->MemoryACPILevel.EdcReadEnable = 0;
+ table->MemoryACPILevel.EdcWriteEnable = 0;
+ table->MemoryACPILevel.RttEnable = 0;
+
+ return result;
+}
+
+static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->UvdLevelCount = (uint8_t) (mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->UvdLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->UvdLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->UvdLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(
+ hwmgr,
+ table->UvdLevel[count].VclkFrequency,
+ &dividers);
+
+		PP_ASSERT_WITH_CODE((!result),
+			"cannot find divider id for the Vclk",
+			return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+		PP_ASSERT_WITH_CODE((!result),
+			"cannot find divider id for the Dclk",
+			return result);
+
+ table->UvdLevel[count].DclkDivider =
+ (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ }
+
+ return result;
+
+}
+
+static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->VceLevelCount = (uint8_t) (mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency =
+ mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->VceLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->VceLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->VceLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((!result),
+			"cannot find divider id for the VCE engine clock",
+			return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->AcpLevelCount = (uint8_t) (mm_table->count);
+ table->AcpBootLevel = 0;
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency =
+ pptable_info->mm_dep_table->entries[count].aclk;
+ table->AcpLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->AcpLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->AcpLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->AcpLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->AcpLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((!result),
+			"cannot find divider id for the ACP engine clock", return result);
+
+ table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t) (mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].Frequency =
+ pptable_info->mm_dep_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->SamuLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->SamuLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->SamuLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((!result),
+			"cannot find divider id for the SAMU clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_memory_timing_parameters(
+ struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint32_t memory_clock,
+ struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
+ )
+{
+ uint32_t dramTiming;
+ uint32_t dramTiming2;
+ uint32_t burstTime;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ engine_clock, memory_clock);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+ return 0;
+}
+
+/**
+ * Set up parameters for the MC ARB.
+ *
+ * This function is to be called from the SetPowerState table.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ int result = 0;
+ SMU72_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+
+ memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
+
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = tonga_populate_memory_timing_parameters
+ (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+
+ if (result)
+ break;
+ }
+ }
+
+ if (!result) {
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU72_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END
+ );
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table*/
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+ if (result != 0) {
+ smu_data->smc_state_table.GraphicsBootLevel = 0;
+ printk(KERN_ERR "[powerplay] VBIOS did not find boot engine "
+ "clock value in dependency table. "
+ "Using Graphics DPM level 0 !");
+ result = 0;
+ }
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+ if (result != 0) {
+ smu_data->smc_state_table.MemoryBootLevel = 0;
+		printk(KERN_ERR "[powerplay] VBIOS did not find boot "
+			"memory clock value in dependency table. "
+			"Using Memory DPM level 0 !");
+ result = 0;
+ }
+
+ table->BootVoltage.Vddc =
+ phm_get_voltage_id(&(data->vddc_voltage_table),
+ data->vbios_boot_state.vddc_bootup_value);
+ table->BootVoltage.VddGfx =
+ phm_get_voltage_id(&(data->vddgfx_voltage_table),
+ data->vbios_boot_state.vddgfx_bootup_value);
+ table->BootVoltage.Vddci =
+ phm_get_voltage_id(&(data->vddci_voltage_table),
+ data->vbios_boot_state.vddci_bootup_value);
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return result;
+}
+
+static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
+ volt_with_cks, value;
+ uint16_t clock_freq_u16;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
+ volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+ uint32_t hw_revision, dev_id;
+ struct cgs_system_info sys_info = {0};
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ hw_revision = (uint32_t)sys_info.value;
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+	/* Read the SMU efuse to calculate RO and determine whether
+	 * the part is SS or FF; if RO >= 1660 MHz, the part is FF.
+	 */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (146 * 4));
+ efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (148 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+ efuse2 &= 0xF;
+
+ if (efuse2 == 1)
+ ro = (2300 - 1350) * efuse / 255 + 1350;
+ else
+ ro = (2500 - 1000) * efuse / 255 + 1000;
+
+ if (ro >= 1660)
+ type = 0;
+ else
+ type = 1;
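+	/*
+	 * Illustrative example only: efuse = 128 with efuse2 = 1 gives
+	 * ro = (2300 - 1350) * 128 / 255 + 1350 = 1826, which is >= 1660,
+	 * so the part is classified as FF (type 0).
+	 */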
+
+ /* Populate Stretch amount */
+ smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
+
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ if (ASICID_IS_TONGA_P(dev_id, hw_revision)) {
+ volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
+ (sclk_table->entries[i].clk/100) / 10000) * 1000 /
+ (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
+ volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
+ (sclk_table->entries[i].clk/100) / 100000) * 1000 /
+ (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
+ } else {
+ volt_without_cks = (uint32_t)((14041 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
+ (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
+ volt_with_cks = (uint32_t)((13946 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
+ (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+ }
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ STRETCH_ENABLE, 0x0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ staticEnable, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x0);
+
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL);
+ value &= 0xFFC2FF87;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
+ tonga_clock_stretcher_lookup_table[stretch_amount2][0];
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
+ tonga_clock_stretcher_lookup_table[stretch_amount2][1];
+ clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
+ GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
+ SclkFrequency) / 100);
+ if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
+ clock_freq_u16 &&
+ tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
+ clock_freq_u16) {
+ /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
+ value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
+ /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
+ value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
+ /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
+ value |= (tonga_clock_stretch_amount_conversion
+ [tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
+ [stretch_amount]) << 3;
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].minFreq);
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].maxFreq);
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
+ tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
+ (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL, value);
+
+ /* Populate DDT Lookup Table */
+ for (i = 0; i < 4; i++) {
+ /* Assign the minimum and maximum VID stored
+ * in the last row of Clock Stretcher Voltage Table.
+ */
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].minVID =
+ (uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].maxVID =
+ (uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
+ /* Loop through each SCLK and check the frequency
+ * to see if it lies within the frequency for clock stretcher.
+ */
+ for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
+ cks_setting = 0;
+ clock_freq = PP_SMC_TO_HOST_UL(
+ smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
+			/* Check the allowed frequency against the sclk level[j].
+			 * Sclk's endianness has already been converted,
+			 * and it is in 10 kHz units, whereas the data
+			 * table is in MHz units.
+			 */
+ if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
+ cks_setting |= 0x2;
+ if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
+ cks_setting |= 0x1;
+ }
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
+ ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
+/**
+ * Populates the SMC VRConfig field in DPM table.
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param table the SMC DPM table structure to be populated
+ * @return always 0
+ */
+static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint16_t config;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+		/* Split mode */
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= config;
+ } else {
+ printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should "
+ "be both on SVI2 control in splitted mode !\n");
+ }
+ } else {
+ /* Merged mode */
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ printk(KERN_ERR "[ powerplay ] VDDC should be on "
+ "SVI2 control in merged mode !\n");
+ }
+ }
+
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+ }
+
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ config = VR_SMIO_PATTERN_2;
+ table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+
+/**
+ * Initialize the ARB DRAM timing table's index field.
+ *
+ * @param    smumgr  the address of the SMC manager.
+ * @return   0 on success; an SMC SRAM access error code otherwise.
+ */
+static int tonga_init_arb_table_index(struct pp_smumgr *smumgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+ uint32_t tmp;
+ int result;
+
+	/*
+	 * This is a read-modify-write on the first byte of the ARB table.
+	 * The first byte in the SMU72_Discrete_MCArbDramTimingTable
+	 * structure is the field 'current'.
+	 * This solution is ugly, but we never write the whole table,
+	 * only individual fields in it.
+	 * In reality this field should not be in that structure
+	 * but in a soft register.
+	 */
+ result = smu7_read_smc_sram_dword(smumgr,
+ smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+ if (result != 0)
+ return result;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return smu7_write_smc_sram_dword(smumgr,
+ smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+
+static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ int i, j, k;
+ const uint16_t *pdef1, *pdef2;
+
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usTDP * 256));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+		"Target operating temperature is out of range !",
+		);
+
+ dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+ dpm_table->BAPM_TEMP_GRADIENT =
+ PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
+ pdef1 = defaults->bapmti_r;
+ pdef2 = defaults->bapmti_rc;
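+	/*
+	 * bapmti_r and bapmti_rc are flat arrays laid out as
+	 * [SMU72_DTE_ITERATIONS][SMU72_DTE_SOURCES][SMU72_DTE_SINKS];
+	 * the nested loops below walk them linearly in that order.
+	 */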
+
+ for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU72_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU72_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] =
+ PP_HOST_TO_SMC_US(*pdef1);
+ dpm_table->BAPMTI_RC[i][j][k] =
+ PP_HOST_TO_SMC_US(*pdef2);
+ pdef1++;
+ pdef2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	/* The number of TDC fraction bits was changed from 8 to 7
+	 * for Fiji, as requested by the SMC team.
+	 */
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->tdc_vddc_throttle_release_limit_perc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+ return 0;
+}
+
+static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ fuse_table_offset +
+ offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+		PP_ASSERT_WITH_CODE(false,
+			"Attempt to read PmFuses.DW8 "
+			"(TdcWaterfallCtl) from SMC Failed !",
+			return -EINVAL);
+ else
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+ return 0;
+}
+
+static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity & (1 << 15)) ||
+ (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
+ hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity = hwmgr->thermal_controller.
+ advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+ PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+ advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint16_t hi_sidd, lo_sidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+ return 0;
+}
+
+static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed !",
+ return -EINVAL);
+
+ /* DW6 */
+ if (tonga_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed !",
+ return -EINVAL);
+ /* DW7 */
+ if (tonga_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed !",
+ return -EINVAL);
+ /* DW8 */
+ if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl Failed !",
+ return -EINVAL);
+
+ /* DW9-DW12 */
+ if (tonga_populate_temperature_scaler(hwmgr) != 0)
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed !",
+ return -EINVAL);
+
+ /* DW13-DW14 */
+ if (tonga_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan "
+ "Control parameters Failed !",
+ return -EINVAL);
+
+ /* DW15-DW18 */
+ if (tonga_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed !",
+ return -EINVAL);
+
+ /* DW19 */
+ if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML "
+ "Min and Max Vid Failed !",
+ return -EINVAL);
+
+ /* DW20 */
+ if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(
+ false,
+ "Attempt to populate BapmVddCBaseLeakage "
+ "Hi and Lo Sidd Failed !",
+ return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed !",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int tonga_populate_mc_reg_address(struct pp_smumgr *smumgr,
+ SMU72_Discrete_MCRegisters *mc_reg_table)
+{
+ const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)smumgr->backend;
+
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+ if (smu_data->mc_reg_table.validflag & 1<<j) {
+ PP_ASSERT_WITH_CODE(
+ i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+ "Index of mc_reg_table->address[] array "
+ "out of boundary",
+ return -EINVAL);
+ mc_reg_table->address[i].s0 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+
+ mc_reg_table->last = (uint8_t)i;
+
+ return 0;
+}
+
+/* Convert register values from driver to SMC format */
+static void tonga_convert_mc_registers(
+ const struct tonga_mc_reg_entry *entry,
+ SMU72_Discrete_MCRegisterSet *data,
+ uint32_t num_entries, uint32_t valid_flag)
+{
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & 1<<j) {
+ data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
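+
+/*
+ * Example of the compaction above: with valid_flag = 0xB (bits 0, 1
+ * and 3 set) and num_entries = 4, mc_data[0], mc_data[1] and
+ * mc_data[3] are copied into data->value[0..2]; entry 2 is skipped.
+ */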
+
+static int tonga_convert_mc_reg_table_entry_to_smc(
+ struct pp_smumgr *smumgr,
+ const uint32_t memory_clock,
+ SMU72_Discrete_MCRegisterSet *mc_reg_table_data
+ )
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+ uint32_t i = 0;
+
+ for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+ if (memory_clock <=
+ smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+ break;
+ }
+ }
+
+ if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, smu_data->mc_reg_table.last,
+ smu_data->mc_reg_table.validflag);
+
+ return 0;
+}
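+
+/*
+ * Example of the entry selection above: with entries whose mclk_max
+ * values are {30000, 60000, 96000}, a request of 50000 selects entry 1,
+ * while a request above 96000 falls out of the loop and is clamped to
+ * the last entry (index 2).
+ */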
+
+static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_MCRegisters *mc_regs)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int res;
+ uint32_t i;
+
+ for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+ res = tonga_convert_mc_reg_table_entry_to_smc(
+ hwmgr->smumgr,
+ data->dpm_table.mclk_table.dpm_levels[i].value,
+ &mc_regs->data[i]
+ );
+
+ if (0 != res)
+ result = res;
+ }
+
+ return result;
+}
+
+static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t address;
+ int32_t result;
+
+ if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+ return 0;
+
+
+ memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters));
+
+ result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+ if (result != 0)
+ return result;
+
+
+ address = smu_data->smu7_data.mc_reg_table_start +
+ (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
+
+ return smu7_copy_bytes_to_smc(
+ hwmgr->smumgr, address,
+ (uint8_t *)&smu_data->mc_regs.data[0],
+ sizeof(SMU72_Discrete_MCRegisterSet) *
+ data->dpm_table.mclk_table.count,
+ SMC_RAM_END);
+}
+
+static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+
+ memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
+ result = tonga_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize MCRegTable for the MC register addresses !",
+ return result;);
+
+ result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize MCRegTable for driver state !",
+ return result;);
+
+ return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
+ (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &tonga_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
+}
+
+/**
+ * Initialize the SMC table and upload it.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   0 on success; an error code otherwise.
+ */
+int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ uint8_t i;
+ pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
+
+
+ memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+ tonga_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+ tonga_populate_smc_voltage_tables(hwmgr, table);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
+
+ if (i == 1 || i == 0)
+ table->SystemFlags |= 0x40;
+
+ if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = tonga_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ULV state !",
+ return result;);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = tonga_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Link Level !", return result);
+
+ result = tonga_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Graphics Level !", return result);
+
+ result = tonga_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Memory Level !", return result);
+
+ result = tonga_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ACPI Level !", return result);
+
+ result = tonga_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize VCE Level !", return result);
+
+ result = tonga_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ACP Level !", return result);
+
+ result = tonga_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize SAMU Level !", return result);
+
+ /* Since only the initial state is completely set up at this
+ * point (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = tonga_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to Write ARB settings for the initial state.",
+ return result;);
+
+ result = tonga_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize UVD Level !", return result);
+
+ result = tonga_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Boot Level !", return result);
+
+	result = tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate BAPM Parameters !", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = tonga_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate Clock Stretcher Data Table !",
+ return result;);
+ }
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+
+	/*
+	 * CAIL reads the current link status and reports it as the cap
+	 * (we cannot change this due to previous issues).
+	 * The SMC drops the link status to the lowest level after
+	 * PowerPlay enables DPM. After a PnP event or toggling CF the
+	 * driver is reloaded, but this time CAIL reads the link status
+	 * that the SMC set to low and reports it as the cap to PowerPlay.
+	 * To avoid this, set PCIeBootLinkLevel to the highest DPM level.
+	 */
+ PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
+
+ table->PCIeGenInterval = 1;
+
+ result = tonga_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate VRConfig setting !", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
+ &gpio_pin_assignment)) {
+ table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin_assignment)) {
+ table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_Falcon_QuickTransition);
+
+ if (0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_Falcon_QuickTransition);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr,
+ THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+
+ table->ThermOutPolarity =
+ (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+ (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
+
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO*/
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot) &&
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CombinePCCWithThermalSignal)){
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ }
+ } else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+	/* Upload all DPM data to SMC memory (DPM levels, level count, etc.) */
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
+ SMC_RAM_END);
+
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to upload dpm data to SMC memory !", return result;);
+
+ result = tonga_init_arb_table_index(hwmgr->smumgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to upload arb data to SMC memory !", return result);
+
+	result = tonga_populate_pm_fuses(hwmgr);
+	PP_ASSERT_WITH_CODE((!result),
+		"Failed to populate PM fuses !", return result);
+
+ result = tonga_populate_initial_mc_reg_table(hwmgr);
+	PP_ASSERT_WITH_CODE((!result),
+		"Failed to populate the initial MC register table !", return result);
+
+ return 0;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   0 on success; an error code otherwise.
+*/
+int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl))
+ return 0;
+
+ if (0 == smu_data->smu7_data.fan_table_start) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (0 == duty100) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
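+	/*
+	 * Illustrative numbers only: with usPWMMin = 3000 (30.00%) and
+	 * duty100 = 255, fdo_min = 3000 * 255 / 10000 = 76.
+	 */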
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
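+	/*
+	 * Illustrative numbers only: with duty100 = 255, pwm_diff1 = 3000
+	 * and t_diff1 = 2000,
+	 * slope1 = (50 + (16 * 255 * 3000) / 2000) / 100 = 61.
+	 */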
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ fan_table.FanControl_GL_Flag = 1;
+
+ res = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ smu_data->smu7_data.fan_table_start,
+ (uint8_t *)&fan_table,
+ (uint32_t)sizeof(fan_table),
+ SMC_RAM_END);
+
+ return res;
+}
+
+
+static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
+ return tonga_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+
+ result = tonga_update_and_upload_mc_reg_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE((!result),
+ "Failed to upload MC reg table !",
+ return result);
+
+ result = tonga_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters !",
+ );
+
+ return result;
+}
+
+uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU72_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU72_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU72_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
+ }
+ break;
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ break;
+ }
+ printk("cant't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+uint32_t tonga_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU72_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU72_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU72_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU72_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU72_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU72_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU72_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU72_MAX_LEVELS_MVDD;
+ }
+ printk("cant't get the mac value %x\n", value);
+
+ return 0;
+}
+
+
+static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
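+ /*
+ * The SMC indirect interface transfers whole dwords, so align the
+ * byte offset down to a 4-byte boundary and read-modify-write only
+ * the UvdBootLevel byte (bits 31:24) of that dword.
+ */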
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC,
+ mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
+
+static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.VceBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ tonga_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ tonga_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ tonga_update_samu_smc_table(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+
+/**
+ * Get the location of various tables inside the FW image.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return 0 on success, 1 if any of the required fields could not be read.
+ */
+int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result) {
+ data->soft_regs_start = tmp;
+ smu_data->smu7_data.soft_regs_start = tmp;
+ }
+
+ error |= (result != 0);
+
+
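+ /*
+ * Unlike the other header fields, a failure to read the MC register
+ * table offset is not folded into 'error'; the table is treated as
+ * optional here.
+ */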
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.mc_reg_table_start = tmp;
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (result != 0);
+
+ return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
+static uint8_t tonga_get_memory_module_index(struct pp_hwmgr *hwmgr)
+{
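+ /*
+ * Bits 23:16 of BIOS_SCRATCH_4 hold the memory module index, as left
+ * there by the VBIOS.
+ */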
+ return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
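+/*
+ * Map a VBIOS-programmed MC sequencer register address (s1) to its
+ * low-power shadow (_LP) counterpart (s0). Returns false when no such
+ * shadow exists, in which case the caller falls back to the s1 address.
+ */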
+static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+ bool result = true;
+
+ switch (in_reg) {
+ case mmMC_SEQ_RAS_TIMING:
+ *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_DLL_STBY:
+ *out_reg = mmMC_SEQ_DLL_STBY_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD0:
+ *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD1:
+ *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CTRL:
+ *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+ break;
+
+ case mmMC_SEQ_CAS_TIMING:
+ *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING:
+ *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING2:
+ *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CMD:
+ *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CTL:
+ *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D0:
+ *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D1:
+ *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D0:
+ *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D1:
+ *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+ break;
+
+ case mmMC_PMG_CMD_EMRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS1:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ break;
+
+ case mmMC_SEQ_PMG_TIMING:
+ *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS2:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_2:
+ *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+ break;
+
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
+{
+ uint32_t i;
+ uint16_t address;
+
+ for (i = 0; i < table->last; i++) {
+ table->mc_reg_address[i].s0 =
+ tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
+ &address) ?
+ address :
+ table->mc_reg_address[i].s1;
+ }
+ return 0;
+}
+
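+/*
+ * Copy the VBIOS-provided MC register table into the driver's working
+ * copy, after validating that it fits the SMC's fixed-size arrays.
+ */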
+static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+ struct tonga_mc_reg_table *ni_table)
+{
+ uint8_t i, j;
+
+ PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ for (i = 0; i < table->last; i++)
+ ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+ ni_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ ni_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++) {
+ ni_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ }
+
+ ni_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
+/**
+ * The VBIOS omits some information to reduce its size; recover it here:
+ * 1. When mmMC_SEQ_MISC1 is seen, its bits [31:16] (EMRS1) must be
+ *    written to mmMC_PMG_CMD_EMRS/_LP[15:0], and its bits [15:0] (MRS)
+ *    must be written to mmMC_PMG_CMD_MRS/_LP[15:0].
+ * 2. When mmMC_SEQ_RESERVE_M is seen, its bits [15:0] (EMRS2) must be
+ *    written to mmMC_PMG_CMD_MRS1/_LP[15:0].
+ * 3. These values must be set for each clock range.
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param table the address of MCRegTable
+ * @return 0 on success, -EINVAL if the recovered entries overflow the table.
+ */
+static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+ struct tonga_mc_reg_table *table)
+{
+ uint8_t i, j, k;
+ uint32_t temp_reg;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
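+ /*
+ * 'i' walks the registers provided by the VBIOS table; 'j' appends
+ * the recovered EMRS/MRS/MRS1 entries after them, so each append
+ * below is bounds-checked against the SMC register array size.
+ */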
+ for (i = 0, j = table->last; i < table->last; i++) {
+ PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ switch (table->mc_reg_address[i].s1) {
+
+ case mmMC_SEQ_MISC1:
+ temp_reg = cgs_read_register(hwmgr->device,
+ mmMC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) |
+ ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+ if (!data->is_memory_gddr5)
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ if (!data->is_memory_gddr5) {
+ table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++)
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ }
+
+ break;
+
+ case mmMC_SEQ_RESERVE_M:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
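+/*
+ * Mark a register slot as worth uploading only when its value actually
+ * differs between at least two consecutive AC-timing entries; registers
+ * that stay constant across all entries can be skipped.
+ */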
+static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
+{
+ uint8_t i, j;
+
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->validflag |= (1<<i);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ pp_atomctrl_mc_reg_table *table;
+ struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+ uint8_t module_index = tonga_get_memory_module_index(hwmgr);
+
+ table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+ if (table == NULL)
+ return -ENOMEM;
+
+ /* Program additional LP registers that are no longer programmed by VBIOS */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+ memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+ result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+ if (!result)
+ result = tonga_copy_vbios_smc_reg_table(table, ni_table);
+
+ if (!result) {
+ tonga_set_s0_mc_reg_index(ni_table);
+ result = tonga_set_mc_special_registers(hwmgr, ni_table);
+ }
+
+ if (!result)
+ tonga_set_valid_flag(ni_table);
+
+ kfree(table);
+
+ return result;
+}
+
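+/*
+ * DPM is considered running when the SMC reports its voltage controller
+ * as enabled in the FEATURE_STATUS register.
+ */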
+bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
index 8e6670b3cb67..8ae169ff541d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
@@ -20,35 +20,19 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#ifndef _TONGA_SMC_H
+#define _TONGA_SMC_H
-#ifndef TONGA_POWERTUNE_H
-#define TONGA_POWERTUNE_H
+#include "smumgr.h"
+#include "smu72.h"
-enum _phw_tonga_ptc_config_reg_type {
- TONGA_CONFIGREG_MMR = 0,
- TONGA_CONFIGREG_SMC_IND,
- TONGA_CONFIGREG_DIDT_IND,
- TONGA_CONFIGREG_CACHE,
- TONGA_CONFIGREG_MAX
-};
-typedef enum _phw_tonga_ptc_config_reg_type phw_tonga_ptc_config_reg_type;
-
-/* PowerContainment Features */
-#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001
-#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
-#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
+#define ASICID_IS_TONGA_P(wDID, bRID) \
+ (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
+ || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))
-struct _phw_tonga_pt_config_reg {
- uint32_t Offset;
- uint32_t Mask;
- uint32_t Shift;
- uint32_t Value;
- phw_tonga_ptc_config_reg_type Type;
-};
-typedef struct _phw_tonga_pt_config_reg phw_tonga_pt_config_reg;
-struct _phw_tonga_pt_defaults {
+struct tonga_pt_defaults {
uint8_t svi_load_line_en;
uint8_t svi_load_line_vddC;
uint8_t tdc_vddc_throttle_release_limit_perc;
@@ -60,7 +44,17 @@ struct _phw_tonga_pt_defaults {
uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
};
-typedef struct _phw_tonga_pt_defaults phw_tonga_pt_defaults;
+int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int tonga_init_smc_table(struct pp_hwmgr *hwmgr);
+int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t tonga_get_offsetof(uint32_t type, uint32_t member);
+uint32_t tonga_get_mac_definition(uint32_t value);
+int tonga_process_firmware_header(struct pp_hwmgr *hwmgr);
+int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index f42c536b3af1..5f9124046b9b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -33,587 +33,9 @@
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "cgs_common.h"
+#include "tonga_smc.h"
+#include "smu7_smumgr.h"
-#define TONGA_SMC_SIZE 0x20000
-#define BUFFER_SIZE 80000
-#define MAX_STRING_SIZE 15
-#define BUFFER_SIZETWO 131072 /*128 *1024*/
-
-/**
-* Set the address for reading/writing the SMC SRAM space.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smcAddress the address in the SMC RAM to access.
-*/
-static int tonga_set_smc_sram_address(struct pp_smumgr *smumgr,
- uint32_t smcAddress, uint32_t limit)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
- PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)),
- "SMC address must be 4 byte aligned.",
- return -1;);
-
- PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)),
- "SMC address is beyond the SMC RAM area.",
- return -1;);
-
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
-
- return 0;
-}
-
-/**
-* Copy bytes from an array into the SMC RAM space.
-*
-* @param smumgr the address of the powerplay SMU manager.
-* @param smcStartAddress the start address in the SMC RAM to copy bytes to.
-* @param src the byte array to copy the bytes from.
-* @param byteCount the number of bytes to copy.
-*/
-int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr,
- uint32_t smcStartAddress, const uint8_t *src,
- uint32_t byteCount, uint32_t limit)
-{
- uint32_t addr;
- uint32_t data, orig_data;
- int result = 0;
- uint32_t extra_shift;
-
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
- PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
- "SMC address must be 4 byte aligned.",
- return 0;);
-
- PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
- "SMC address is beyond the SMC RAM area.",
- return 0;);
-
- addr = smcStartAddress;
-
- while (byteCount >= 4) {
- /*
- * Bytes are written into the
- * SMC address space with the MSB first
- */
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
- result = tonga_set_smc_sram_address(smumgr, addr, limit);
-
- if (result)
- goto out;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
-
- src += 4;
- byteCount -= 4;
- addr += 4;
- }
-
- if (0 != byteCount) {
- /* Now write odd bytes left, do a read modify write cycle */
- data = 0;
-
- result = tonga_set_smc_sram_address(smumgr, addr, limit);
- if (result)
- goto out;
-
- orig_data = cgs_read_register(smumgr->device,
- mmSMC_IND_DATA_0);
- extra_shift = 8 * (4 - byteCount);
-
- while (byteCount > 0) {
- data = (data << 8) + *src++;
- byteCount--;
- }
-
- data <<= extra_shift;
- data |= (orig_data & ~((~0UL) << extra_shift));
-
- result = tonga_set_smc_sram_address(smumgr, addr, limit);
- if (result)
- goto out;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
- }
-
-out:
- return result;
-}
-
-
-int tonga_program_jump_on_start(struct pp_smumgr *smumgr)
-{
- static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 };
-
- tonga_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1);
-
- return 0;
-}
-
-/**
-* Return if the SMC is currently running.
-*
-* @param smumgr the address of the powerplay hardware manager.
-*/
-static int tonga_is_smc_ram_running(struct pp_smumgr *smumgr)
-{
- return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
- SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
- && (0x20100 <= cgs_read_ind_register(smumgr->device,
- CGS_IND_REG__SMC, ixSMC_PC_C)));
-}
-
-static int tonga_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- return 0;
-}
-
-/**
-* Send a message to the SMC, and wait for its response.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param msg the message to send.
-* @return The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- if (!tonga_is_smc_ram_running(smumgr))
- return -1;
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- PP_ASSERT_WITH_CODE(
- 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
- "Failed to send Previous Message.",
- );
-
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- PP_ASSERT_WITH_CODE(
- 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
- "Failed to send Message.",
- );
-
- return 0;
-}
-
-/*
-* Send a message to the SMC, and do not wait for its response.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param msg the message to send.
-* @return The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc_without_waiting
- (struct pp_smumgr *smumgr, uint16_t msg)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- PP_ASSERT_WITH_CODE(
- 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
- "Failed to send Previous Message.",
- );
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
-/*
-* Send a message to the SMC with parameter
-*
-* @param smumgr: the address of the powerplay hardware manager.
-* @param msg: the message to send.
-* @param parameter: the parameter to send
-* @return The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
- uint16_t msg, uint32_t parameter)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- if (!tonga_is_smc_ram_running(smumgr))
- return PPSMC_Result_Failed;
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
- return tonga_send_msg_to_smc(smumgr, msg);
-}
-
-/*
-* Send a message to the SMC with parameter, do not wait for response
-*
-* @param smumgr: the address of the powerplay hardware manager.
-* @param msg: the message to send.
-* @param parameter: the parameter to send
-* @return The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc_with_parameter_without_waiting(
- struct pp_smumgr *smumgr,
- uint16_t msg, uint32_t parameter)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
- return tonga_send_msg_to_smc_without_waiting(smumgr, msg);
-}
-
-/*
- * Read a 32bit value from the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param smumgr the address of the powerplay hardware manager.
- * @param smcAddress the address in the SMC RAM to access.
- * @param value and output parameter for the data read from the SMC SRAM.
- */
-int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr,
- uint32_t smcAddress, uint32_t *value,
- uint32_t limit)
-{
- int result;
-
- result = tonga_set_smc_sram_address(smumgr, smcAddress, limit);
-
- if (0 != result)
- return result;
-
- *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
-
- return 0;
-}
-
-/*
- * Write a 32bit value to the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param smumgr the address of the powerplay hardware manager.
- * @param smcAddress the address in the SMC RAM to access.
- * @param value to write to the SMC SRAM.
- */
-int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr,
- uint32_t smcAddress, uint32_t value,
- uint32_t limit)
-{
- int result;
-
- result = tonga_set_smc_sram_address(smumgr, smcAddress, limit);
-
- if (0 != result)
- return result;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
-
- return 0;
-}
-
-static int tonga_smu_fini(struct pp_smumgr *smumgr)
-{
- struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
-
- smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
- smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
-
- if (smumgr->backend != NULL) {
- kfree(smumgr->backend);
- smumgr->backend = NULL;
- }
-
- cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
- return 0;
-}
-
-static enum cgs_ucode_id tonga_convert_fw_type_to_cgs(uint32_t fw_type)
-{
- enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
-
- switch (fw_type) {
- case UCODE_ID_SMU:
- result = CGS_UCODE_ID_SMU;
- break;
- case UCODE_ID_SDMA0:
- result = CGS_UCODE_ID_SDMA0;
- break;
- case UCODE_ID_SDMA1:
- result = CGS_UCODE_ID_SDMA1;
- break;
- case UCODE_ID_CP_CE:
- result = CGS_UCODE_ID_CP_CE;
- break;
- case UCODE_ID_CP_PFP:
- result = CGS_UCODE_ID_CP_PFP;
- break;
- case UCODE_ID_CP_ME:
- result = CGS_UCODE_ID_CP_ME;
- break;
- case UCODE_ID_CP_MEC:
- result = CGS_UCODE_ID_CP_MEC;
- break;
- case UCODE_ID_CP_MEC_JT1:
- result = CGS_UCODE_ID_CP_MEC_JT1;
- break;
- case UCODE_ID_CP_MEC_JT2:
- result = CGS_UCODE_ID_CP_MEC_JT2;
- break;
- case UCODE_ID_RLC_G:
- result = CGS_UCODE_ID_RLC_G;
- break;
- default:
- break;
- }
-
- return result;
-}
-
-/**
- * Convert the PPIRI firmware type to SMU type mask.
- * For MEC, we need to check all MEC related type
-*/
-static uint16_t tonga_get_mask_for_firmware_type(uint16_t firmwareType)
-{
- uint16_t result = 0;
-
- switch (firmwareType) {
- case UCODE_ID_SDMA0:
- result = UCODE_ID_SDMA0_MASK;
- break;
- case UCODE_ID_SDMA1:
- result = UCODE_ID_SDMA1_MASK;
- break;
- case UCODE_ID_CP_CE:
- result = UCODE_ID_CP_CE_MASK;
- break;
- case UCODE_ID_CP_PFP:
- result = UCODE_ID_CP_PFP_MASK;
- break;
- case UCODE_ID_CP_ME:
- result = UCODE_ID_CP_ME_MASK;
- break;
- case UCODE_ID_CP_MEC:
- case UCODE_ID_CP_MEC_JT1:
- case UCODE_ID_CP_MEC_JT2:
- result = UCODE_ID_CP_MEC_MASK;
- break;
- case UCODE_ID_RLC_G:
- result = UCODE_ID_RLC_G_MASK;
- break;
- default:
- break;
- }
-
- return result;
-}
-
-/**
- * Check if the FW has been loaded,
- * SMU will not return if loading has not finished.
-*/
-static int tonga_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType)
-{
- uint16_t fwMask = tonga_get_mask_for_firmware_type(fwType);
-
- if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND,
- SOFT_REGISTERS_TABLE_28, fwMask, fwMask)) {
- printk(KERN_ERR "[ powerplay ] check firmware loading failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* Populate one firmware image to the data structure */
-static int tonga_populate_single_firmware_entry(struct pp_smumgr *smumgr,
- uint16_t firmware_type,
- struct SMU_Entry *pentry)
-{
- int result;
- struct cgs_firmware_info info = {0};
-
- result = cgs_get_firmware_info(
- smumgr->device,
- tonga_convert_fw_type_to_cgs(firmware_type),
- &info);
-
- if (result == 0) {
- pentry->version = 0;
- pentry->id = (uint16_t)firmware_type;
- pentry->image_addr_high = smu_upper_32_bits(info.mc_addr);
- pentry->image_addr_low = smu_lower_32_bits(info.mc_addr);
- pentry->meta_data_addr_high = 0;
- pentry->meta_data_addr_low = 0;
- pentry->data_size_byte = info.image_size;
- pentry->num_register_entries = 0;
-
- if (firmware_type == UCODE_ID_RLC_G)
- pentry->flags = 1;
- else
- pentry->flags = 0;
- } else {
- return result;
- }
-
- return result;
-}
-
-static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr)
-{
- struct tonga_smumgr *tonga_smu =
- (struct tonga_smumgr *)(smumgr->backend);
- uint16_t fw_to_load;
- struct SMU_DRAMData_TOC *toc;
- /**
- * First time this gets called during SmuMgr init,
- * we haven't processed SMU header file yet,
- * so Soft Register Start offset is unknown.
- * However, for this case, UcodeLoadStatus is already 0,
- * so we can skip this if the Soft Registers Start offset is 0.
- */
- cgs_write_ind_register(smumgr->device,
- CGS_IND_REG__SMC, ixSOFT_REGISTERS_TABLE_28, 0);
-
- tonga_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_SMU_DRAM_ADDR_HI,
- tonga_smu->smu_buffer.mc_addr_high);
- tonga_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_SMU_DRAM_ADDR_LO,
- tonga_smu->smu_buffer.mc_addr_low);
-
- toc = (struct SMU_DRAMData_TOC *)tonga_smu->pHeader;
- toc->num_entries = 0;
- toc->structure_version = 1;
-
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry(smumgr,
- UCODE_ID_RLC_G,
- &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n",
- return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry(smumgr,
- UCODE_ID_CP_CE,
- &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n",
- return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry
- (smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n", return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry
- (smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n", return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry
- (smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n", return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry
- (smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n", return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry
- (smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n", return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry
- (smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n", return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry
- (smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n", return -1);
-
- tonga_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_DRV_DRAM_ADDR_HI,
- tonga_smu->header_buffer.mc_addr_high);
- tonga_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_DRV_DRAM_ADDR_LO,
- tonga_smu->header_buffer.mc_addr_low);
-
- fw_to_load = UCODE_ID_RLC_G_MASK
- + UCODE_ID_SDMA0_MASK
- + UCODE_ID_SDMA1_MASK
- + UCODE_ID_CP_CE_MASK
- + UCODE_ID_CP_ME_MASK
- + UCODE_ID_CP_PFP_MASK
- + UCODE_ID_CP_MEC_MASK;
-
- PP_ASSERT_WITH_CODE(
- 0 == tonga_send_msg_to_smc_with_parameter_without_waiting(
- smumgr, PPSMC_MSG_LoadUcodes, fw_to_load),
- "Fail to Request SMU Load uCode", return 0);
-
- return 0;
-}
-
-static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
- uint32_t firmwareType)
-{
- return 0;
-}
-
-/**
- * Upload the SMC firmware to the SMC microcontroller.
- *
- * @param smumgr the address of the powerplay hardware manager.
- * @param pFirmware the data structure containing the various sections of the firmware.
- */
-static int tonga_smu_upload_firmware_image(struct pp_smumgr *smumgr)
-{
- const uint8_t *src;
- uint32_t byte_count;
- uint32_t *data;
- struct cgs_firmware_info info = {0};
-
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- cgs_get_firmware_info(smumgr->device,
- tonga_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
-
- if (info.image_size & 3) {
- printk(KERN_ERR "[ powerplay ] SMC ucode is not 4 bytes aligned\n");
- return -EINVAL;
- }
-
- if (info.image_size > TONGA_SMC_SIZE) {
- printk(KERN_ERR "[ powerplay ] SMC address is beyond the SMC RAM area\n");
- return -EINVAL;
- }
-
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
-
- byte_count = info.image_size;
- src = (const uint8_t *)info.kptr;
-
- data = (uint32_t *)src;
- for (; byte_count >= 4; data++, byte_count -= 4)
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]);
-
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-
- return 0;
-}
static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
{
@@ -623,7 +45,7 @@ static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = tonga_smu_upload_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(smumgr);
if (result)
return result;
@@ -653,7 +75,7 @@ static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
/**
* Call Test SMU message with 0x20000 offset to trigger SMU start
*/
- tonga_send_msg_to_smc_offset(smumgr);
+ smu7_send_msg_to_smc_offset(smumgr);
/* Wait for done bit to be set */
SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
@@ -690,13 +112,13 @@ static int tonga_start_in_non_protection_mode(struct pp_smumgr *smumgr)
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = tonga_smu_upload_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(smumgr);
if (result != 0)
return result;
/* Set smc instruct start point at 0x0 */
- tonga_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(smumgr);
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
@@ -718,7 +140,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
int result;
/* Only start SMC if SMC RAM is not running */
- if (!tonga_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(smumgr)) {
/*Check if SMU is running in protected mode*/
if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMU_FIRMWARE, SMU_MODE)) {
@@ -732,7 +154,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
}
}
- result = tonga_request_smu_reload_fw(smumgr);
+ result = smu7_request_smu_load_fw(smumgr);
return result;
}
@@ -746,67 +168,41 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
*/
static int tonga_smu_init(struct pp_smumgr *smumgr)
{
- struct tonga_smumgr *tonga_smu;
- uint8_t *internal_buf;
- uint64_t mc_addr = 0;
- /* Allocate memory for backend private data */
- tonga_smu = (struct tonga_smumgr *)(smumgr->backend);
- tonga_smu->header_buffer.data_size =
- ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- tonga_smu->smu_buffer.data_size = 200*4096;
-
- smu_allocate_memory(smumgr->device,
- tonga_smu->header_buffer.data_size,
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
- PAGE_SIZE,
- &mc_addr,
- &tonga_smu->header_buffer.kaddr,
- &tonga_smu->header_buffer.handle);
-
- tonga_smu->pHeader = tonga_smu->header_buffer.kaddr;
- tonga_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
- tonga_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
- PP_ASSERT_WITH_CODE((NULL != tonga_smu->pHeader),
- "Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
- (cgs_handle_t)tonga_smu->header_buffer.handle);
- return -1);
-
- smu_allocate_memory(smumgr->device,
- tonga_smu->smu_buffer.data_size,
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
- PAGE_SIZE,
- &mc_addr,
- &tonga_smu->smu_buffer.kaddr,
- &tonga_smu->smu_buffer.handle);
-
- internal_buf = tonga_smu->smu_buffer.kaddr;
- tonga_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
- tonga_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
- PP_ASSERT_WITH_CODE((NULL != internal_buf),
- "Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
- (cgs_handle_t)tonga_smu->smu_buffer.handle);
- return -1;);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+
+ int i;
+
+ if (smu7_init(smumgr))
+ return -EINVAL;
+
+ for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++)
+ smu_data->activity_target[i] = 30;
return 0;
}
static const struct pp_smumgr_func tonga_smu_funcs = {
.smu_init = &tonga_smu_init,
- .smu_fini = &tonga_smu_fini,
+ .smu_fini = &smu7_smu_fini,
.start_smu = &tonga_start_smu,
- .check_fw_load_finish = &tonga_check_fw_load_finish,
- .request_smu_load_fw = &tonga_request_smu_reload_fw,
- .request_smu_load_specific_fw = &tonga_request_smu_load_specific_fw,
- .send_msg_to_smc = &tonga_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = &tonga_send_msg_to_smc_with_parameter,
+ .check_fw_load_finish = &smu7_check_fw_load_finish,
+ .request_smu_load_fw = &smu7_request_smu_load_fw,
+ .request_smu_load_specific_fw = NULL,
+ .send_msg_to_smc = &smu7_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
+ .update_smc_table = tonga_update_smc_table,
+ .get_offsetof = tonga_get_offsetof,
+ .process_firmware_header = tonga_process_firmware_header,
+ .init_smc_table = tonga_init_smc_table,
+ .update_sclk_threshold = tonga_update_sclk_threshold,
+ .thermal_setup_fan_table = tonga_thermal_setup_fan_table,
+ .populate_all_graphic_levels = tonga_populate_all_graphic_levels,
+ .populate_all_memory_levels = tonga_populate_all_memory_levels,
+ .get_mac_definition = tonga_get_mac_definition,
+ .initialize_mc_reg_table = tonga_initialize_mc_reg_table,
+ .is_dpm_running = tonga_is_dpm_running,
};
int tonga_smum_init(struct pp_smumgr *smumgr)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
index 33c788d7f05c..8c4f761d5bc8 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
@@ -24,30 +24,36 @@
#ifndef _TONGA_SMUMGR_H_
#define _TONGA_SMUMGR_H_
-struct tonga_buffer_entry {
- uint32_t data_size;
- uint32_t mc_addr_low;
- uint32_t mc_addr_high;
- void *kaddr;
- unsigned long handle;
+#include "smu72_discrete.h"
+
+#include "smu7_smumgr.h"
+
+struct tonga_mc_reg_entry {
+ uint32_t mclk_max;
+ uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct tonga_mc_reg_table {
+ uint8_t last; /* number of registers */
+ uint8_t num_entries; /* number of entries in mc_reg_table_entry used */
+ uint16_t validflag; /* indicates whether the corresponding register is valid: 1 valid, 0 invalid; bit0->address[0], bit1->address[1], etc. */
+ struct tonga_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
+
struct tonga_smumgr {
- uint8_t *pHeader;
- uint8_t *pMecImage;
- uint32_t ulSoftRegsStart;
- struct tonga_buffer_entry header_buffer;
- struct tonga_buffer_entry smu_buffer;
-};
+ struct smu7_smumgr smu7_data;
+ struct SMU72_Discrete_DpmTable smc_state_table;
+ struct SMU72_Discrete_Ulv ulv_setting;
+ struct SMU72_Discrete_PmFuses power_tune_table;
+ const struct tonga_pt_defaults *power_tune_defaults;
+ SMU72_Discrete_MCRegisters mc_regs;
+ struct tonga_mc_reg_table mc_reg_table;
-extern int tonga_smum_init(struct pp_smumgr *smumgr);
-extern int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr,
- uint32_t smcStartAddress, const uint8_t *src,
- uint32_t byteCount, uint32_t limit);
-extern int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
- uint32_t *value, uint32_t limit);
-extern int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
- uint32_t value, uint32_t limit);
+ uint32_t activity_target[SMU72_MAX_LEVELS_GRAPHICS];
+
+};
#endif
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index ef312bb75fda..963a24d46a93 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -405,7 +405,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
spin_lock(&sched->job_list_lock);
s_job = list_first_entry_or_null(&sched->ring_mirror_list,
struct amd_sched_job, node);
- if (s_job)
+ if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
schedule_delayed_work(&s_job->work_tdr, sched->timeout);
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {