-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 95
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 288
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_dpm.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 200
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 4
-rwxr-xr-x  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 123
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/logger.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 160
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 87
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 261
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dml_logger.h (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h) | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/include/logger_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 155
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_stats.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h) | 56
-rw-r--r--  drivers/gpu/drm/amd/display/modules/stats/stats.c | 334
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 3
-rw-r--r--  drivers/gpu/drm/amd/include/soc15_ih_clientid.h | 70
-rw-r--r--  drivers/gpu/drm/amd/include/vega10_ip_offset.h | 286
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 617
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c | 209
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h | 36
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 577
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c) | 547
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h) | 130
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h) | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 14
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c) | 997
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h) | 79
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 536
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h | 180
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 174
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/pp_instance.h | 36
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 28
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 883
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 74
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 87
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c | 399
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 344
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h) | 19
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c | 891
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h) | 82
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 61
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 5
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.h | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 12
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 15
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 24
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 75
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 10
-rw-r--r--  drivers/staging/vboxvideo/vbox_ttm.c | 5
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 252
-rw-r--r--  include/drm/ttm/ttm_tt.h | 272
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 1
150 files changed, 5739 insertions, 5158 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 8522c2ea1f3e..2ca2b5154d52 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -87,8 +87,7 @@ amdgpu-y += \
# add SMC block
amdgpu-y += \
- amdgpu_dpm.o \
- amdgpu_powerplay.o
+ amdgpu_dpm.o
# add DCE block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index eba4abc8aac6..f44a83ab2bf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -441,7 +441,7 @@ struct amdgpu_sa_bo {
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
- u64 flags, bool kernel,
+ u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
struct drm_gem_object **obj);
@@ -1081,8 +1081,6 @@ struct amdgpu_wb {
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
-void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
-
/*
* SDMA
*/
@@ -1395,9 +1393,7 @@ enum amd_hw_ip_block_type {
#define HWIP_MAX_INSTANCE 6
struct amd_powerplay {
- struct cgs_device *cgs_device;
void *pp_handle;
- const struct amd_ip_funcs *ip_funcs;
const struct amd_pm_funcs *pp_funcs;
};
@@ -1632,6 +1628,9 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags);
+void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
+uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
+
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
@@ -1655,6 +1654,9 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
+#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
+
#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
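
For context: the RREG8/WREG8 macros added above wrap the new byte-wide MMIO helpers introduced in amdgpu_device.c later in this diff. A minimal usage sketch (the 0x3c offset is a made-up example, not a real amdgpu register):

    /* Illustrative only: RREG8/WREG8 take a byte offset from the start
     * of the MMIO aperture; the helpers bounds-check the offset against
     * adev->rmmio_size and BUG() when it is out of range.
     */
    u8 val = RREG8(0x3c);      /* read one byte */
    WREG8(0x3c, val | 0x1);    /* write it back with bit 0 set */
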
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 8a23aa8f9c73..4d36203ffb11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -221,8 +221,9 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
uint64_t gpu_addr_tmp = 0;
void *cpu_ptr_tmp = NULL;
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &bo);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel,
+ NULL, &bo);
if (r) {
dev_err(adev->dev,
"failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index e0371a9967b9..a12a1654e124 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -997,8 +997,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
va, size, domain_string(alloc_domain));
- ret = amdgpu_bo_create(adev, size, byte_align, false,
- alloc_domain, alloc_flags, NULL, NULL, &bo);
+ ret = amdgpu_bo_create(adev, size, byte_align,
+ alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo);
if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n",
domain_string(alloc_domain), ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index ff8efd0f8fd5..a0f48cb9b8f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -114,6 +114,9 @@ union igp_info {
struct atom_integrated_system_info_v1_11 v11;
};
+union umc_info {
+ struct atom_umc_info_v3_1 v31;
+};
/*
* Return vram width from integrated system info table, if available,
* or 0 if not.
@@ -143,6 +146,94 @@ int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
return 0;
}
+static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
+ int atom_mem_type)
+{
+ int vram_type;
+
+ if (adev->flags & AMD_IS_APU) {
+ switch (atom_mem_type) {
+ case Ddr2MemType:
+ case LpDdr2MemType:
+ vram_type = AMDGPU_VRAM_TYPE_DDR2;
+ break;
+ case Ddr3MemType:
+ case LpDdr3MemType:
+ vram_type = AMDGPU_VRAM_TYPE_DDR3;
+ break;
+ case Ddr4MemType:
+ case LpDdr4MemType:
+ vram_type = AMDGPU_VRAM_TYPE_DDR4;
+ break;
+ default:
+ vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ break;
+ }
+ } else {
+ switch (atom_mem_type) {
+ case ATOM_DGPU_VRAM_TYPE_GDDR5:
+ vram_type = AMDGPU_VRAM_TYPE_GDDR5;
+ break;
+ case ATOM_DGPU_VRAM_TYPE_HBM:
+ vram_type = AMDGPU_VRAM_TYPE_HBM;
+ break;
+ default:
+ vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ break;
+ }
+ }
+
+ return vram_type;
+}
+/*
+ * Return the VRAM type from the integrated system info table or the
+ * umc info table, if available, or 0 (AMDGPU_VRAM_TYPE_UNKNOWN) if not.
+ */
+int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ u16 data_offset, size;
+ union igp_info *igp_info;
+ union umc_info *umc_info;
+ u8 frev, crev;
+ u8 mem_type;
+
+ if (adev->flags & AMD_IS_APU)
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ integratedsysteminfo);
+ else
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ umc_info);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+ index, &size,
+ &frev, &crev, &data_offset)) {
+ if (adev->flags & AMD_IS_APU) {
+ igp_info = (union igp_info *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (crev) {
+ case 11:
+ mem_type = igp_info->v11.memorytype;
+ return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ default:
+ return 0;
+ }
+ } else {
+ umc_info = (union umc_info *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (crev) {
+ case 1:
+ mem_type = umc_info->v31.vram_type;
+ return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ default:
+ return 0;
+ }
+ }
+ }
+
+ return 0;
+}
+
union firmware_info {
struct atom_firmware_info_v3_1 v31;
};
@@ -151,10 +242,6 @@ union smu_info {
struct atom_smu_info_v3_1 v31;
};
-union umc_info {
- struct atom_umc_info_v3_1 v31;
-};
-
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
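
The new query resolves the VRAM type from the integratedsysteminfo table on APUs and from the umc_info table on dGPUs, falling back to 0 (AMDGPU_VRAM_TYPE_UNKNOWN) when neither table matches. A hedged sketch of a call site (the dev_info message is illustrative, not from this patch):

    int vram_type = amdgpu_atomfirmware_get_vram_type(adev);

    if (vram_type == AMDGPU_VRAM_TYPE_UNKNOWN)
        dev_info(adev->dev, "VBIOS did not report a VRAM type\n");
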
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 288b97e54347..7689c961c4ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -28,6 +28,7 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 2fb299afc12b..02b849be083b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -80,8 +80,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
int time;
n = AMDGPU_BENCHMARK_ITERATIONS;
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
- NULL, &sobj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, sdomain, 0,
+ ttm_bo_type_kernel, NULL, &sobj);
if (r) {
goto out_cleanup;
}
@@ -93,8 +93,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
if (r) {
goto out_cleanup;
}
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
- NULL, &dobj);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, ddomain, 0,
+ ttm_bo_type_kernel, NULL, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index f2dd98d3f5e6..37098c68a645 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -654,11 +654,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
else
strcpy(fw_name, "amdgpu/vega10_smc.bin");
break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- case CHIP_RAVEN:
- adev->pm.fw_version = info->version;
- return 0;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index ffc1f6f46913..9da8d5802980 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
/* don't do anything if sink is not display port, i.e.,
* passive dp->(dvi|hdmi) adaptor
*/
- if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- int saved_dpms = connector->dpms;
- /* Only turn off the display if it's physically disconnected */
- if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
- /* Don't try to start link training before we
- * have the dpcd */
- if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
- return;
-
- /* set it to OFF so that drm_helper_connector_dpms()
- * won't return immediately since the current state
- * is ON at this point.
- */
- connector->dpms = DRM_MODE_DPMS_OFF;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
- connector->dpms = saved_dpms;
+ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+ amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
+ amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+ /* Don't start link training before we have the DPCD */
+ if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+ return;
+
+ /* Turn the connector off and back on immediately, which
+ * will trigger link training
+ */
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 856378434ea2..690cf77b950e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -87,6 +87,8 @@ static const char *amdgpu_asic_name[] = {
"LAST",
};
+static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
+
bool amdgpu_device_is_px(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
@@ -121,6 +123,32 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
return ret;
}
+/*
+ * amdgpu_mm_rreg8 - byte-wide MMIO register read helper
+ * @offset: byte offset from the start of the MMIO aperture
+ */
+
+uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
+{
+	if (offset < adev->rmmio_size)
+		return readb(adev->rmmio + offset);
+	BUG();
+}
+
+/*
+ * amdgpu_mm_wreg8 - byte-wide MMIO register write helper
+ * @offset: byte offset from the start of the MMIO aperture
+ * @value: the value to write to the register
+ */
+void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
+{
+	if (offset < adev->rmmio_size)
+		writeb(value, adev->rmmio + offset);
+	else
+		BUG();
+}
+
+
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags)
{
@@ -830,6 +858,8 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n");
amdgpu_lockup_timeout = 10000;
}
+
+ adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
}
/**
@@ -1387,7 +1417,8 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
- adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+ adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* enable clockgating to save power */
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_GATE);
@@ -1436,7 +1467,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
+ adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
@@ -1545,7 +1577,8 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
- if (i != AMD_IP_BLOCK_TYPE_SMC) {
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
+ adev->ip_blocks[i].version->funcs->set_clockgating_state) {
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
if (r) {
@@ -1878,6 +1911,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (adev->rio_mem == NULL)
DRM_INFO("PCI I/O BAR is not found.\n");
+ amdgpu_device_get_pcie_info(adev);
+
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)
@@ -2086,6 +2121,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_ib_pool_fini(adev);
amdgpu_fence_driver_fini(adev);
+ amdgpu_pm_sysfs_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_device_ip_fini(adev);
if (adev->firmware.gpu_info_fw) {
@@ -2114,7 +2150,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
iounmap(adev->rmmio);
adev->rmmio = NULL;
amdgpu_device_doorbell_fini(adev);
- amdgpu_pm_sysfs_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
}
@@ -2755,7 +2790,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
return r;
}
-void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
+static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
u32 mask;
int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 137145dd14a9..cf0f186c6092 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -113,11 +113,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
int r;
if (adev->gart.robj == NULL) {
- r = amdgpu_bo_create(adev, adev->gart.table_size,
- PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+ r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->gart.robj);
+ ttm_bo_type_kernel, NULL,
+ &adev->gart.robj);
if (r) {
return r;
}
@@ -315,7 +316,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++)
- adev->gart.pages[p] = pagelist[i];
+ adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
#endif
if (!adev->gart.ptr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 55a840ae6d68..46b9ea4e6103 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
if (robj) {
- if (robj->gem_base.import_attach)
- drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
amdgpu_mn_unregister(robj);
amdgpu_bo_unref(&robj);
}
@@ -45,7 +43,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
- u64 flags, bool kernel,
+ u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
struct drm_gem_object **obj)
{
@@ -59,8 +57,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
}
retry:
- r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
- flags, NULL, resv, &bo);
+ r = amdgpu_bo_create(adev, size, alignment, initial_domain,
+ flags, type, resv, &bo);
if (r) {
if (r != -ERESTARTSYS) {
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index b8a7dba69595..0e01f115bbe5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -25,51 +25,12 @@
#define __AMDGPU_IH_H__
#include <linux/chash.h>
+#include "soc15_ih_clientid.h"
struct amdgpu_device;
- /*
- * vega10+ IH clients
- */
-enum amdgpu_ih_clientid
-{
- AMDGPU_IH_CLIENTID_IH = 0x00,
- AMDGPU_IH_CLIENTID_ACP = 0x01,
- AMDGPU_IH_CLIENTID_ATHUB = 0x02,
- AMDGPU_IH_CLIENTID_BIF = 0x03,
- AMDGPU_IH_CLIENTID_DCE = 0x04,
- AMDGPU_IH_CLIENTID_ISP = 0x05,
- AMDGPU_IH_CLIENTID_PCIE0 = 0x06,
- AMDGPU_IH_CLIENTID_RLC = 0x07,
- AMDGPU_IH_CLIENTID_SDMA0 = 0x08,
- AMDGPU_IH_CLIENTID_SDMA1 = 0x09,
- AMDGPU_IH_CLIENTID_SE0SH = 0x0a,
- AMDGPU_IH_CLIENTID_SE1SH = 0x0b,
- AMDGPU_IH_CLIENTID_SE2SH = 0x0c,
- AMDGPU_IH_CLIENTID_SE3SH = 0x0d,
- AMDGPU_IH_CLIENTID_SYSHUB = 0x0e,
- AMDGPU_IH_CLIENTID_THM = 0x0f,
- AMDGPU_IH_CLIENTID_UVD = 0x10,
- AMDGPU_IH_CLIENTID_VCE0 = 0x11,
- AMDGPU_IH_CLIENTID_VMC = 0x12,
- AMDGPU_IH_CLIENTID_XDMA = 0x13,
- AMDGPU_IH_CLIENTID_GRBM_CP = 0x14,
- AMDGPU_IH_CLIENTID_ATS = 0x15,
- AMDGPU_IH_CLIENTID_ROM_SMUIO = 0x16,
- AMDGPU_IH_CLIENTID_DF = 0x17,
- AMDGPU_IH_CLIENTID_VCE1 = 0x18,
- AMDGPU_IH_CLIENTID_PWR = 0x19,
- AMDGPU_IH_CLIENTID_UTCL2 = 0x1b,
- AMDGPU_IH_CLIENTID_EA = 0x1c,
- AMDGPU_IH_CLIENTID_UTCL2LOG = 0x1d,
- AMDGPU_IH_CLIENTID_MP0 = 0x1e,
- AMDGPU_IH_CLIENTID_MP1 = 0x1f,
-
- AMDGPU_IH_CLIENTID_MAX,
-
- AMDGPU_IH_CLIENTID_VCN = AMDGPU_IH_CLIENTID_UVD
-};
#define AMDGPU_IH_CLIENTID_LEGACY 0
+#define AMDGPU_IH_CLIENTID_MAX SOC15_IH_CLIENTID_MAX
#define AMDGPU_PAGEFAULT_HASH_BITS 8
struct amdgpu_retryfault_hashtable {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index d9533bbc467c..d6416ee52e32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -350,6 +350,7 @@ struct amdgpu_mode_info {
u16 firmware_flags;
/* pointer to backlight encoder */
struct amdgpu_encoder *bl_encoder;
+ u8 bl_level; /* saved backlight level */
struct amdgpu_audio audio; /* audio stuff */
int num_crtc; /* number of crtcs */
int num_hpd; /* number of hpd pins */
@@ -550,14 +551,6 @@ struct amdgpu_connector {
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
struct edid *edid;
- /* number of modes generated from EDID at 'dc_sink' */
- int num_modes;
- /* The 'old' sink - before an HPD.
- * The 'current' sink is in dc_link->sink. */
- struct dc_sink *dc_sink;
- struct dc_link *dc_link;
- struct dc_sink *dc_em_sink;
- const struct dc_stream *stream;
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
@@ -568,27 +561,6 @@ struct amdgpu_connector {
enum amdgpu_connector_audio audio;
enum amdgpu_connector_dither dither;
unsigned pixelclock_for_modeset;
-
- struct drm_dp_mst_topology_mgr mst_mgr;
- struct amdgpu_dm_dp_aux dm_dp_aux;
- struct drm_dp_mst_port *port;
- struct amdgpu_connector *mst_port;
- struct amdgpu_encoder *mst_encoder;
- struct semaphore mst_sem;
-
- /* TODO see if we can merge with ddc_bus or make a dm_connector */
- struct amdgpu_i2c_adapter *i2c;
-
- /* Monitor range limits */
- int min_vfreq ;
- int max_vfreq ;
- int pixel_clock_mhz;
-
- /*freesync caps*/
- struct mod_freesync_caps caps;
-
- struct mutex hpd_lock;
-
};
/* TODO: start to use this struct and remove same field from base one */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 9157745fce14..6d08cde8443c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -60,6 +60,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_kunmap(bo);
+ if (bo->gem_base.import_attach)
+ drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
if (!list_empty(&bo->shadow_list)) {
@@ -173,13 +175,15 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
* @size: size for the new BO
* @align: alignment for the new BO
* @domain: where to place it
- * @bo_ptr: resulting BO
+ * @bo_ptr: pointer to the BO; used to initialize BOs embedded in structures
* @gpu_addr: GPU addr of the pinned BO
* @cpu_addr: optional CPU address mapping
*
* Allocates and pins a BO for kernel internal use, and returns it still
* reserved.
*
+ * Note: a new BO is created only if bo_ptr points to NULL.
+ *
* Returns 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
@@ -191,10 +195,10 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
int r;
if (!*bo_ptr) {
- r = amdgpu_bo_create(adev, size, align, true, domain,
+ r = amdgpu_bo_create(adev, size, align, domain,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, bo_ptr);
+ ttm_bo_type_kernel, NULL, bo_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
r);
@@ -242,12 +246,14 @@ error_free:
* @size: size for the new BO
* @align: alignment for the new BO
* @domain: where to place it
- * @bo_ptr: resulting BO
+ * @bo_ptr: pointer to the BO; used to initialize BOs embedded in structures
* @gpu_addr: GPU addr of the pinned BO
* @cpu_addr: optional CPU address mapping
*
* Allocates and pins a BO for kernel internal use.
*
+ * Note: a new BO is created only if bo_ptr points to NULL.
+ *
* Returns 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
@@ -335,21 +341,19 @@ fail:
return false;
}
-static int amdgpu_bo_do_create(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
+static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+ int byte_align, u32 domain,
+ u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
struct amdgpu_bo **bo_ptr)
{
struct ttm_operation_ctx ctx = {
- .interruptible = !kernel,
+ .interruptible = (type != ttm_bo_type_kernel),
.no_wait_gpu = false,
.resv = resv,
.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
};
struct amdgpu_bo *bo;
- enum ttm_bo_type type;
unsigned long page_align;
size_t acc_size;
int r;
@@ -360,13 +364,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
if (!amdgpu_bo_validate_size(adev, size, domain))
return -ENOMEM;
- if (kernel) {
- type = ttm_bo_type_kernel;
- } else if (sg) {
- type = ttm_bo_type_sg;
- } else {
- type = ttm_bo_type_device;
- }
*bo_ptr = NULL;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
@@ -385,7 +382,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
AMDGPU_GEM_DOMAIN_GWS |
AMDGPU_GEM_DOMAIN_OA);
bo->allowed_domains = bo->preferred_domains;
- if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+ if (type != ttm_bo_type_kernel &&
+ bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
bo->flags = flags;
@@ -423,7 +421,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, &ctx, acc_size,
- sg, resv, &amdgpu_ttm_bo_destroy);
+ NULL, resv, &amdgpu_ttm_bo_destroy);
if (unlikely(r != 0))
return r;
@@ -435,7 +433,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
else
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
- if (kernel)
+ if (type == ttm_bo_type_kernel)
bo->tbo.priority = 1;
if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
@@ -479,12 +477,11 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
if (bo->shadow)
return 0;
- r = amdgpu_bo_do_create(adev, size, byte_align, true,
- AMDGPU_GEM_DOMAIN_GTT,
+ r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW,
- NULL, bo->tbo.resv,
- &bo->shadow);
+ ttm_bo_type_kernel,
+ bo->tbo.resv, &bo->shadow);
if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo);
mutex_lock(&adev->shadow_list_lock);
@@ -495,18 +492,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
return r;
}
-int amdgpu_bo_create(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
+int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
+ int byte_align, u32 domain,
+ u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
struct amdgpu_bo **bo_ptr)
{
uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
int r;
- r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
- parent_flags, sg, resv, bo_ptr);
+ r = amdgpu_bo_do_create(adev, size, byte_align, domain,
+ parent_flags, type, resv, bo_ptr);
if (r)
return r;
@@ -821,7 +817,8 @@ static const char *amdgpu_vram_names[] = {
"GDDR4",
"GDDR5",
"HBM",
- "DDR3"
+ "DDR3",
+ "DDR4",
};
int amdgpu_bo_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index d4dbfe1f842e..546f77cb7882 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -203,12 +203,11 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
-int amdgpu_bo_create(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
- struct reservation_object *resv,
- struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
+ int byte_align, u32 domain,
+ u64 flags, enum ttm_bo_type type,
+ struct reservation_object *resv,
+ struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
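
The prototype change above is the core of the series: the bool kernel flag and the sg_table argument are folded into one explicit enum ttm_bo_type parameter (ttm_bo_type_kernel, ttm_bo_type_device or ttm_bo_type_sg), which every caller in this diff is converted to. A before/after sketch of a typical call site, assuming a GTT allocation:

    /* Old signature (removed by this patch):
     *   r = amdgpu_bo_create(adev, size, align, true, domain, flags,
     *                        NULL, NULL, &bo);
     * New signature: the bool/sg pair becomes one explicit BO type.
     */
    r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                         0, ttm_bo_type_kernel, NULL, &bo);
    if (r)
        return r;
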
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 632b18670098..361975cf45a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1154,7 +1154,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
umode_t effective_mode = attr->mode;
/* handle non-powerplay limitations */
- if (!adev->powerplay.cgs_device) {
+ if (!adev->powerplay.pp_handle) {
/* Skip fan attributes if fan is not present */
if (adev->pm.no_fan &&
(attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
deleted file mode 100644
index 5c2e2d5dc1ee..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-#include "atom.h"
-#include "amdgpu.h"
-#include "amd_shared.h"
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include "amdgpu_pm.h"
-#include <drm/amdgpu_drm.h>
-#include "amdgpu_powerplay.h"
-#include "si_dpm.h"
-#include "cik_dpm.h"
-#include "vi_dpm.h"
-
-static int amdgpu_pp_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amd_powerplay *amd_pp;
- int ret = 0;
-
- amd_pp = &(adev->powerplay);
- amd_pp->pp_handle = (void *)adev;
-
- switch (adev->asic_type) {
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- case CHIP_POLARIS12:
- case CHIP_TONGA:
- case CHIP_FIJI:
- case CHIP_TOPAZ:
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- case CHIP_VEGA10:
- case CHIP_RAVEN:
- amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
- amd_pp->ip_funcs = &pp_ip_funcs;
- amd_pp->pp_funcs = &pp_dpm_funcs;
- break;
- /* These chips don't have powerplay implemenations */
-#ifdef CONFIG_DRM_AMDGPU_SI
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
- amd_pp->ip_funcs = &si_dpm_ip_funcs;
- amd_pp->pp_funcs = &si_dpm_funcs;
- break;
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- if (amdgpu_dpm == -1) {
- amd_pp->ip_funcs = &ci_dpm_ip_funcs;
- amd_pp->pp_funcs = &ci_dpm_funcs;
- } else {
- amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
- amd_pp->ip_funcs = &pp_ip_funcs;
- amd_pp->pp_funcs = &pp_dpm_funcs;
- }
- break;
- case CHIP_KABINI:
- case CHIP_MULLINS:
- case CHIP_KAVERI:
- amd_pp->ip_funcs = &kv_dpm_ip_funcs;
- amd_pp->pp_funcs = &kv_dpm_funcs;
- break;
-#endif
- default:
- ret = -EINVAL;
- break;
- }
-
- if (adev->powerplay.ip_funcs->early_init)
- ret = adev->powerplay.ip_funcs->early_init(adev);
-
- return ret;
-}
-
-
-static int amdgpu_pp_late_init(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->late_init)
- ret = adev->powerplay.ip_funcs->late_init(
- adev->powerplay.pp_handle);
-
- return ret;
-}
-
-static int amdgpu_pp_sw_init(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->sw_init)
- ret = adev->powerplay.ip_funcs->sw_init(
- adev->powerplay.pp_handle);
-
- return ret;
-}
-
-static int amdgpu_pp_sw_fini(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->sw_fini)
- ret = adev->powerplay.ip_funcs->sw_fini(
- adev->powerplay.pp_handle);
- if (ret)
- return ret;
-
- return ret;
-}
-
-static int amdgpu_pp_hw_init(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
- amdgpu_ucode_init_bo(adev);
-
- if (adev->powerplay.ip_funcs->hw_init)
- ret = adev->powerplay.ip_funcs->hw_init(
- adev->powerplay.pp_handle);
-
- return ret;
-}
-
-static int amdgpu_pp_hw_fini(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->hw_fini)
- ret = adev->powerplay.ip_funcs->hw_fini(
- adev->powerplay.pp_handle);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
- amdgpu_ucode_fini_bo(adev);
-
- return ret;
-}
-
-static void amdgpu_pp_late_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->late_fini)
- adev->powerplay.ip_funcs->late_fini(
- adev->powerplay.pp_handle);
-
- if (adev->powerplay.cgs_device)
- amdgpu_cgs_destroy_device(adev->powerplay.cgs_device);
-}
-
-static int amdgpu_pp_suspend(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->suspend)
- ret = adev->powerplay.ip_funcs->suspend(
- adev->powerplay.pp_handle);
- return ret;
-}
-
-static int amdgpu_pp_resume(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->resume)
- ret = adev->powerplay.ip_funcs->resume(
- adev->powerplay.pp_handle);
- return ret;
-}
-
-static int amdgpu_pp_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->set_clockgating_state)
- ret = adev->powerplay.ip_funcs->set_clockgating_state(
- adev->powerplay.pp_handle, state);
- return ret;
-}
-
-static int amdgpu_pp_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->set_powergating_state)
- ret = adev->powerplay.ip_funcs->set_powergating_state(
- adev->powerplay.pp_handle, state);
- return ret;
-}
-
-
-static bool amdgpu_pp_is_idle(void *handle)
-{
- bool ret = true;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->is_idle)
- ret = adev->powerplay.ip_funcs->is_idle(
- adev->powerplay.pp_handle);
- return ret;
-}
-
-static int amdgpu_pp_wait_for_idle(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->wait_for_idle)
- ret = adev->powerplay.ip_funcs->wait_for_idle(
- adev->powerplay.pp_handle);
- return ret;
-}
-
-static int amdgpu_pp_soft_reset(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->soft_reset)
- ret = adev->powerplay.ip_funcs->soft_reset(
- adev->powerplay.pp_handle);
- return ret;
-}
-
-static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
- .name = "amdgpu_powerplay",
- .early_init = amdgpu_pp_early_init,
- .late_init = amdgpu_pp_late_init,
- .sw_init = amdgpu_pp_sw_init,
- .sw_fini = amdgpu_pp_sw_fini,
- .hw_init = amdgpu_pp_hw_init,
- .hw_fini = amdgpu_pp_hw_fini,
- .late_fini = amdgpu_pp_late_fini,
- .suspend = amdgpu_pp_suspend,
- .resume = amdgpu_pp_resume,
- .is_idle = amdgpu_pp_is_idle,
- .wait_for_idle = amdgpu_pp_wait_for_idle,
- .soft_reset = amdgpu_pp_soft_reset,
- .set_clockgating_state = amdgpu_pp_set_clockgating_state,
- .set_powergating_state = amdgpu_pp_set_powergating_state,
-};
-
-const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
-};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index fb66b45548d3..1c9991738477 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -105,11 +105,16 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
int ret;
ww_mutex_lock(&resv->lock, NULL);
- ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
- AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
+ ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
+ resv, &bo);
if (ret)
goto error;
+ bo->tbo.sg = sg;
+ bo->tbo.ttm->sg = sg;
+ bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+ bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count = 1;
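
Since the sg_table can no longer be passed to amdgpu_bo_create(), the import path above attaches it after creation; the hunk restated with comments:

    ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
                           AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
                           resv, &bo);    /* sg BOs get no backing pages */
    if (ret)
        goto error;

    bo->tbo.sg = sg;        /* share the dma-buf's sg_table */
    bo->tbo.ttm->sg = sg;   /* ttm_sg_tt_init() left this NULL */
    /* restrict imported BOs to GTT so they are reached through the
     * GART and never migrated into VRAM
     */
    bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
    bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
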
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 6e712f12eecd..9a75410cd576 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -62,6 +62,9 @@ static int psp_sw_init(void *handle)
psp->adev = adev;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
ret = psp_init_microcode(psp);
if (ret) {
DRM_ERROR("Failed to load psp firmware!\n");
@@ -75,6 +78,9 @@ static int psp_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
release_firmware(adev->psp.sos_fw);
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
@@ -453,6 +459,9 @@ static int psp_suspend(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret) {
DRM_ERROR("PSP ring stop failed\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index f3d81b6fb499..2dbe87591f81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -59,9 +59,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_cleanup;
}
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- NULL, NULL, &vram_obj);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 0,
+ ttm_bo_type_kernel, NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@@ -80,9 +79,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
void **vram_start, **vram_end;
struct dma_fence *fence = NULL;
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
- NULL, gtt_obj + i);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT, 0,
+ ttm_bo_type_kernel, NULL, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c2fae04d769a..e28b73609fbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -204,6 +204,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
};
+ if (bo->type == ttm_bo_type_sg) {
+ placement->num_placement = 0;
+ placement->num_busy_placement = 0;
+ return;
+ }
+
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
placement->busy_placement = &placements;
@@ -982,20 +988,20 @@ static struct ttm_backend_func amdgpu_backend_func = {
.destroy = &amdgpu_ttm_backend_destroy,
};
-static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags)
+static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt;
- adev = amdgpu_ttm_adev(bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
return NULL;
}
gtt->ttm.ttm.func = &amdgpu_backend_func;
- if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
+ if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
kfree(gtt);
return NULL;
}
@@ -1021,7 +1027,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
if (slave && ttm->sg) {
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address, ttm->num_pages);
+ gtt->ttm.dma_address,
+ ttm->num_pages);
ttm->state = tt_unbound;
return 0;
}
@@ -1335,11 +1342,12 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
if (adev->fw_vram_usage.size > 0 &&
adev->fw_vram_usage.size <= vram_size) {
- r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
- PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL,
- &adev->fw_vram_usage.reserved_bo);
+ r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+ ttm_bo_type_kernel, NULL,
+ &adev->fw_vram_usage.reserved_bo);
if (r)
goto error_create;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0b237e027cab..24474294c92a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -413,9 +413,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
if (!entry->base.bo) {
r = amdgpu_bo_create(adev,
amdgpu_vm_bo_size(adev, level),
- AMDGPU_GPU_PAGE_SIZE, true,
+ AMDGPU_GPU_PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, flags,
- NULL, resv, &pt);
+ ttm_bo_type_kernel, resv, &pt);
if (r)
return r;
@@ -2409,8 +2409,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
AMDGPU_GEM_CREATE_SHADOW);
size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
- r = amdgpu_bo_create(adev, size, align, true, AMDGPU_GEM_DOMAIN_VRAM,
- flags, NULL, NULL, &vm->root.base.bo);
+ r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags,
+ ttm_bo_type_kernel, NULL, &vm->root.base.bo);
if (r)
goto error_free_sched_entity;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 2af26d2da127..d702fb8e3427 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -34,7 +34,7 @@
#include <linux/backlight.h>
#include "bif/bif_4_1_d.h"
-static u8
+u8
amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
{
u8 backlight_level;
@@ -48,7 +48,7 @@ amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
return backlight_level;
}
-static void
+void
amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
u8 backlight_level)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
index 2bdec40515ce..f77cbdef679e 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
@@ -25,6 +25,11 @@
#define __ATOMBIOS_ENCODER_H__
u8
+amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev);
+void
+amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
+ u8 backlight_level);
+u8
amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
void
amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index ddb814f7e952..98d1dd253596 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -65,6 +65,8 @@ MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100
+static const struct amd_pm_funcs ci_dpm_funcs;
+
static const struct ci_pt_defaults defaults_hawaii_xt =
{
1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
@@ -6241,6 +6243,7 @@ static int ci_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->powerplay.pp_funcs = &ci_dpm_funcs;
ci_dpm_set_irq_funcs(adev);
return 0;
@@ -6760,7 +6763,7 @@ static int ci_dpm_read_sensor(void *handle, int idx,
}
}
-const struct amd_ip_funcs ci_dpm_ip_funcs = {
+static const struct amd_ip_funcs ci_dpm_ip_funcs = {
.name = "ci_dpm",
.early_init = ci_dpm_early_init,
.late_init = ci_dpm_late_init,
@@ -6777,7 +6780,16 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
.set_powergating_state = ci_dpm_set_powergating_state,
};
-const struct amd_pm_funcs ci_dpm_funcs = {
+const struct amdgpu_ip_block_version ci_smu_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ci_dpm_ip_funcs,
+};
+
+static const struct amd_pm_funcs ci_dpm_funcs = {
.pre_set_power_state = &ci_dpm_pre_set_power_state,
.set_power_state = &ci_dpm_set_power_state,
.post_set_power_state = &ci_dpm_post_set_power_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 4324184996a5..0df22030e713 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -67,7 +67,6 @@
#include "amdgpu_dm.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_powerplay.h"
#include "dce_virtual.h"
/*
@@ -1887,10 +1886,6 @@ static int cik_common_early_init(void *handle)
return -EINVAL;
}
- adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
-
- amdgpu_device_get_pcie_info(adev);
-
return 0;
}
@@ -2000,7 +1995,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (amdgpu_dpm == -1)
+ amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -2018,7 +2016,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (amdgpu_dpm == -1)
+ amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -2036,7 +2037,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -2055,7 +2056,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
index c7b4349f6319..2a086610f74d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
@@ -24,8 +24,7 @@
#ifndef __CIK_DPM_H__
#define __CIK_DPM_H__
-extern const struct amd_ip_funcs ci_dpm_ip_funcs;
-extern const struct amd_ip_funcs kv_dpm_ip_funcs;
-extern const struct amd_pm_funcs ci_dpm_funcs;
-extern const struct amd_pm_funcs kv_dpm_funcs;
+extern const struct amdgpu_ip_block_version ci_smu_ip_block;
+extern const struct amdgpu_ip_block_version kv_smu_ip_block;
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 7ea900010702..452f88ea46a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2862,6 +2862,11 @@ static int dce_v10_0_hw_fini(void *handle)
static int dce_v10_0_suspend(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->mode_info.bl_level =
+ amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
return dce_v10_0_hw_fini(handle);
}
@@ -2870,6 +2875,9 @@ static int dce_v10_0_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
+ amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+ adev->mode_info.bl_level);
+
ret = dce_v10_0_hw_init(handle);
/* turn on the BL */
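
The suspend hook now snapshots the panel backlight level from the register before hw_fini tears the block down, and resume writes it back before hw_init; the same pair of hunks is repeated for DCE 11.0, 6.0 and 8.0 below. A standalone sketch of the idiom, with bl_reg standing in for the hardware register behind the amdgpu_atombios_encoder_*_backlight_level_*_reg() helpers and saved_level playing the role of adev->mode_info.bl_level:

#include <assert.h>
#include <stdint.h>

static uint32_t bl_reg = 180;   /* pretend panel backlight register */
static uint32_t saved_level;

static void dce_suspend(void)
{
	saved_level = bl_reg;   /* snapshot before hw_fini powers the block down */
	bl_reg = 0;             /* hw_fini: the block loses its programming */
}

static void dce_resume(void)
{
	bl_reg = saved_level;   /* restore before hw_init turns the BL back on */
}

int main(void)
{
	dce_suspend();
	dce_resume();
	assert(bl_reg == 180);  /* level survives a suspend/resume cycle */
	return 0;
}
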
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 158b92ea435f..a7c1c584a191 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2988,6 +2988,11 @@ static int dce_v11_0_hw_fini(void *handle)
static int dce_v11_0_suspend(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->mode_info.bl_level =
+ amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
return dce_v11_0_hw_fini(handle);
}
@@ -2996,6 +3001,9 @@ static int dce_v11_0_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
+ amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+ adev->mode_info.bl_level);
+
ret = dce_v11_0_hw_init(handle);
/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index ee2162e81da9..9f67b7fd3487 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2730,6 +2730,11 @@ static int dce_v6_0_hw_fini(void *handle)
static int dce_v6_0_suspend(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->mode_info.bl_level =
+ amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
return dce_v6_0_hw_fini(handle);
}
@@ -2738,6 +2743,9 @@ static int dce_v6_0_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
+ amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+ adev->mode_info.bl_level);
+
ret = dce_v6_0_hw_init(handle);
/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 8dbe97dff58c..f55422cbd77a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2760,6 +2760,11 @@ static int dce_v8_0_hw_fini(void *handle)
static int dce_v8_0_suspend(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->mode_info.bl_level =
+ amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
return dce_v8_0_hw_fini(handle);
}
@@ -2768,6 +2773,9 @@ static int dce_v8_0_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
+ amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+ adev->mode_info.bl_level);
+
ret = dce_v8_0_hw_init(handle);
/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d73bbb092202..d1d2c27156b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1261,23 +1261,23 @@ static int gfx_v9_0_sw_init(void *handle)
adev->gfx.mec.num_queue_per_pipe = 8;
/* KIQ event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
if (r)
return r;
/* EOP Event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 184,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 185,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
&adev->gfx.priv_inst_irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 67cd1fe17649..a70cbc45c4c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -836,9 +836,9 @@ static int gmc_v9_0_sw_init(void *handle)
spin_lock_init(&adev->gmc.invalidate_lock);
+ adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
switch (adev->asic_type) {
case CHIP_RAVEN:
- adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
} else {
@@ -849,8 +849,6 @@ static int gmc_v9_0_sw_init(void *handle)
}
break;
case CHIP_VEGA10:
- /* XXX Don't know how to get VRAM type yet. */
- adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
/*
* To fulfill 4-level page support,
* vm size is 256TB (48bit), maximum size of Vega10,
@@ -863,9 +861,9 @@ static int gmc_v9_0_sw_init(void *handle)
}
/* This interrupt is VMC page fault.*/
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, 0,
&adev->gmc.vm_fault);
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, 0,
&adev->gmc.vm_fault);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 8766681cfd3f..81babe026529 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -42,6 +42,8 @@
#define KV_MINIMUM_ENGINE_CLOCK 800
#define SMC_RAM_END 0x40000
+static const struct amd_pm_funcs kv_dpm_funcs;
+
static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
bool enable);
@@ -2960,6 +2962,7 @@ static int kv_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->powerplay.pp_funcs = &kv_dpm_funcs;
kv_dpm_set_irq_funcs(adev);
return 0;
@@ -3301,7 +3304,7 @@ static int kv_dpm_read_sensor(void *handle, int idx,
}
}
-const struct amd_ip_funcs kv_dpm_ip_funcs = {
+static const struct amd_ip_funcs kv_dpm_ip_funcs = {
.name = "kv_dpm",
.early_init = kv_dpm_early_init,
.late_init = kv_dpm_late_init,
@@ -3318,7 +3321,16 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
.set_powergating_state = kv_dpm_set_powergating_state,
};
-const struct amd_pm_funcs kv_dpm_funcs = {
+const struct amdgpu_ip_block_version kv_smu_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &kv_dpm_ip_funcs,
+};
+
+static const struct amd_pm_funcs kv_dpm_funcs = {
.pre_set_power_state = &kv_dpm_pre_set_power_state,
.set_power_state = &kv_dpm_set_power_state,
.post_set_power_state = &kv_dpm_post_set_power_state,
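
kv_dpm.c follows the same conversion as ci_dpm.c above (and si_dpm.c below): the amd_ip_funcs and amd_pm_funcs tables become static, a forward declaration lets early_init publish the pm table through the device, and the only symbol left exported is the versioned IP-block descriptor. A standalone sketch of the pattern with simplified stub types (the my_* names are illustrative, and in the driver the pm table pointer lives under adev->powerplay.pp_funcs):

struct amd_pm_funcs { int (*set_power_state)(void *handle); };
struct amd_ip_funcs { const char *name; int (*early_init)(void *handle); };
struct amdgpu_ip_block_version {
	int major, minor, rev;
	const struct amd_ip_funcs *funcs;
};
struct amdgpu_device { const struct amd_pm_funcs *pp_funcs; };

static const struct amd_pm_funcs my_dpm_funcs;          /* forward decl */

static int my_dpm_early_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	adev->pp_funcs = &my_dpm_funcs;  /* hook the static table up at early_init */
	return 0;
}

static const struct amd_ip_funcs my_dpm_ip_funcs = {
	.name = "my_dpm",
	.early_init = my_dpm_early_init,
};

/* the one remaining export, matching kv_smu_ip_block above */
const struct amdgpu_ip_block_version my_smu_ip_block = {
	.major = 1, .minor = 0, .rev = 0,
	.funcs = &my_dpm_ip_funcs,
};

static int my_dpm_set_power_state(void *handle) { (void)handle; return 0; }

static const struct amd_pm_funcs my_dpm_funcs = {
	.set_power_state = my_dpm_set_power_state,
};
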
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 271452d3999a..8fb933c62cf5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -33,56 +33,34 @@
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
- u32 reg;
- int timeout = AI_MAILBOX_TIMEDOUT;
- u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);
-
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL));
- reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL), reg);
-
- /*Wait for RCV_MSG_VALID to be 0*/
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL));
- while (reg & mask) {
- if (timeout <= 0) {
- pr_err("RCV_MSG_VALID is not cleared\n");
- break;
- }
- mdelay(1);
- timeout -=1;
-
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL));
- }
+ WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}
static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
- u32 reg;
+ WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
+}
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL));
- reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL,
- TRN_MSG_VALID, val ? 1 : 0);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL),
- reg);
+/*
+ * this peek_msg may *only* be called from the IRQ routine, because in the
+ * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has
+ * already been set to 1 by the host.
+ *
+ * if called outside the IRQ routine, peek_msg is not guaranteed to return
+ * the correct value, since RCV_DW0 only holds a valid message while the
+ * host has RCV_MSG_VALID set.
+ */
+static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
+{
+ return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}
+
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
enum idh_event event)
{
u32 reg;
- u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);
-
- if (event != IDH_FLR_NOTIFICATION_CMPL) {
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL));
- if (!(reg & mask))
- return -ENOENT;
- }
reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
@@ -94,54 +72,67 @@ static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
return 0;
}
+static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev) {
+ return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
+}
+
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
- int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
- u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK);
- u32 reg;
+ int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
+ u8 reg;
+
+ do {
+ reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
+ if (reg & 2)
+ return 0;
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL));
- while (!(reg & mask)) {
- if (timeout <= 0) {
- pr_err("Doesn't get ack from pf.\n");
- r = -ETIME;
- break;
- }
mdelay(5);
timeout -= 5;
+ } while (timeout > 1);
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL));
- }
+ pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);
- return r;
+ return -ETIME;
}
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
- int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
-
- r = xgpu_ai_mailbox_rcv_msg(adev, event);
- while (r) {
- if (timeout <= 0) {
- pr_err("Doesn't get msg:%d from pf.\n", event);
- r = -ETIME;
- break;
- }
- mdelay(5);
- timeout -= 5;
+ int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;
+ do {
r = xgpu_ai_mailbox_rcv_msg(adev, event);
- }
+ if (!r)
+ return 0;
- return r;
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 1);
+
+ pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+
+ return -ETIME;
}
static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
enum idh_request req, u32 data1, u32 data2, u32 data3) {
u32 reg;
int r;
+ uint8_t trn;
+
+ /* IMPORTANT:
+ * clear TRN_MSG_VALID to clear the host's RCV_MSG_VALID; once that is
+ * cleared, hw automatically clears the host's RCV_MSG_ACK, which in
+ * turn clears the VF's TRN_MSG_ACK. otherwise the xgpu_ai_poll_ack()
+ * below would return immediately.
+ */
+ do {
+ xgpu_ai_mailbox_set_valid(adev, false);
+ trn = xgpu_ai_peek_ack(adev);
+ if (trn) {
+ pr_err("trn=%x ACK should not asssert! wait again !\n", trn);
+ msleep(1);
+ }
+ } while(trn);
reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
@@ -245,15 +236,36 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
-
- /* wait until RCV_MSG become 3 */
- if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
- pr_err("failed to recieve FLR_CMPL\n");
- return;
- }
-
- /* Trigger recovery due to world switch failure */
- amdgpu_device_gpu_recover(adev, NULL, false);
+ int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
+ int locked;
+
+ /* block amdgpu_gpu_recover till the FLR COMPLETE msg is received,
+ * otherwise the mailbox msg will be ruined/reset by
+ * the VF FLR.
+ *
+ * we can unlock lock_reset to allow "amdgpu_job_timedout"
+ * to run gpu_recover() once FLR_NOTIFICATION_CMPL is received,
+ * which means the host side has finished this VF's FLR.
+ */
+ locked = mutex_trylock(&adev->lock_reset);
+ if (locked)
+ adev->in_gpu_reset = 1;
+
+ do {
+ if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
+ goto flr_done;
+
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 1);
+
+flr_done:
+ if (locked)
+ mutex_unlock(&adev->lock_reset);
+
+ /* Trigger recovery for world switch failure if no TDR */
+ if (amdgpu_lockup_timeout == 0)
+ amdgpu_device_gpu_recover(adev, NULL, true);
}
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -274,24 +286,22 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- int r;
-
- /* trigger gpu-reset by hypervisor only if TDR disbaled */
- if (!amdgpu_gpu_recovery) {
- /* see what event we get */
- r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
-
- /* sometimes the interrupt is delayed to inject to VM, so under such case
- * the IDH_FLR_NOTIFICATION is overwritten by VF FLR from GIM side, thus
- * above recieve message could be failed, we should schedule the flr_work
- * anyway
+ enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);
+
+ switch (event) {
+ case IDH_FLR_NOTIFICATION:
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.flr_work);
+ break;
+ /* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
+ * ignore it here since the polling thread will handle it;
+ * other msgs, like FLR complete, are not handled here either.
*/
- if (r) {
- DRM_ERROR("FLR_NOTIFICATION is missed\n");
- xgpu_ai_mailbox_send_ack(adev);
- }
-
- schedule_work(&adev->virt.flr_work);
+ case IDH_CLR_MSG_BUF:
+ case IDH_FLR_NOTIFICATION_CMPL:
+ case IDH_READY_TO_ACCESS_GPU:
+ default:
+ break;
}
return 0;
@@ -319,11 +329,11 @@ int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
int r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
if (r) {
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
return r;
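
The reworked flr_work holds lock_reset while polling for FLR_NOTIFICATION_CMPL, so a concurrent job timeout cannot start gpu_recover() in the middle of the VF FLR, and it uses mutex_trylock() rather than mutex_lock() so it never blocks when the reset path already owns the lock. A kernel-style condensation of that flow (same identifiers as the patch; note that mutex_trylock() returns 1 on success, unlike its pthread namesake):

/* condensed from xgpu_ai_mailbox_flr_work() above */
int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
int locked = mutex_trylock(&adev->lock_reset);  /* 1 on success, 0 if contended */

if (locked)
	adev->in_gpu_reset = 1;

do {
	if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
		break;                   /* host finished this VF's FLR */
	msleep(10);
	timeout -= 10;
} while (timeout > 1);

if (locked)
	mutex_unlock(&adev->lock_reset);
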
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 67e78576a9eb..b4a9ceea334b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -24,7 +24,9 @@
#ifndef __MXGPU_AI_H__
#define __MXGPU_AI_H__
-#define AI_MAILBOX_TIMEDOUT 12000
+#define AI_MAILBOX_POLL_ACK_TIMEDOUT 500
+#define AI_MAILBOX_POLL_MSG_TIMEDOUT 12000
+#define AI_MAILBOX_POLL_FLR_TIMEDOUT 500
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
@@ -51,4 +53,7 @@ int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev);
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev);
void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev);
+#define AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4
+#define AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4 + 1
+
#endif
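
The two new macros turn the MAILBOX_CONTROL register offset into byte addresses for the RREG8/WREG8 accessors used above: SOC15_REG_OFFSET() counts 32-bit registers, hence the multiply by 4, and the TRN_MSG_* control bits sit in byte 0 of the register while the RCV_MSG_* bits sit in byte 1. A standalone check of the arithmetic (the 0x1234 dword offset is made up):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical dword offset; SOC15_REG_OFFSET() values count
	 * 32-bit registers, so byte addressing multiplies by 4 */
	uint32_t mailbox_control_dw = 0x1234;

	uint32_t trn_byte = mailbox_control_dw * 4;     /* byte 0: TRN_MSG_VALID/ACK */
	uint32_t rcv_byte = mailbox_control_dw * 4 + 1; /* byte 1: RCV_MSG_VALID/ACK */

	assert(trn_byte % 4 == 0);        /* TRN byte is the register's LSB */
	assert(rcv_byte == trn_byte + 1); /* RCV fields live one byte up */
	return 0;
}
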
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 215743df0957..9448c45d1b60 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -31,8 +31,6 @@
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "sdma1/sdma1_4_0_sh_mask.h"
-#include "mmhub/mmhub_1_0_offset.h"
-#include "mmhub/mmhub_1_0_sh_mask.h"
#include "hdp/hdp_4_0_offset.h"
#include "sdma0/sdma0_4_1_default.h"
@@ -1172,13 +1170,13 @@ static int sdma_v4_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_SDMA0, 224,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, 224,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_SDMA1, 224,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, 224,
&adev->sdma.trap_irq);
if (r)
return r;
@@ -1333,7 +1331,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
{
DRM_DEBUG("IH: SDMA trap\n");
switch (entry->client_id) {
- case AMDGPU_IH_CLIENTID_SDMA0:
+ case SOC15_IH_CLIENTID_SDMA0:
switch (entry->ring_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[0].ring);
@@ -1349,7 +1347,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_IH_CLIENTID_SDMA1:
+ case SOC15_IH_CLIENTID_SDMA1:
switch (entry->ring_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[1].ring);
@@ -1399,7 +1397,7 @@ static void sdma_v4_0_update_medium_grain_clock_gating(
if (def != data)
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);
- if (adev->asic_type == CHIP_VEGA10) {
+ if (adev->sdma.num_instances > 1) {
def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
@@ -1427,7 +1425,7 @@ static void sdma_v4_0_update_medium_grain_clock_gating(
if (def != data)
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);
- if (adev->asic_type == CHIP_VEGA10) {
+ if (adev->sdma.num_instances > 1) {
def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
data |= (SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
@@ -1458,7 +1456,7 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
/* 1-not override: enable sdma1 mem light sleep */
- if (adev->asic_type == CHIP_VEGA10) {
+ if (adev->sdma.num_instances > 1) {
def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data)
@@ -1472,7 +1470,7 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
/* 0-override:disable sdma1 mem light sleep */
- if (adev->asic_type == CHIP_VEGA10) {
+ if (adev->sdma.num_instances > 1) {
def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data)
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 6e61b56bfbfc..b154667a8fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -32,7 +32,7 @@
#include "amdgpu_vce.h"
#include "atom.h"
#include "amd_pcie.h"
-#include "amdgpu_powerplay.h"
+#include "si_dpm.h"
#include "sid.h"
#include "si_ih.h"
#include "gfx_v6_0.h"
@@ -1983,7 +1983,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &si_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
else
@@ -1997,7 +1997,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &si_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
else
@@ -2011,7 +2011,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &si_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 8137c02fd16a..3bfcf0d257ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -67,6 +67,8 @@ MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+static const struct amd_pm_funcs si_dpm_funcs;
+
union power_info {
struct _ATOM_POWERPLAY_INFO info;
struct _ATOM_POWERPLAY_INFO_V2 info_2;
@@ -7914,6 +7916,7 @@ static int si_dpm_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->powerplay.pp_funcs = &si_dpm_funcs;
si_dpm_set_irq_funcs(adev);
return 0;
}
@@ -8014,7 +8017,7 @@ static int si_dpm_read_sensor(void *handle, int idx,
}
}
-const struct amd_ip_funcs si_dpm_ip_funcs = {
+static const struct amd_ip_funcs si_dpm_ip_funcs = {
.name = "si_dpm",
.early_init = si_dpm_early_init,
.late_init = si_dpm_late_init,
@@ -8031,7 +8034,16 @@ const struct amd_ip_funcs si_dpm_ip_funcs = {
.set_powergating_state = si_dpm_set_powergating_state,
};
-const struct amd_pm_funcs si_dpm_funcs = {
+const struct amdgpu_ip_block_version si_smu_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dpm_ip_funcs,
+};
+
+static const struct amd_pm_funcs si_dpm_funcs = {
.pre_set_power_state = &si_dpm_pre_set_power_state,
.set_power_state = &si_dpm_set_power_state,
.post_set_power_state = &si_dpm_post_set_power_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
index 9fe343de3477..6b7d292b919f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
@@ -245,8 +245,7 @@ enum si_display_gap
SI_PM_DISPLAY_GAP_IGNORE = 3,
};
-extern const struct amd_ip_funcs si_dpm_ip_funcs;
-extern const struct amd_pm_funcs si_dpm_funcs;
+extern const struct amdgpu_ip_block_version si_smu_ip_block;
struct ni_leakage_coeffients
{
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8dc8b72ed49b..c6e857325b58 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -57,7 +57,6 @@
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
-#include "amdgpu_powerplay.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
@@ -531,10 +530,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
- if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
- amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
if (!amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -553,7 +551,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -692,10 +690,6 @@ static int soc15_common_early_init(void *handle)
xgpu_ai_mailbox_set_irq_funcs(adev);
}
- adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
-
- amdgpu_device_get_pcie_info(adev);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index e54cc3ca2303..eddc57f3b72a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -390,13 +390,13 @@ static int uvd_v7_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, 124, &adev->uvd.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.irq);
if (r)
return r;
/* UVD ENC TRAP */
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 2329b310ccf2..73fd48d6c756 100755
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -420,7 +420,7 @@ static int vce_v4_0_sw_init(void *handle)
unsigned size;
int r, i;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index fdf4ac9313cf..8c132673bc79 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -75,13 +75,13 @@ static int vcn_v1_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* VCN DEC TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
if (r)
return r;
/* VCN ENC TRAP */
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, i + 119,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119,
&adev->vcn.irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index cc8ce7e352a8..5ae5ed2e62d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -245,8 +245,8 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
* some faults get cleared.
*/
switch (dw0 & 0xff) {
- case AMDGPU_IH_CLIENTID_VMC:
- case AMDGPU_IH_CLIENTID_UTCL2:
+ case SOC15_IH_CLIENTID_VMC:
+ case SOC15_IH_CLIENTID_UTCL2:
break;
default:
/* Not a VM fault */
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 61360a1552d8..e7fb165cc9db 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -71,7 +71,6 @@
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
-#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
@@ -1097,11 +1096,6 @@ static int vi_common_early_init(void *handle)
xgpu_vi_mailbox_set_irq_funcs(adev);
}
- /* vi use smc load by default */
- adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
-
- amdgpu_device_get_pcie_info(adev);
-
return 0;
}
@@ -1516,7 +1510,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1526,7 +1520,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -1546,7 +1540,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -1568,7 +1562,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -1586,7 +1580,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -1607,7 +1601,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index ec3285f65517..5b124a67404c 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -11,7 +11,7 @@ config DRM_AMD_DC
config DRM_AMD_DC_PRE_VEGA
bool "DC support for Polaris and older ASICs"
- default n
+ default y
help
Choose this option to enable the new DC support for older asics
by default. This includes Polaris, Carrizo, Tonga, Bonaire,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7e5c5c9eeb4f..ae512ecb65ee 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1131,7 +1131,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA10 ||
adev->asic_type == CHIP_RAVEN)
- client_id = AMDGPU_IH_CLIENTID_DCE;
+ client_id = SOC15_IH_CLIENTID_DCE;
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -1231,7 +1231,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
if (r) {
DRM_ERROR("Failed to add crtc irq id!\n");
@@ -1255,7 +1255,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
if (r) {
DRM_ERROR("Failed to add page flip irq id!\n");
return r;
@@ -1276,7 +1276,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
}
/* HPD */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
&adev->hpd_irq);
if (r) {
DRM_ERROR("Failed to add hpd irq id!\n");
@@ -1365,6 +1365,43 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
#endif
+static int initialize_plane(struct amdgpu_display_manager *dm,
+ struct amdgpu_mode_info *mode_info,
+ int plane_id)
+{
+ struct amdgpu_plane *plane;
+ unsigned long possible_crtcs;
+ int ret = 0;
+
+ plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
+ mode_info->planes[plane_id] = plane;
+
+ if (!plane) {
+ DRM_ERROR("KMS: Failed to allocate plane\n");
+ return -ENOMEM;
+ }
+ plane->base.type = mode_info->plane_type[plane_id];
+
+ /*
+ * HACK: IGT tests expect that each plane can only have
+ * one possible CRTC. For now, set one CRTC for each
+ * plane that is not an underlay, but still allow multiple
+ * CRTCs for underlay planes.
+ */
+ possible_crtcs = 1 << plane_id;
+ if (plane_id >= dm->dc->caps.max_streams)
+ possible_crtcs = 0xff;
+
+ ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
+
+ if (ret) {
+ DRM_ERROR("KMS: Failed to initialize plane\n");
+ return ret;
+ }
+
+ return ret;
+}
+
/* In this architecture, the association
* connector -> encoder -> crtc
* is not really required. The crtc and connector will hold the
@@ -1375,12 +1412,12 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
struct amdgpu_display_manager *dm = &adev->dm;
- uint32_t i;
+ int32_t i;
struct amdgpu_dm_connector *aconnector = NULL;
struct amdgpu_encoder *aencoder = NULL;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
uint32_t link_cnt;
- unsigned long possible_crtcs;
+ int32_t total_overlay_planes, total_primary_planes;
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1388,30 +1425,22 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
return -1;
}
- for (i = 0; i < dm->dc->caps.max_planes; i++) {
- struct amdgpu_plane *plane;
+ /* Identify the number of planes to be initialized */
+ total_overlay_planes = dm->dc->caps.max_slave_planes;
+ total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;
- plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
- mode_info->planes[i] = plane;
-
- if (!plane) {
- DRM_ERROR("KMS: Failed to allocate plane\n");
+ /* First initialize overlay planes, index starting after primary planes */
+ for (i = (total_overlay_planes - 1); i >= 0; i--) {
+ if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
+ DRM_ERROR("KMS: Failed to initialize overlay plane\n");
goto fail;
}
- plane->base.type = mode_info->plane_type[i];
-
- /*
- * HACK: IGT tests expect that each plane can only have one
- * one possible CRTC. For now, set one CRTC for each
- * plane that is not an underlay, but still allow multiple
- * CRTCs for underlay planes.
- */
- possible_crtcs = 1 << i;
- if (i >= dm->dc->caps.max_streams)
- possible_crtcs = 0xff;
+ }
- if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
- DRM_ERROR("KMS: Failed to initialize plane\n");
+ /* Initialize primary planes */
+ for (i = (total_primary_planes - 1); i >= 0; i--) {
+ if (initialize_plane(dm, mode_info, i)) {
+ DRM_ERROR("KMS: Failed to initialize primary plane\n");
goto fail;
}
}
@@ -1982,6 +2011,10 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
* every time.
*/
ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
+ if (ret) {
+ dc_transfer_func_release(dc_plane_state->in_transfer_func);
+ dc_plane_state->in_transfer_func = NULL;
+ }
return ret;
}
@@ -4691,8 +4724,8 @@ static int dm_update_planes_state(struct dc *dc,
int ret = 0;
- /* Add new planes */
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ /* Add new planes, in reverse order, as DC expects */
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
new_plane_crtc = new_plane_state->crtc;
old_plane_crtc = old_plane_state->crtc;
dm_new_plane_state = to_dm_plane_state(new_plane_state);
@@ -4737,6 +4770,7 @@ static int dm_update_planes_state(struct dc *dc,
*lock_and_validation_needed = true;
} else { /* Add new planes */
+ struct dc_plane_state *dc_new_plane_state;
if (drm_atomic_plane_disabling(plane->state, new_plane_state))
continue;
@@ -4755,34 +4789,42 @@ static int dm_update_planes_state(struct dc *dc,
WARN_ON(dm_new_plane_state->dc_state);
- dm_new_plane_state->dc_state = dc_create_plane_state(dc);
+ dc_new_plane_state = dc_create_plane_state(dc);
+ if (!dc_new_plane_state)
+ return -ENOMEM;
DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
plane->base.id, new_plane_crtc->base.id);
- if (!dm_new_plane_state->dc_state) {
- ret = -EINVAL;
- return ret;
- }
-
ret = fill_plane_attributes(
new_plane_crtc->dev->dev_private,
- dm_new_plane_state->dc_state,
+ dc_new_plane_state,
new_plane_state,
new_crtc_state);
- if (ret)
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
return ret;
+ }
+ /*
+ * Any atomic check errors that occur after this will
+ * not need a release. The plane state will be attached
+ * to the stream, and therefore part of the atomic
+ * state. It'll be released when the atomic state is
+ * cleaned.
+ */
if (!dc_add_plane_to_context(
dc,
dm_new_crtc_state->stream,
- dm_new_plane_state->dc_state,
+ dc_new_plane_state,
dm_state->context)) {
- ret = -EINVAL;
- return ret;
+ dc_plane_state_release(dc_new_plane_state);
+ return -EINVAL;
}
+ dm_new_plane_state->dc_state = dc_new_plane_state;
+
/* Tell DC to do a full surface update every time there
* is a plane change. Inefficient, but works for now.
*/
@@ -4812,6 +4854,9 @@ static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
return -EDEADLK;
crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
if (crtc->primary == plane && crtc_state->active) {
if (!plane_state->fb)
return -EINVAL;
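
The dm_update_planes_state() hunks above establish a clear ownership rule for the new dc_plane_state: it is created into a local, released on every failure path while the function still owns the only reference, and stored into dm_new_plane_state only after the last fallible step, at which point atomic-state teardown becomes responsible for releasing it. A standalone sketch of that create/fill/attach/publish order (obj_* and the callbacks are hypothetical stand-ins for dc_create_plane_state(), fill_plane_attributes() and dc_add_plane_to_context()):

#include <errno.h>
#include <stdlib.h>

struct obj { int refs; };

static struct obj *obj_create(void) { return calloc(1, sizeof(struct obj)); }
static void obj_release(struct obj *o) { free(o); }

static int add_plane(struct obj **slot, int (*fill)(struct obj *),
		     int (*attach)(struct obj *))
{
	struct obj *o = obj_create();

	if (!o)
		return -ENOMEM;
	if (fill(o)) {
		obj_release(o);  /* failed before publication: we still own it */
		return -EINVAL;
	}
	if (attach(o)) {
		obj_release(o);  /* attach failed: reference is still ours */
		return -EINVAL;
	}
	*slot = o;               /* published: atomic-state teardown frees it */
	return 0;
}
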
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index e845c511656e..f6cb502c303f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -193,6 +193,7 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
struct drm_property_blob *blob = crtc->base.ctm;
struct dc_stream_state *stream = crtc->stream;
struct drm_color_ctm *ctm;
+ int64_t val;
int i;
if (!blob) {
@@ -206,7 +207,9 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
* DRM gives a 3x3 matrix, but DC wants 3x4. Assuming we're operating
* with homogeneous coordinates, augment the matrix with 0's.
*
- * The format provided is S31.32, which is the same as our fixed31_32.
+ * The format provided is S31.32, using signed-magnitude representation.
+ * Our fixed31_32 is also S31.32, but is using 2's complement. We have
+ * to convert from signed-magnitude to 2's complement.
*/
for (i = 0; i < 12; i++) {
/* Skip 4th element */
@@ -214,8 +217,14 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
stream->gamut_remap_matrix.matrix[i] = dal_fixed31_32_zero;
continue;
}
- /* csc[i] = ctm[i - floor(i/4)] */
- stream->gamut_remap_matrix.matrix[i].value = ctm->matrix[i - (i/4)];
+
+ /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */
+ val = ctm->matrix[i - (i/4)];
+ /* If negative, convert to 2's complement. */
+ if (val & (1ULL << 63))
+ val = -(val & ~(1ULL << 63));
+
+ stream->gamut_remap_matrix.matrix[i].value = val;
}
}
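
DRM hands the CTM over as S31.32 in signed-magnitude form (sign in bit 63, magnitude in the low 63 bits), while DC's fixed31_32 is two's-complement S31.32, so each negative entry has to be converted by masking off the sign bit and negating the remainder, exactly as the loop above does. A standalone restatement with a couple of sanity checks (+1.0 is 1 << 32 in S31.32):

#include <assert.h>
#include <stdint.h>

/* signed-magnitude S31.32 -> two's-complement S31.32, as in the hunk above */
static int64_t ctm_to_fixed31_32(uint64_t sm)
{
	int64_t val = (int64_t)sm;

	if (val & (1ULL << 63))                /* sign bit set: negative */
		val = -(val & ~(1ULL << 63));  /* negate the magnitude */
	return val;
}

int main(void)
{
	assert(ctm_to_fixed31_32(1ULL << 32) == (1LL << 32));
	assert(ctm_to_fixed31_32((1ULL << 63) | (1ULL << 32)) == -(1LL << 32));
	return 0;
}
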
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 39cfe0fbf1b9..8291d74f26bc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -85,6 +85,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum ddc_result res;
ssize_t read_bytes;
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
+
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_READ:
read_bytes = dal_ddc_service_read_dpcd_data(
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
index 180a9d69d351..31bee054f43a 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c
@@ -60,7 +60,8 @@ static const struct dc_log_type_info log_type_info_tbl[] = {
{LOG_EVENT_LINK_LOSS, "LinkLoss"},
{LOG_EVENT_UNDERFLOW, "Underflow"},
{LOG_IF_TRACE, "InterfaceTrace"},
- {LOG_DTN, "DTN"}
+ {LOG_DTN, "DTN"},
+ {LOG_PROFILING, "Profiling"}
};
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 1689c670ca6f..e7680c41f117 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -44,7 +44,7 @@
#include "bios_parser_common.h"
#define LAST_RECORD_TYPE 0xff
-
+#define SMU9_SYSPLL0_ID 0
struct i2c_id_config_access {
uint8_t bfI2C_LineMux:4;
@@ -1220,7 +1220,7 @@ static unsigned int bios_parser_get_smu_clock_info(
if (!bp->cmd_tbl.get_smu_clock_info)
return BP_RESULT_FAILURE;
- return bp->cmd_tbl.get_smu_clock_info(bp);
+ return bp->cmd_tbl.get_smu_clock_info(bp, 0);
}
static enum bp_result bios_parser_program_crtc_timing(
@@ -1376,7 +1376,7 @@ static enum bp_result get_firmware_info_v3_1(
if (bp->cmd_tbl.get_smu_clock_info != NULL) {
/* VBIOS gives in 10KHz */
info->smu_gpu_pll_output_freq =
- bp->cmd_tbl.get_smu_clock_info(bp) * 10;
+ bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
}
return BP_RESULT_OK;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index e362658aa3ce..3f63f712c8a4 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -796,7 +796,7 @@ static enum bp_result set_dce_clock_v2_1(
******************************************************************************
*****************************************************************************/
-static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp);
+static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id);
static void init_get_smu_clock_info(struct bios_parser *bp)
{
@@ -805,12 +805,13 @@ static void init_get_smu_clock_info(struct bios_parser *bp)
}
-static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp)
+static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
{
struct atom_get_smu_clock_info_parameters_v3_1 smu_input = {0};
struct atom_get_smu_clock_info_output_parameters_v3_1 smu_output;
smu_input.command = GET_SMU_CLOCK_INFO_V3_1_GET_PLLVCO_FREQ;
+ smu_input.syspll_id = id;
/* Get Specific Clock */
if (EXEC_BIOS_CMD_TABLE(getsmuclockinfo, smu_input)) {
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
index 59061b806df5..ec1c0c9f3f1d 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
@@ -96,7 +96,7 @@ struct cmd_tbl {
struct bios_parser *bp,
struct bp_set_dce_clock_parameters *bp_params);
unsigned int (*get_smu_clock_info)(
- struct bios_parser *bp);
+ struct bios_parser *bp, uint8_t id);
};
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 6d38b8f43198..0cbab81ab304 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -85,7 +85,6 @@ static void calculate_bandwidth(
const uint32_t s_mid5 = 5;
const uint32_t s_mid6 = 6;
const uint32_t s_high = 7;
- const uint32_t bus_efficiency = 1;
const uint32_t dmif_chunk_buff_margin = 1;
uint32_t max_chunks_fbc_mode;
@@ -592,7 +591,12 @@ static void calculate_bandwidth(
/* 1 = use channel 0 and 1*/
/* 2 = use channel 0,1,2,3*/
if ((fbc_enabled == 1 && lpt_enabled == 1)) {
- data->dram_efficiency = bw_int_to_fixed(1);
+ if (vbios->memory_type == bw_def_hbm)
+ data->dram_efficiency = bw_frc_to_fixed(5, 10);
+ else
+ data->dram_efficiency = bw_int_to_fixed(1);
+
+
if (dceip->low_power_tiling_mode == 0) {
data->number_of_dram_channels = 1;
}
@@ -607,7 +611,10 @@ static void calculate_bandwidth(
}
}
else {
- data->dram_efficiency = bw_frc_to_fixed(8, 10);
+ if (vbios->memory_type == bw_def_hbm)
+ data->dram_efficiency = bw_frc_to_fixed(5, 10);
+ else
+ data->dram_efficiency = bw_frc_to_fixed(8, 10);
}
/*memory request size and latency hiding:*/
/*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/
@@ -1171,9 +1178,9 @@ static void calculate_bandwidth(
}
for (i = 0; i <= 2; i++) {
for (j = 0; j <= 7; j++) {
- data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+ data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100)))));
if (data->d1_display_write_back_dwb_enable == 1) {
- data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+ data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(sclk[j], vbios->data_return_bus_width))));
}
}
}
@@ -1258,6 +1265,16 @@ static void calculate_bandwidth(
/* / (dispclk - display bw)*/
/*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/
/*the minimum latency hiding is further limited by the cursor. the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/
+
+ /*initialize variables*/
+ number_of_displays_enabled = 0;
+ number_of_displays_enabled_with_margin = 0;
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k]) {
+ number_of_displays_enabled = number_of_displays_enabled + 1;
+ }
+ data->display_pstate_change_enable[k] = 0;
+ }
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) {
@@ -1276,7 +1293,10 @@ static void calculate_bandwidth(
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) {
- data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+ if (number_of_displays_enabled > 2)
+ data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(2)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+ else
+ data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
}
else {
data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
@@ -1338,24 +1358,15 @@ static void calculate_bandwidth(
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1) {
- data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+ data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency));
}
else {
/*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) * h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/
- data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+ data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency));
}
data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]);
}
}
- /*initialize variables*/
- number_of_displays_enabled = 0;
- number_of_displays_enabled_with_margin = 0;
- for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
- if (data->enable[k]) {
- number_of_displays_enabled = number_of_displays_enabled + 1;
- }
- data->display_pstate_change_enable[k] = 0;
- }
for (i = 0; i <= 2; i++) {
for (j = 0; j <= 7; j++) {
data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999);
@@ -1370,10 +1381,11 @@ static void calculate_bandwidth(
/*determine the minimum dram clock change margin for each set of clock frequencies*/
data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
/*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
- data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
- if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+ data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
+ if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
data->display_pstate_change_enable[k] = 1;
data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+ data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]);
}
}
}
@@ -1383,10 +1395,11 @@ static void calculate_bandwidth(
/*determine the minimum dram clock change margin for each display pipe*/
data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
/*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
- data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
- if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+ data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
+ if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
data->display_pstate_change_enable[k] = 1;
data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+ data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]);
}
}
}
@@ -1420,7 +1433,7 @@ static void calculate_bandwidth(
data->displays_with_same_mode[i] = bw_int_to_fixed(0);
if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) {
for (j = 0; j <= maximum_number_of_surfaces - 1; j++) {
- if ((data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
+ if ((i == j || data->display_synchronization_enabled) && (data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1));
}
}
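
With the rework above, an identically-timed pipe j no longer counts toward pipe i's tally unless display synchronization is enabled; without it, a pipe only matches itself. A minimal sketch of the new predicate, where timings_match stands in for the five bw_equ() comparisons and the helper itself is illustrative, not driver code:

static int counts_toward_same_mode(int i, int j, int sync_enabled,
		int timings_match)
{
	/* pipe j counts only when it is pipe i itself, or when display
	 * synchronization lets identically-timed pipes switch together */
	return (i == j || sync_enabled) && timings_match;
}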
@@ -1435,7 +1448,7 @@ static void calculate_bandwidth(
/*aligned displays with the same timing.*/
/*the display(s) with the negative margin can be switched in the v_blank region while the other*/
/*displays are in v_blank or v_active.*/
- if ((number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk))) {
+ if (number_of_displays_enabled_with_margin > 0 && (number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin) == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk)) {
data->nbp_state_change_enable = bw_def_yes;
}
else {
@@ -1448,6 +1461,25 @@ static void calculate_bandwidth(
else {
nbp_state_change_enable_blank = bw_def_no;
}
+
+ /*average bandwidth*/
+	/*the average bandwidth with no compression during the vertical active time is the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and the ratio of bytes per request divided by the useful bytes per request.*/
+ /*the average bandwidth with compression is the same, divided by the compression ratio*/
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+ data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
+ }
+ }
+ data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
+ data->total_average_bandwidth = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
+ data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
+ }
+ }
+
/*required yclk(pclk)*/
/*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queue size*/
/*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/
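
The added average-bandwidth block computes, for each enabled surface, the uncompressed average bandwidth and a compressed counterpart, then sums both over all enabled surfaces. A minimal plain-double restatement of the per-surface formula, as a sketch only — the driver does this in bw_fixed fixed-point arithmetic, and the helper name and parameters here are illustrative:

static double avg_bw_no_compression(double src_width_chunks,
		double bytes_per_pixel, double h_total, double pixel_rate,
		double vsr, double bytes_per_request,
		double useful_bytes_per_request)
{
	double line_time = h_total / pixel_rate;	/* seconds per scan line */

	/* bytes per line over line time, scaled by vsr and request overhead */
	return (src_width_chunks * bytes_per_pixel / line_time) * vsr
		* (bytes_per_request / useful_bytes_per_request);
}

The compressed per-surface value simply divides this by compression_rate[i]; the two totals are running sums over the enabled surfaces.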
@@ -1497,17 +1529,20 @@ static void calculate_bandwidth(
}
else {
data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000));
- if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
+ if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[low]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
+ && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
yclk_message = bw_fixed_to_int(vbios->low_yclk);
data->y_clk_level = low;
data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
}
- else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[mid]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
+ && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
yclk_message = bw_fixed_to_int(vbios->mid_yclk);
data->y_clk_level = mid;
data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
}
- else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[high]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
+ && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
yclk_message = bw_fixed_to_int(vbios->high_yclk);
data->y_clk_level = high;
data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
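
Each yclk branch above now leads with an extra clause: the total uncompressed average bandwidth must fit under a derated fraction of the ideal DRAM bandwidth at that level, ahead of the pre-existing required-bandwidth, blackout, and nbp-state checks (the last of which can now also be waived via increase_voltage_to_support_mclk_switch). The cap itself, sketched with plain doubles and an assumed helper name:

static double max_avg_dram_bw(double yclk_mhz, double channel_width_bits,
		double num_channels, double max_avg_percent)
{
	/* ideal DRAM bandwidth at this yclk, derated by the dceip
	 * max-average-percent cap initialized to 100 later in this patch */
	return (max_avg_percent / 100.0) * yclk_mhz
		* (channel_width_bits / 8.0) * num_channels;
}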
@@ -1523,8 +1558,8 @@ static void calculate_bandwidth(
/*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in forsaking dram speed/nb p-state change.*/
/*the dmif and mcifwr sclk required is the one that allows the transfer of all pipes' data buffer size through the sclk bus in the time for data transfer*/
/*for dmif, pte and cursor requests have to be included.*/
- data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
- data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+ data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))));
+ data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), vbios->data_return_bus_width);
if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
data->required_sclk = bw_int_to_fixed(9999);
sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
@@ -1537,42 +1572,56 @@ static void calculate_bandwidth(
}
else {
data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk);
- if (bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
+			if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_low]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_low;
data->sclk_level = s_low;
data->required_sclk = vbios->low_sclk;
}
- else if (bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
+			else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid1]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid1;
data->required_sclk = vbios->mid1_sclk;
}
- else if (bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid2]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid2;
data->required_sclk = vbios->mid2_sclk;
}
- else if (bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid3]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid3;
data->required_sclk = vbios->mid3_sclk;
}
- else if (bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid4]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid4;
data->required_sclk = vbios->mid4_sclk;
}
- else if (bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid5]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid5;
data->required_sclk = vbios->mid5_sclk;
}
- else if (bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid6]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid6;
data->required_sclk = vbios->mid6_sclk;
}
- else if (bw_ltn(data->required_sclk, sclk[s_high])) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_high])) {
+ sclk_message = bw_def_high;
+ data->sclk_level = s_high;
+ data->required_sclk = vbios->high_sclk;
+ }
+ else if (bw_meq(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_high])) {
sclk_message = bw_def_high;
data->sclk_level = s_high;
data->required_sclk = vbios->high_sclk;
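
The sclk ladder mirrors the yclk one: every rung gains a port-bandwidth cap of the same shape — a percentage of sclk times the data return bus width — ahead of the existing required-sclk, blackout, and nbp-state clauses, and the trailing bw_meq branch keeps high sclk selectable even when the cap cannot be met. The per-rung core, as an illustrative sketch that omits the latency clauses:

static int sclk_rung_ok(double total_avg_bw, double sclk, double bus_width,
		double max_avg_percent, double required_sclk)
{
	double port_cap = (max_avg_percent / 100.0) * sclk * bus_width;

	return total_avg_bw < port_cap && required_sclk < sclk;
}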
@@ -1681,7 +1730,7 @@ static void calculate_bandwidth(
data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
}
- if (data->nbp_state_change_enable == bw_def_yes) {
+ if (data->nbp_state_change_enable == bw_def_yes && data->increase_voltage_to_support_mclk_switch) {
data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
}
@@ -1861,23 +1910,6 @@ static void calculate_bandwidth(
else {
data->mcifwrdram_access_efficiency = bw_int_to_fixed(0);
}
- /*average bandwidth*/
- /*the average bandwidth with no compression is the vertical active time is the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and the ratio of bytes per request divided by the useful bytes per request.*/
- /*the average bandwidth with compression is the same, divided by the compression ratio*/
- for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
- if (data->enable[i]) {
- data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
- data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
- }
- }
- data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
- data->total_average_bandwidth = bw_int_to_fixed(0);
- for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
- if (data->enable[i]) {
- data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
- data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
- }
- }
/*stutter efficiency*/
/*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. only applies if the display write-back is not enabled.*/
/*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/
@@ -1905,7 +1937,7 @@ static void calculate_bandwidth(
data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size)));
}
}
- data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_min2(bw_mul(data->dram_bandwidth, data->dmifdram_access_efficiency), bw_mul(sclk[data->sclk_level], bw_int_to_fixed(32))));
+ data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_mul(sclk[data->sclk_level], vbios->data_return_bus_width));
data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size;
data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time);
data->time_in_self_refresh = data->min_stutter_refresh_duration;
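
The stutter change above drops the min() against derated DRAM bandwidth: the DMIF buffer is now modeled as draining at the sclk return-bus rate alone. A plain-double sketch (helper name illustrative):

static double stutter_burst_time(double buffer_bytes, double sclk,
		double data_return_bus_width)
{
	/* was: buffer_bytes / min(dram_bandwidth * dmifdram_access_efficiency,
	 *                         sclk * 32) */
	return buffer_bytes / (sclk * data_return_bus_width);
}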
@@ -1957,7 +1989,7 @@ static void calculate_bandwidth(
for (i = 1; i <= 5; i++) {
data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i)));
if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) {
- data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+ data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))));
}
else {
data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na);
@@ -2036,6 +2068,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
vbios.blackout_duration = bw_int_to_fixed(0); /* us */
vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+ dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip.large_cursor = false;
dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2146,6 +2181,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
vbios.blackout_duration = bw_int_to_fixed(0); /* us */
vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+ dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip.large_cursor = false;
dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2259,6 +2297,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
vbios.blackout_duration = bw_int_to_fixed(0); /* us */
vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+ dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip.large_cursor = false;
dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2369,6 +2410,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
vbios.blackout_duration = bw_int_to_fixed(0); /* us */
vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+ dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip.large_cursor = false;
dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2479,6 +2523,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
vbios.blackout_duration = bw_int_to_fixed(0); /* us */
vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+ dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip.large_cursor = false;
dceip.dmif_request_buffer_size = bw_int_to_fixed(2304);
dceip.dmif_pipe_en_fbc_chunk_tracker = true;
@@ -2597,6 +2644,7 @@ static void populate_initial_data(
data->graphics_tiling_mode = bw_def_tiled;
data->underlay_micro_tile_mode = bw_def_display_micro_tiling;
data->graphics_micro_tile_mode = bw_def_display_micro_tiling;
+ data->increase_voltage_to_support_mclk_switch = true;
/* Pipes with underlay first */
for (i = 0; i < pipe_count; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 8020bc7742c1..4bb43a371292 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -983,8 +983,6 @@ bool dcn_validate_bandwidth(
context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
}
- context->bw.dcn.calc_clk.dram_ccm_us = (int)(v->dram_clock_change_margin);
- context->bw.dcn.calc_clk.min_active_dram_ccm_us = (int)(v->min_active_dram_clock_change_margin);
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);
@@ -998,7 +996,26 @@ bool dcn_validate_bandwidth(
dc->debug.min_disp_clk_khz;
}
- context->bw.dcn.calc_clk.max_dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
+ context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
+
+ switch (v->voltage_level) {
+ case 0:
+ context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ (int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
+ break;
+ case 1:
+ context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ (int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
+ break;
+ case 2:
+ context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ (int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
+ break;
+ default:
+ context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ (int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
+ break;
+ }
for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
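
The new switch maps the computed voltage level to the per-voltage maximum DPPCLK from the SoC bounding box, replacing the removed dram_ccm fields. A table-driven equivalent, sketched with a minimal stand-in struct (the real dc->dcn_soc fields shown above are assumed to be in MHz):

struct dcn_soc_sketch {	/* minimal stand-in for the bounding box */
	double max_dppclk_vmin0p65, max_dppclk_vmid0p72,
	       max_dppclk_vnom0p8, max_dppclk_vmax0p9;
};

static int max_supported_dppclk_khz(const struct dcn_soc_sketch *soc,
		int voltage_level)
{
	const double mhz[4] = {
		soc->max_dppclk_vmin0p65,	/* voltage level 0 */
		soc->max_dppclk_vmid0p72,	/* voltage level 1 */
		soc->max_dppclk_vnom0p8,	/* voltage level 2 */
		soc->max_dppclk_vmax0p9,	/* level 3 and the default case */
	};

	return (int)(mhz[voltage_level < 3 ? voltage_level : 3] * 1000);
}

Negative levels are not expected here; the clamp only covers the default arm of the switch.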
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 8394d69b963f..63a3d468939a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -42,6 +42,7 @@
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
+#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "link_hwss.h"
@@ -802,6 +803,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (!dcb->funcs->is_accelerated_mode(dcb))
dc->hwss.enable_accelerated_mode(dc, context);
+ dc->hwss.set_bandwidth(dc, context, false);
+
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/
@@ -870,6 +873,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
+ /* pplib is notified if disp_num changed */
+ dc->hwss.set_bandwidth(dc, context, true);
+
dc_release_state(dc->current_state);
dc->current_state = context;
@@ -1104,9 +1110,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
if (u->plane_info->input_tf != u->surface->input_tf)
update_flags->bits.input_tf_change = 1;
- if (u->plane_info->sdr_white_level != u->surface->sdr_white_level)
- update_flags->bits.output_tf_change = 1;
-
if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
update_flags->bits.horizontal_mirror_change = 1;
@@ -1361,6 +1364,17 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.apply_ctx_for_surface(
dc, pipe_ctx->stream, stream_status->plane_count, context);
+
+ if (stream_update && stream_update->abm_level && pipe_ctx->stream_res.abm) {
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
+			// if otg funcs defined, check if blanked before programming
+ if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
+ pipe_ctx->stream_res.abm->funcs->set_abm_level(
+ pipe_ctx->stream_res.abm, stream->abm_level);
+ } else
+ pipe_ctx->stream_res.abm->funcs->set_abm_level(
+ pipe_ctx->stream_res.abm, stream->abm_level);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index c15565092ca8..5a552cb3f8a7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -36,26 +36,22 @@
#include "hw_sequencer.h"
#include "resource.h"
+#define DC_LOGGER \
+ logger
#define SURFACE_TRACE(...) do {\
if (dc->debug.surface_trace) \
- dm_logger_write(logger, \
- LOG_IF_TRACE, \
- ##__VA_ARGS__); \
+ DC_LOG_IF_TRACE(__VA_ARGS__); \
} while (0)
#define TIMING_TRACE(...) do {\
if (dc->debug.timing_trace) \
- dm_logger_write(logger, \
- LOG_SYNC, \
- ##__VA_ARGS__); \
+ DC_LOG_SYNC(__VA_ARGS__); \
} while (0)
#define CLOCK_TRACE(...) do {\
if (dc->debug.clock_trace) \
- dm_logger_write(logger, \
- LOG_BANDWIDTH_CALCS, \
- ##__VA_ARGS__); \
+ DC_LOG_BANDWIDTH_CALCS(__VA_ARGS__); \
} while (0)
void pre_surface_trace(
@@ -362,25 +358,19 @@ void context_clock_trace(
struct dal_logger *logger = core_dc->ctx->logger;
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
- "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n"
- "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
+ "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
context->bw.dcn.calc_clk.dispclk_khz,
- context->bw.dcn.calc_clk.max_dppclk_khz,
+ context->bw.dcn.calc_clk.dppclk_khz,
context->bw.dcn.calc_clk.dcfclk_khz,
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
context->bw.dcn.calc_clk.fclk_khz,
- context->bw.dcn.calc_clk.socclk_khz,
- context->bw.dcn.calc_clk.dram_ccm_us,
- context->bw.dcn.calc_clk.min_active_dram_ccm_us);
+ context->bw.dcn.calc_clk.socclk_khz);
CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
- "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n"
- "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
+ "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
context->bw.dcn.calc_clk.dispclk_khz,
- context->bw.dcn.calc_clk.max_dppclk_khz,
+ context->bw.dcn.calc_clk.dppclk_khz,
context->bw.dcn.calc_clk.dcfclk_khz,
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.calc_clk.fclk_khz,
- context->bw.dcn.calc_clk.dram_ccm_us,
- context->bw.dcn.calc_clk.min_active_dram_ccm_us);
+			context->bw.dcn.calc_clk.fclk_khz,
+			context->bw.dcn.calc_clk.socclk_khz);
#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index f8c09273e0f1..eeb04471b2f5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1960,6 +1960,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
(abm->funcs->set_backlight_level == NULL))
return false;
+ if (stream) {
+ if (stream->bl_pwm_level == 0)
+ frame_ramp = 0;
+
+ ((struct dc_stream_state *)stream)->bl_pwm_level = level;
+ }
+
use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index b9fc6d842931..ba3487e97361 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1124,6 +1124,7 @@ bool dc_add_plane_to_context(
ASSERT(tail_pipe);
free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
+ free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
@@ -1736,6 +1737,10 @@ enum dc_status resource_map_pool_resources(
pipe_ctx->stream_res.audio, true);
}
+ /* Add ABM to the resource if on EDP */
+ if (pipe_ctx->stream && dc_is_embedded_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.abm = pool->abm;
+
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index cd5819789d76..ce0747ed0f00 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -198,8 +198,7 @@ bool dc_stream_set_cursor_attributes(
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
- if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm &&
- !pipe_ctx->plane_res.dpp) || !pipe_ctx->plane_res.ipp)
+ if (pipe_ctx->stream != stream)
continue;
if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
continue;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 2cd97342bf0f..fa4b3c8b3bb7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.1.37"
+#define DC_VER "3.1.38"
#define MAX_SURFACES 3
#define MAX_STREAMS 6
@@ -186,13 +186,12 @@ enum wm_report_mode {
struct dc_clocks {
int dispclk_khz;
- int max_dppclk_khz;
+ int max_supported_dppclk_khz;
+ int dppclk_khz;
int dcfclk_khz;
int socclk_khz;
int dcfclk_deep_sleep_khz;
int fclk_khz;
- int dram_ccm_us;
- int min_active_dram_ccm_us;
};
struct dc_debug {
@@ -447,6 +446,7 @@ union surface_update_flags {
struct dc_plane_state {
struct dc_plane_address address;
+ struct dc_plane_flip_time time;
struct scaling_taps scaling_quality;
struct rect src_rect;
struct rect dst_rect;
@@ -557,6 +557,7 @@ struct dc_transfer_func *dc_create_transfer_func(void);
*/
struct dc_flip_addrs {
struct dc_plane_address address;
+ unsigned int flip_timestamp_in_us;
bool flip_immediate;
/* TODO: add flip duration for FreeSync */
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index e91ac6811990..b83a7dc2f5a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -692,8 +692,18 @@ struct crtc_trigger_info {
enum trigger_delay delay;
};
-struct dc_crtc_timing {
+enum vrr_state {
+ VRR_STATE_OFF = 0,
+ VRR_STATE_VARIABLE,
+ VRR_STATE_FIXED,
+};
+struct dc_crtc_timing_adjust {
+ uint32_t v_total_min;
+ uint32_t v_total_max;
+};
+
+struct dc_crtc_timing {
uint32_t h_total;
uint32_t h_border_left;
uint32_t h_addressable;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index f44cd4d87b79..d017df56b2ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -48,6 +48,8 @@ struct dc_stream_status {
struct dc_stream_state {
struct dc_sink *sink;
struct dc_crtc_timing timing;
+ struct dc_crtc_timing_adjust timing_adjust;
+ struct vrr_params vrr_params;
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -74,6 +76,10 @@ struct dc_stream_state {
unsigned char psr_version;
/* TODO: CEA VIC */
+ /* DMCU info */
+ unsigned int abm_level;
+ unsigned int bl_pwm_level;
+
/* from core_stream struct */
struct dc_context *ctx;
@@ -106,6 +112,7 @@ struct dc_stream_update {
struct dc_transfer_func *out_transfer_func;
struct dc_hdr_static_metadata *hdr_static_metadata;
enum color_transfer_func color_output_tf;
+ unsigned int *abm_level;
};
bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 8811b6f86bff..9441305d3ab5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -521,6 +521,24 @@ struct audio_info {
struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
};
+struct vrr_params {
+ enum vrr_state state;
+ uint32_t window_min;
+ uint32_t window_max;
+ uint32_t inserted_frame_duration_in_us;
+ uint32_t frames_to_insert;
+ uint32_t frame_counter;
+};
+
+#define DC_PLANE_UPDATE_TIMES_MAX 10
+
+struct dc_plane_flip_time {
+ unsigned int time_elapsed_in_us[DC_PLANE_UPDATE_TIMES_MAX];
+ unsigned int index;
+ unsigned int prev_update_time_in_us;
+};
+
+// Will combine with vrr_params at some point.
struct freesync_context {
bool supported;
bool enabled;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index 1d4546f23135..c24c0e5ea44e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -46,6 +46,23 @@
SR(SMU_INTERRUPT_CONTROL), \
SR(DC_DMCU_SCRATCH)
+#define DMCU_DCE80_REG_LIST() \
+ SR(DMCU_CTRL), \
+ SR(DMCU_STATUS), \
+ SR(DMCU_RAM_ACCESS_CTRL), \
+ SR(DMCU_IRAM_WR_CTRL), \
+ SR(DMCU_IRAM_WR_DATA), \
+ SR(MASTER_COMM_DATA_REG1), \
+ SR(MASTER_COMM_DATA_REG2), \
+ SR(MASTER_COMM_DATA_REG3), \
+ SR(MASTER_COMM_CMD_REG), \
+ SR(MASTER_COMM_CNTL_REG), \
+ SR(DMCU_IRAM_RD_CTRL), \
+ SR(DMCU_IRAM_RD_DATA), \
+ SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
+ SR(SMU_INTERRUPT_CONTROL), \
+ SR(DC_DMCU_SCRATCH)
+
#define DMCU_DCE110_COMMON_REG_LIST() \
DMCU_COMMON_REG_LIST_DCE_BASE(), \
SR(DCI_MEM_PWR_STATUS)
@@ -83,6 +100,24 @@
STATIC_SCREEN4_INT_TO_UC_EN, mask_sh), \
DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
+#define DMCU_MASK_SH_LIST_DCE80(mask_sh) \
+ DMCU_SF(DMCU_CTRL, \
+ DMCU_ENABLE, mask_sh), \
+ DMCU_SF(DMCU_STATUS, \
+ UC_IN_STOP_MODE, mask_sh), \
+ DMCU_SF(DMCU_STATUS, \
+ UC_IN_RESET, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_HOST_ACCESS_EN, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_WR_ADDR_AUTO_INC, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_RD_ADDR_AUTO_INC, mask_sh), \
+ DMCU_SF(MASTER_COMM_CMD_REG, \
+ MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
+ DMCU_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
+ DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
+
#define DMCU_MASK_SH_LIST_DCE110(mask_sh) \
DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
DMCU_SF(DCI_MEM_PWR_STATUS, \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
index 4b8e7ce2de8c..487724345d9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -56,7 +56,7 @@ void dce_pipe_control_lock(struct dc *dc,
if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
return;
- val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->pipe_idx],
+ val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst],
BLND_DCP_GRPH_V_UPDATE_LOCK, &dcp_grph,
BLND_SCL_V_UPDATE_LOCK, &scl,
BLND_BLND_V_UPDATE_LOCK, &blnd,
@@ -67,19 +67,19 @@ void dce_pipe_control_lock(struct dc *dc,
blnd = lock_val;
update_lock_mode = lock_val;
- REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val,
+ REG_SET_2(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], val,
BLND_DCP_GRPH_V_UPDATE_LOCK, dcp_grph,
BLND_SCL_V_UPDATE_LOCK, scl);
if (hws->masks->BLND_BLND_V_UPDATE_LOCK != 0)
- REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val,
+ REG_SET_2(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], val,
BLND_BLND_V_UPDATE_LOCK, blnd,
BLND_V_UPDATE_LOCK_MODE, update_lock_mode);
if (hws->wa.blnd_crtc_trigger) {
if (!lock) {
- uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->pipe_idx]);
- REG_WRITE(CRTC_H_BLANK_START_END[pipe->pipe_idx], value);
+ uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->stream_res.tg->inst]);
+ REG_WRITE(CRTC_H_BLANK_START_END[pipe->stream_res.tg->inst], value);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 3336428b1fed..057b8afd74bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -190,6 +190,7 @@
SR(D2VGA_CONTROL), \
SR(D3VGA_CONTROL), \
SR(D4VGA_CONTROL), \
+ SR(VGA_TEST_CONTROL), \
SR(DC_IP_REQUEST_CNTL), \
BL_REG_LIST()
@@ -261,6 +262,7 @@ struct dce_hwseq_registers {
uint32_t D2VGA_CONTROL;
uint32_t D3VGA_CONTROL;
uint32_t D4VGA_CONTROL;
+ uint32_t VGA_TEST_CONTROL;
/* MMHUB registers. read only. temporary hack */
uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
@@ -327,6 +329,8 @@ struct dce_hwseq_registers {
HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
@@ -403,7 +407,15 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
+ HWS_SF(, D2VGA_CONTROL, D2VGA_MODE_ENABLE, mask_sh),\
+ HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
+ HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
+ HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
+ HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
#define HWSEQ_REG_FIELD_LIST(type) \
@@ -436,7 +448,9 @@ struct dce_hwseq_registers {
type ENABLE_L1_TLB;\
type SYSTEM_ACCESS_MODE;\
type LVTMA_BLON;\
- type LVTMA_PWRSEQ_TARGET_STATE_R;
+ type LVTMA_PWRSEQ_TARGET_STATE_R;\
+ type LVTMA_DIGON;\
+ type LVTMA_DIGON_OVRD;
#define HWSEQ_DCN_REG_FIELD_LIST(type) \
type HUBP_VTG_SEL; \
@@ -483,7 +497,13 @@ struct dce_hwseq_registers {
type DCFCLK_GATE_DIS; \
type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
type DENTIST_DPPCLK_WDIVIDER; \
- type DENTIST_DISPCLK_WDIVIDER;
+ type DENTIST_DISPCLK_WDIVIDER; \
+ type VGA_TEST_ENABLE; \
+ type VGA_TEST_RENDER_START; \
+ type D1VGA_MODE_ENABLE; \
+ type D2VGA_MODE_ENABLE; \
+ type D3VGA_MODE_ENABLE; \
+ type D4VGA_MODE_ENABLE;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 81776e4797ed..8167cad7bcf7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -767,8 +767,7 @@ void dce110_link_encoder_construct(
bp_cap_info.DP_HBR3_EN;
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
} else {
- dm_logger_write(enc110->base.ctx->logger, LOG_WARNING,
- "%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 3bdbed80f7f8..3092f76bdb75 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -51,6 +51,9 @@
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
+#include "dce/dce_dmcu.h"
+#include "dce/dce_abm.h"
+
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_8_2_d.h"
#include "gmc/gmc_8_2_sh_mask.h"
@@ -320,7 +323,29 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCE110(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCE110(__SHIFT)
+};
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCE110(_MASK)
+};
#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03
@@ -622,6 +647,12 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.display_clock != NULL)
dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
if (pool->base.irqs != NULL)
dal_irq_service_destroy(&pool->base.irqs);
}
@@ -829,6 +860,25 @@ static bool construct(
goto res_create_fail;
}
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
/* get static clock information for PPLIB or firmware, save
* max_clock_state
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index c2041a63cccd..30dd62f0f5fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -737,10 +737,14 @@ static bool is_panel_backlight_on(struct dce_hwseq *hws)
static bool is_panel_powered_on(struct dce_hwseq *hws)
{
- uint32_t value;
+ uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
+
+ REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
+
+ REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
- REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &value);
- return value == 1;
+ return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
}
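
The widened check treats the panel as powered either when the LVTMA power sequencer has reached its target state or when DIGON is asserted together with its override. A standalone model of that predicate, with invented register readouts:

#include <stdbool.h>
#include <stdio.h>

static bool panel_powered(unsigned int pwr_seq_state,
			  unsigned int dig_on, unsigned int dig_on_ovrd)
{
	return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
}

int main(void)
{
	printf("%d\n", panel_powered(1, 0, 0)); /* sequencer path: 1 */
	printf("%d\n", panel_powered(0, 1, 1)); /* override path: 1 */
	printf("%d\n", panel_powered(0, 1, 0)); /* DIGON alone: 0 */
	return 0;
}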
static enum bp_result link_transmitter_control(
@@ -1002,8 +1006,10 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
- if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
+ if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
link->dc->hwss.edp_backlight_control(link, true);
+ stream->bl_pwm_level = 0;
+ }
}
void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
{
@@ -1128,7 +1134,7 @@ static void build_audio_output(
static void get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx,
struct tg_color *color)
{
- uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->pipe_idx) / 4;
+ uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->stream_res.tg->inst) / 4;
switch (pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB8888:
@@ -2106,9 +2112,6 @@ enum dc_status dce110_apply_ctx_to_hw(
return status;
}
- /* pplib is notified if disp_num changed */
- dc->hwss.set_bandwidth(dc, context, true);
-
/* to save power */
apply_min_clocks(dc, context, &clocks_state, false);
@@ -2936,15 +2939,18 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
- if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes)
+ if (pipe_ctx->plane_res.ipp &&
+ pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes)
pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
pipe_ctx->plane_res.ipp, attributes);
- if (pipe_ctx->plane_res.mi->funcs->set_cursor_attributes)
+ if (pipe_ctx->plane_res.mi &&
+ pipe_ctx->plane_res.mi->funcs->set_cursor_attributes)
pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
pipe_ctx->plane_res.mi, attributes);
- if (pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes)
+ if (pipe_ctx->plane_res.xfm &&
+ pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes)
pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
pipe_ctx->plane_res.xfm, attributes);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index a36c14d3d9a8..5d854a37a978 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -53,6 +53,8 @@
#include "reg_helper.h"
+#include "dce/dce_dmcu.h"
+#include "dce/dce_abm.h"
/* TODO remove this include */
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
@@ -364,6 +366,29 @@ static const struct resource_caps res_cap_83 = {
.num_pll = 2,
};
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCE80_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCE80(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCE80(_MASK)
+};
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCE110(_MASK)
+};
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -643,6 +668,12 @@ static void destruct(struct dce110_resource_pool *pool)
}
}
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
if (pool->base.dp_clock_source != NULL)
dce80_clock_source_destroy(&pool->base.dp_clock_source);
@@ -850,7 +881,25 @@ static bool dce80_construct(
goto res_create_fail;
}
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
pool->base.display_clock->max_clks_state =
static_clk_info.max_clocks_state;
@@ -1016,6 +1065,25 @@ static bool dce81_construct(
goto res_create_fail;
}
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
pool->base.display_clock->max_clks_state =
@@ -1178,6 +1246,25 @@ static bool dce83_construct(
goto res_create_fail;
}
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
pool->base.display_clock->max_clks_state =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index f0b798930b51..e305c28c98de 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -464,6 +464,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.set_cursor_attributes = dpp1_set_cursor_attributes,
.set_cursor_position = dpp1_set_cursor_position,
.dpp_dppclk_control = dpp1_dppclk_control,
+ .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
};
static struct dpp_caps dcn10_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 07003d9c6bba..17b062a8f88a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -113,7 +113,8 @@
SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
SRI(CURSOR0_COLOR1, CNVC_CUR, id), \
- SRI(DPP_CONTROL, DPP_TOP, id)
+ SRI(DPP_CONTROL, DPP_TOP, id), \
+ SRI(CM_HDR_MULT_COEF, CM, id)
@@ -308,7 +309,8 @@
TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
- TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh)
+ TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
+ TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh)
#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\
TF_REG_LIST_SH_MASK_DCN(mask_sh),\
@@ -1012,7 +1014,8 @@
type CUR0_COLOR0; \
type CUR0_COLOR1; \
type DPPCLK_RATE_CONTROL; \
- type DPP_CLOCK_ENABLE;
+ type DPP_CLOCK_ENABLE; \
+ type CM_HDR_MULT_COEF;
struct dcn_dpp_shift {
TF_REG_FIELD_LIST(uint8_t)
@@ -1258,7 +1261,8 @@ struct dcn_dpp_mask {
uint32_t CURSOR0_CONTROL; \
uint32_t CURSOR0_COLOR0; \
uint32_t CURSOR0_COLOR1; \
- uint32_t DPP_CONTROL;
+ uint32_t DPP_CONTROL; \
+ uint32_t CM_HDR_MULT_COEF;
struct dcn_dpp_registers {
DPP_COMMON_REG_VARIABLE_LIST
@@ -1414,6 +1418,10 @@ void dpp1_dppclk_control(
bool dppclk_div,
bool enable);
+void dpp1_set_hdr_multiplier(
+ struct dpp *dpp_base,
+ uint32_t multiplier);
+
void dpp1_construct(struct dcn10_dpp *dpp1,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index bd3fcdfb79c5..fb32975e4b67 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -804,3 +804,12 @@ void dpp1_program_input_lut(
REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2);
REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num);
}
+
+void dpp1_set_hdr_multiplier(
+ struct dpp *dpp_base,
+ uint32_t multiplier)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 1907ade1574a..8b0f6b8a5627 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -220,10 +220,34 @@ static void enable_power_gating_plane(
static void disable_vga(
struct dce_hwseq *hws)
{
+ unsigned int in_vga1_mode = 0;
+ unsigned int in_vga2_mode = 0;
+ unsigned int in_vga3_mode = 0;
+ unsigned int in_vga4_mode = 0;
+
+ REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
+ REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
+ REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
+ REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
+
+ if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
+ in_vga3_mode == 0 && in_vga4_mode == 0)
+ return;
+
REG_WRITE(D1VGA_CONTROL, 0);
REG_WRITE(D2VGA_CONTROL, 0);
REG_WRITE(D3VGA_CONTROL, 0);
REG_WRITE(D4VGA_CONTROL, 0);
+
+ /* HW Engineer's Notes:
+ * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
+ * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
+ *
+ * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
+ * VGA_TEST_ENABLE, to leave it in the same state as before.
+ */
+ REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
+ REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
static void dpp_pg_control(
@@ -1685,16 +1709,22 @@ static void update_dchubp_dpp(
union plane_size size = plane_state->plane_size;
/* depends on DML calculation, DPP clock value may change dynamically */
+ /* Divide dppclk by 2 only if the requested dppclk is at most
+ * half of the current dispclk.
+ */
if (plane_state->update_flags.bits.full_update) {
+ bool should_divided_by_2 = context->bw.dcn.calc_clk.dppclk_khz <=
+ context->bw.dcn.cur_clk.dispclk_khz / 2;
+
dpp->funcs->dpp_dppclk_control(
dpp,
- context->bw.dcn.calc_clk.max_dppclk_khz <
- context->bw.dcn.calc_clk.dispclk_khz,
+ should_divided_by_2,
true);
- dc->current_state->bw.dcn.cur_clk.max_dppclk_khz =
- context->bw.dcn.calc_clk.max_dppclk_khz;
- context->bw.dcn.cur_clk.max_dppclk_khz = context->bw.dcn.calc_clk.max_dppclk_khz;
+ dc->current_state->bw.dcn.cur_clk.dppclk_khz =
+ should_divided_by_2 ?
+ context->bw.dcn.cur_clk.dispclk_khz / 2 :
+ context->bw.dcn.cur_clk.dispclk_khz;
}
/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@@ -1780,14 +1810,62 @@ static void update_dchubp_dpp(
hubp->funcs->set_blank(hubp, false);
}
+static void dcn10_otg_blank(
+ struct dc *dc,
+ struct stream_resource stream_res,
+ struct dc_stream_state *stream,
+ bool blank)
+{
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
+
+ /* program otg blank color */
+ color_space = stream->output_color_space;
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ if (stream_res.tg->funcs->set_blank_color)
+ stream_res.tg->funcs->set_blank_color(
+ stream_res.tg,
+ &black_color);
+
+ if (!blank) {
+ if (stream_res.tg->funcs->set_blank)
+ stream_res.tg->funcs->set_blank(stream_res.tg, blank);
+ if (stream_res.abm)
+ stream_res.abm->funcs->set_abm_level(stream_res.abm, stream->abm_level);
+ } else {
+ if (stream_res.abm)
+ stream_res.abm->funcs->set_abm_immediate_disable(stream_res.abm);
+ if (stream_res.tg->funcs->set_blank)
+ stream_res.tg->funcs->set_blank(stream_res.tg, blank);
+ }
+}
+
+static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
+{
+ struct fixed31_32 multiplier = dal_fixed31_32_from_fraction(
+ pipe_ctx->plane_state->sdr_white_level, 80);
+ uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
+ struct custom_float_format fmt;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ if (pipe_ctx->plane_state->sdr_white_level > 80)
+ convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
+
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
+ pipe_ctx->plane_res.dpp, hw_mult);
+}
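
For reference, the 0x1f000 default above decodes to 1.0 in the custom float layout described by fmt: a sign bit, 6 exponent bits and 12 mantissa bits, implying an exponent bias of 31. A standalone sketch of that encoding for values of at least 1.0; it approximates what convert_to_custom_float_format produces, truncating where the real helper may round:

#include <stdint.h>
#include <stdio.h>

static uint32_t encode_s6e12(double v)
{
	uint32_t exp = 31;   /* bias for 6 exponent bits: 2^5 - 1 */
	uint32_t mant;

	while (v >= 2.0) {   /* normalize into [1.0, 2.0) */
		v /= 2.0;
		exp++;
	}
	mant = (uint32_t)((v - 1.0) * 4096.0);  /* 12 fraction bits */
	return (exp << 12) | mant;              /* sign bit stays 0 */
}

int main(void)
{
	printf("0x%x\n", encode_s6e12(80.0 / 80.0));   /* 0x1f000 = 1.0 */
	printf("0x%x\n", encode_s6e12(400.0 / 80.0));  /* 0x21400 = 5.0 */
	return 0;
}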
static void program_all_pipe_in_tree(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
-
if (pipe_ctx->top_pipe == NULL) {
+ bool blank = !is_pipe_tree_visible(pipe_ctx);
pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start;
@@ -1798,10 +1876,8 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg);
- if (pipe_ctx->stream_res.tg->funcs->set_blank)
- pipe_ctx->stream_res.tg->funcs->set_blank(
- pipe_ctx->stream_res.tg,
- !is_pipe_tree_visible(pipe_ctx));
+ dcn10_otg_blank(dc, pipe_ctx->stream_res,
+ pipe_ctx->stream, blank);
}
if (pipe_ctx->plane_state != NULL) {
@@ -1810,6 +1886,8 @@ static void program_all_pipe_in_tree(
update_dchubp_dpp(dc, pipe_ctx, context);
+ set_hdr_multiplier(pipe_ctx);
+
if (pipe_ctx->plane_state->update_flags.bits.full_update ||
pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change)
@@ -1836,16 +1914,10 @@ static void dcn10_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->all_displays_in_sync = false;/*todo*/
- pp_display_cfg->nb_pstate_switch_disable = false;
pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
- pp_display_cfg->avail_mclk_switch_time_us =
- context->bw.dcn.cur_clk.dram_ccm_us > 0 ? context->bw.dcn.cur_clk.dram_ccm_us : 0;
- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us =
- context->bw.dcn.cur_clk.min_active_dram_ccm_us > 0 ? context->bw.dcn.cur_clk.min_active_dram_ccm_us : 0;
pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
dce110_fill_display_configs(context, pp_display_cfg);
@@ -1908,29 +1980,23 @@ static void dcn10_apply_ctx_for_surface(
{
int i;
struct timing_generator *tg;
- struct output_pixel_processor *opp;
bool removed_pipe[4] = { false };
unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
bool program_water_mark = false;
struct dc_context *ctx = dc->ctx;
-
struct pipe_ctx *top_pipe_to_program =
find_top_pipe_for_stream(dc, context, stream);
if (!top_pipe_to_program)
return;
- opp = top_pipe_to_program->stream_res.opp;
-
tg = top_pipe_to_program->stream_res.tg;
dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
if (num_planes == 0) {
-
/* OTG blank before removing all front ends */
- if (tg->funcs->set_blank)
- tg->funcs->set_blank(tg, true);
+ dcn10_otg_blank(dc, top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
}
/* Disconnect unused mpcc */
@@ -2056,6 +2122,101 @@ static void dcn10_apply_ctx_for_surface(
*/
}
+static inline bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
+{
+ return ((decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk);
+}
+
+static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
+{
+ bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
+ context->bw.dcn.calc_clk.dppclk_khz;
+ bool dispclk_increase = context->bw.dcn.calc_clk.dispclk_khz >
+ context->bw.dcn.cur_clk.dispclk_khz;
+ int disp_clk_threshold = context->bw.dcn.calc_clk.max_supported_dppclk_khz;
+ bool cur_dpp_div = context->bw.dcn.cur_clk.dispclk_khz >
+ context->bw.dcn.cur_clk.dppclk_khz;
+
+ /* Increasing the clock: current div is 0, requested div is 1. */
+ if (dispclk_increase) {
+ /* Already divided by 2; no need to reach the target clk in two steps. */
+ if (cur_dpp_div)
+ return context->bw.dcn.calc_clk.dispclk_khz;
+
+ /* The requested disp clk is lower than the maximum supported
+ * dpp clk, so no need to reach the target clk in two steps.
+ */
+ if (context->bw.dcn.calc_clk.dispclk_khz <= disp_clk_threshold)
+ return context->bw.dcn.calc_clk.dispclk_khz;
+
+ /* The target dppclk is not requested divided by 2; still within the threshold. */
+ if (!request_dpp_div)
+ return context->bw.dcn.calc_clk.dispclk_khz;
+
+ } else {
+ /* Decreasing the clock: the current dppclk is divided by 2,
+ * the requested dppclk is not.
+ */
+
+ /* The current dppclk is not divided by 2; no need to ramp. */
+ if (!cur_dpp_div)
+ return context->bw.dcn.calc_clk.dispclk_khz;
+
+ /* The current disp clk is lower than the current maximum
+ * dpp clk, so no need to ramp.
+ */
+ if (context->bw.dcn.cur_clk.dispclk_khz <= disp_clk_threshold)
+ return context->bw.dcn.calc_clk.dispclk_khz;
+
+ /* The requested dppclk needs to be divided by 2. */
+ if (request_dpp_div)
+ return context->bw.dcn.calc_clk.dispclk_khz;
+ }
+
+ return disp_clk_threshold;
+}
+
+static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
+ context->bw.dcn.calc_clk.dppclk_khz;
+
+ int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
+
+ /* set disp clk to dpp clk threshold */
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ dispclk_to_dpp_threshold);
+
+ /* Update the requested dppclk divider on each pipe. */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+ pipe_ctx->plane_res.dpp,
+ request_dpp_div,
+ true);
+ }
+
+ /* If the target clk differs from the dppclk threshold, set the target clock. */
+ if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dcn.calc_clk.dispclk_khz);
+ }
+
+ context->bw.dcn.cur_clk.dispclk_khz =
+ context->bw.dcn.calc_clk.dispclk_khz;
+ context->bw.dcn.cur_clk.dppclk_khz =
+ context->bw.dcn.calc_clk.dppclk_khz;
+ context->bw.dcn.cur_clk.max_supported_dppclk_khz =
+ context->bw.dcn.calc_clk.max_supported_dppclk_khz;
+}
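
As a worked example of the two helpers above: suppose dispclk must rise from 300 MHz with the DPP divider off to 800 MHz with the divider engaged, while the undivided dppclk may never exceed 600 MHz. determine_dppclk_threshold() then returns the 600 MHz ceiling, and the ramp lands in two dispclk writes. A toy walk-through with those illustrative numbers, which are not real DCN limits:

#include <stdio.h>

int main(void)
{
	int new_dispclk = 800000;           /* target, divider engaged */
	int new_dppclk = 400000;            /* target dppclk */
	int max_supported_dppclk = 600000;  /* undivided dppclk ceiling */

	/* Jumping straight to 800 MHz would briefly run the still
	 * undivided dppclk at 800 MHz, above the ceiling, so the
	 * threshold caps step one. */
	printf("step 1: dispclk -> %d kHz\n", max_supported_dppclk);
	printf("step 2: engage the /2 divider on every active pipe\n");
	printf("step 3: dispclk -> %d kHz (dppclk = %d kHz)\n",
	       new_dispclk, new_dispclk / 2);

	return (new_dispclk / 2 == new_dppclk) ? 0 : 1;
}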
+
static void dcn10_set_bandwidth(
struct dc *dc,
struct dc_state *context,
@@ -2073,32 +2234,32 @@ static void dcn10_set_bandwidth(
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
return;
- if (decrease_allowed || context->bw.dcn.calc_clk.dispclk_khz
- > dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dcn.calc_clk.dispclk_khz);
- context->bw.dcn.cur_clk.dispclk_khz =
- context->bw.dcn.calc_clk.dispclk_khz;
- }
- if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_khz
- > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
+ if (should_set_clock(
+ decrease_allowed,
+ context->bw.dcn.calc_clk.dcfclk_khz,
+ dc->current_state->bw.dcn.cur_clk.dcfclk_khz)) {
context->bw.dcn.cur_clk.dcfclk_khz =
context->bw.dcn.calc_clk.dcfclk_khz;
smu_req.hard_min_dcefclk_khz =
context->bw.dcn.calc_clk.dcfclk_khz;
}
- if (decrease_allowed || context->bw.dcn.calc_clk.fclk_khz
- > dc->current_state->bw.dcn.cur_clk.fclk_khz) {
+
+ if (should_set_clock(
+ decrease_allowed,
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+ dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz)) {
+ context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
+ }
+
+ if (should_set_clock(
+ decrease_allowed,
+ context->bw.dcn.calc_clk.fclk_khz,
+ dc->current_state->bw.dcn.cur_clk.fclk_khz)) {
context->bw.dcn.cur_clk.fclk_khz =
context->bw.dcn.calc_clk.fclk_khz;
smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
}
- if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz
- > dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz) {
- context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
- }
smu_req.display_count = context->stream_count;
@@ -2107,17 +2268,17 @@ static void dcn10_set_bandwidth(
*smu_req_cur = smu_req;
- /* Decrease in freq is increase in period so opposite comparison for dram_ccm */
- if (decrease_allowed || context->bw.dcn.calc_clk.dram_ccm_us
- < dc->current_state->bw.dcn.cur_clk.dram_ccm_us) {
- context->bw.dcn.cur_clk.dram_ccm_us =
- context->bw.dcn.calc_clk.dram_ccm_us;
- }
- if (decrease_allowed || context->bw.dcn.calc_clk.min_active_dram_ccm_us
- < dc->current_state->bw.dcn.cur_clk.min_active_dram_ccm_us) {
- context->bw.dcn.cur_clk.min_active_dram_ccm_us =
- context->bw.dcn.calc_clk.min_active_dram_ccm_us;
+ /* Keep the dcfclk request ahead of the dispclk/dppclk ramp so
+ * there is enough voltage to run the dpp clk.
+ */
+ if (should_set_clock(
+ decrease_allowed,
+ context->bw.dcn.calc_clk.dispclk_khz,
+ dc->current_state->bw.dcn.cur_clk.dispclk_khz)) {
+
+ ramp_up_dispclk_with_dpp(dc, context);
}
+
dcn10_pplib_apply_display_requirements(dc, context);
if (dc->debug.sanity_checks) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index c4a564cb56b9..02bd664aed3e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -440,7 +440,11 @@ static const struct dc_debug debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
- .min_disp_clk_khz = 300000,
+ /* The Raven SMU doesn't allow a disp clk of 0;
+ * its minimum disp clk limit is 50 MHz.
+ * Keep the minimum disp clk at 100 MHz to avoid an SMU hang.
+ */
+ .min_disp_clk_khz = 100000,
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = false,
@@ -963,6 +967,7 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
idle_pipe->stream = head_pipe->stream;
idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
+ idle_pipe->stream_res.abm = head_pipe->stream_res.abm;
idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 189052e911fc..48400d642610 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -24,6 +24,7 @@
*/
#include "display_rq_dlg_helpers.h"
+#include "dml_logger.h"
void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
index b2847bc469fe..f78cbae9db88 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
@@ -31,8 +31,6 @@
#include "display_mode_structs.h"
#include "display_mode_enums.h"
-#define dml_print(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
-#define DTRACE(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
double dml_round(double a);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index e68086b8a22f..f9cf08357989 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -28,6 +28,7 @@
#include "dml_common_defs.h"
#include "../calcs/dcn_calc_math.h"
+#include "dml_logger.h"
static inline double dml_min(double a, double b)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/display/dc/dml/dml_logger.h
index c0c4bfdcdb14..465859b77248 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_logger.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,11 +23,16 @@
*
*/
-#ifndef __AMDGPU_POWERPLAY_H__
-#define __AMDGPU_POWERPLAY_H__
-#include "amd_shared.h"
+#ifndef __DML_LOGGER_H_
+#define __DML_LOGGER_H_
+
+#define DC_LOGGER \
+ mode_lib->logger
+
+#define dml_print(str, ...) {DC_LOG_DML(str, ##__VA_ARGS__); }
+#define DTRACE(str, ...) {DC_LOG_DML(str, ##__VA_ARGS__); }
+
+#endif
-extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;
-#endif /* __AMDGPU_POWERPLAY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index b8f05384a897..8c51ad70cace 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -194,6 +194,8 @@ struct stream_resource {
struct pixel_clk_params pix_clk_params;
struct encoder_info_frame encoder_info_frame;
+
+ struct abm *abm;
};
struct plane_resource {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
index ae2399f16d1c..a9bfe9ff8ce6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
@@ -130,6 +130,9 @@ enum bw_defines {
struct bw_calcs_dceip {
enum bw_calcs_version version;
+ uint32_t percent_of_ideal_port_bw_received_after_urgent_latency;
+ uint32_t max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation;
+ uint32_t max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation;
bool large_cursor;
uint32_t cursor_max_outstanding_group_num;
bool dmif_pipe_en_fbc_chunk_tracker;
@@ -230,6 +233,7 @@ struct bw_calcs_vbios {
struct bw_calcs_data {
/* data for all displays */
+ bool display_synchronization_enabled;
uint32_t number_of_displays;
enum bw_defines underlay_surface_type;
enum bw_defines panning_and_bezel_adjustment;
@@ -241,6 +245,7 @@ struct bw_calcs_data {
bool d1_display_write_back_dwb_enable;
enum bw_defines d1_underlay_mode;
+ bool increase_voltage_to_support_mclk_switch;
bool cpup_state_change_enable;
bool cpuc_state_change_enable;
bool nbp_state_change_enable;
@@ -449,6 +454,7 @@ struct bw_calcs_data {
struct bw_fixed dram_speed_change_line_source_transfer_time[maximum_number_of_surfaces][3][8];
struct bw_fixed min_dram_speed_change_margin[3][8];
struct bw_fixed dispclk_required_for_dram_speed_change[3][8];
+ struct bw_fixed dispclk_required_for_dram_speed_change_pipe[3][8];
struct bw_fixed blackout_duration_margin[3][8];
struct bw_fixed dispclk_required_for_blackout_duration[3][8];
struct bw_fixed dispclk_required_for_blackout_recovery[3][8];
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index c5aae2daf442..99995608b620 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -132,6 +132,9 @@ struct dpp_funcs {
const struct dc_cursor_mi_param *param,
uint32_t width
);
+ void (*dpp_set_hdr_multiplier)(
+ struct dpp *dpp_base,
+ uint32_t multiplier);
void (*dpp_dppclk_control)(
struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index b727f5eeb3a9..427796bdc14a 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -98,6 +98,7 @@ enum dc_log_type {
LOG_EVENT_UNDERFLOW,
LOG_IF_TRACE,
LOG_PERF_TRACE,
+ LOG_PROFILING,
LOG_SECTION_TOTAL_COUNT
};
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 57d5c2575de1..e7e374f56864 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1267,7 +1267,8 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
bool ret = false;
struct pwl_float_data_ex *rgb_regamma = NULL;
- if (trans == TRANSFER_FUNCTION_UNITY) {
+ if (trans == TRANSFER_FUNCTION_UNITY ||
+ trans == TRANSFER_FUNCTION_LINEAR) {
points->end_exponent = 0;
points->x_point_at_y1_red = 1;
points->x_point_at_y1_green = 1;
@@ -1337,7 +1338,8 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
bool ret = false;
struct pwl_float_data_ex *rgb_degamma = NULL;
- if (trans == TRANSFER_FUNCTION_UNITY) {
+ if (trans == TRANSFER_FUNCTION_UNITY ||
+ trans == TRANSFER_FUNCTION_LINEAR) {
for (i = 0; i <= MAX_HW_POINTS ; i++) {
points->red[i] = coordinates_x[i].x;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index b4723af368a5..27d4003aa2c7 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -33,7 +33,7 @@
/* Refresh rate ramp at a fixed rate of 65 Hz/second */
#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
/* Number of elements in the render times cache array */
-#define RENDER_TIMES_MAX_COUNT 20
+#define RENDER_TIMES_MAX_COUNT 10
/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
#define BTR_EXIT_MARGIN 2000
/* Number of consecutive frames to check before entering/exiting fixed refresh*/
@@ -46,13 +46,15 @@
#define FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY "DalFreeSyncNoStaticForInternal"
+#define FREESYNC_DEFAULT_REGKEY "LCDFreeSyncDefault"
+
struct gradual_static_ramp {
bool ramp_is_active;
bool ramp_direction_is_up;
unsigned int ramp_current_frame_duration_in_ns;
};
-struct time_cache {
+struct freesync_time {
/* video (48Hz feature) related */
unsigned int update_duration_in_ns;
@@ -64,6 +66,9 @@ struct time_cache {
unsigned int render_times_index;
unsigned int render_times[RENDER_TIMES_MAX_COUNT];
+
+ unsigned int min_window;
+ unsigned int max_window;
};
struct below_the_range {
@@ -98,11 +103,14 @@ struct freesync_state {
bool static_screen;
bool video;
+ unsigned int vmin;
+ unsigned int vmax;
+
+ struct freesync_time time;
+
unsigned int nominal_refresh_rate_in_micro_hz;
bool windowed_fullscreen;
- struct time_cache time;
-
struct gradual_static_ramp static_ramp;
struct below_the_range btr;
struct fixed_refresh fixed_refresh;
@@ -119,14 +127,16 @@ struct freesync_entity {
struct freesync_registry_options {
bool drr_external_supported;
bool drr_internal_supported;
+ bool lcd_freesync_default_set;
+ int lcd_freesync_default_value;
};
struct core_freesync {
struct mod_freesync public;
struct dc *dc;
+ struct freesync_registry_options opts;
struct freesync_entity *map;
int num_entities;
- struct freesync_registry_options opts;
};
#define MOD_FREESYNC_TO_CORE(mod_freesync)\
@@ -146,7 +156,7 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
goto fail_alloc_context;
core_freesync->map = kzalloc(sizeof(struct freesync_entity) * MOD_FREESYNC_MAX_CONCURRENT_STREAMS,
- GFP_KERNEL);
+ GFP_KERNEL);
if (core_freesync->map == NULL)
goto fail_alloc_map;
@@ -183,6 +193,16 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
(data & 1) ? false : true;
}
+ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+ FREESYNC_DEFAULT_REGKEY,
+ &data, sizeof(data), &flag)) {
+ core_freesync->opts.lcd_freesync_default_set = true;
+ core_freesync->opts.lcd_freesync_default_value = data;
+ } else {
+ core_freesync->opts.lcd_freesync_default_set = false;
+ core_freesync->opts.lcd_freesync_default_value = 0;
+ }
+
return &core_freesync->public;
fail_construct:
@@ -288,6 +308,18 @@ bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
core_freesync->map[core_freesync->num_entities].user_enable.
enable_for_video =
(persistent_freesync_enable & 4) ? true : false;
+ /* If this is a FreeSync display and LCDFreeSyncDefault is set,
+ * use it as the default user-enable value and write it back to
+ * userenable.
+ */
+ } else if (caps->supported && (core_freesync->opts.lcd_freesync_default_set)) {
+ core_freesync->map[core_freesync->num_entities].user_enable.enable_for_gaming =
+ (core_freesync->opts.lcd_freesync_default_value & 1) ? true : false;
+ core_freesync->map[core_freesync->num_entities].user_enable.enable_for_static =
+ (core_freesync->opts.lcd_freesync_default_value & 2) ? true : false;
+ core_freesync->map[core_freesync->num_entities].user_enable.enable_for_video =
+ (core_freesync->opts.lcd_freesync_default_value & 4) ? true : false;
+ dm_write_persistent_data(dc->ctx, stream->sink,
+ FREESYNC_REGISTRY_NAME,
+ "userenable", &core_freesync->opts.lcd_freesync_default_value,
+ sizeof(int), &flag);
} else {
core_freesync->map[core_freesync->num_entities].user_enable.
enable_for_gaming = false;
@@ -330,6 +362,25 @@ bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync,
return true;
}
+static void adjust_vmin_vmax(struct core_freesync *core_freesync,
+ struct dc_stream_state **streams,
+ int num_streams,
+ int map_index,
+ unsigned int v_total_min,
+ unsigned int v_total_max)
+{
+ if (num_streams == 0 || streams == NULL || num_streams > 1)
+ return;
+
+ core_freesync->map[map_index].state.vmin = v_total_min;
+ core_freesync->map[map_index].state.vmax = v_total_max;
+
+ dc_stream_adjust_vmin_vmax(core_freesync->dc, streams,
+ num_streams, v_total_min,
+ v_total_max);
+}
+
static void update_stream_freesync_context(struct core_freesync *core_freesync,
struct dc_stream_state *stream)
{
@@ -588,9 +639,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
update_stream_freesync_context(core_freesync,
streams[stream_idx]);
- dc_stream_adjust_vmin_vmax(core_freesync->dc, streams,
- num_streams, v_total_min,
- v_total_max);
+ adjust_vmin_vmax(core_freesync, streams,
+ num_streams, map_index,
+ v_total_min,
+ v_total_max);
return true;
@@ -613,9 +665,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
core_freesync,
streams[stream_idx]);
- dc_stream_adjust_vmin_vmax(
- core_freesync->dc, streams,
- num_streams, v_total_nominal,
+ adjust_vmin_vmax(
+ core_freesync, streams,
+ num_streams, map_index,
+ v_total_nominal,
v_total_nominal);
}
return true;
@@ -632,9 +685,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
core_freesync,
streams[stream_idx]);
- dc_stream_adjust_vmin_vmax(core_freesync->dc, streams,
- num_streams, v_total_nominal,
- v_total_nominal);
+ adjust_vmin_vmax(core_freesync, streams,
+ num_streams, map_index,
+ v_total_nominal,
+ v_total_nominal);
/* Reset the cached variables */
reset_freesync_state_variables(state);
@@ -650,9 +704,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
* not support freesync because a former stream has
* been programmed
*/
- dc_stream_adjust_vmin_vmax(core_freesync->dc, streams,
- num_streams, v_total_nominal,
- v_total_nominal);
+ adjust_vmin_vmax(core_freesync, streams,
+ num_streams, map_index,
+ v_total_nominal,
+ v_total_nominal);
/* Reset the cached variables */
reset_freesync_state_variables(state);
}
@@ -769,8 +824,9 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
vmin = inserted_frame_v_total;
/* Program V_TOTAL */
- dc_stream_adjust_vmin_vmax(core_freesync->dc, streams,
- num_streams, vmin, vmax);
+ adjust_vmin_vmax(core_freesync, streams,
+ num_streams, index,
+ vmin, vmax);
}
if (state->btr.frame_counter > 0)
@@ -804,9 +860,10 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
update_stream_freesync_context(core_freesync, streams[0]);
/* Program static screen ramp values */
- dc_stream_adjust_vmin_vmax(core_freesync->dc, streams,
- num_streams, v_total,
- v_total);
+ adjust_vmin_vmax(core_freesync, streams,
+ num_streams, index,
+ v_total,
+ v_total);
triggers.overlay_update = true;
triggers.surface_update = true;
@@ -1063,9 +1120,9 @@ bool mod_freesync_override_min_max(struct mod_freesync *mod_freesync,
max_refresh);
/* Program vtotal min/max */
- dc_stream_adjust_vmin_vmax(core_freesync->dc, &streams, 1,
- state->freesync_range.vmin,
- state->freesync_range.vmax);
+ adjust_vmin_vmax(core_freesync, &streams, 1, index,
+ state->freesync_range.vmin,
+ state->freesync_range.vmax);
}
if (min_refresh != 0 &&
@@ -1399,11 +1456,9 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
} else {
vmin = state->freesync_range.vmin;
-
vmax = vmin;
-
- dc_stream_adjust_vmin_vmax(core_freesync->dc, &stream,
- 1, vmin, vmax);
+ adjust_vmin_vmax(core_freesync, &stream,
+ 1, map_index, vmin, vmax);
}
}
@@ -1457,3 +1512,43 @@ void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
}
}
+
+void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ unsigned int *v_total_min, unsigned int *v_total_max,
+ unsigned int *event_triggers,
+ unsigned int *window_min, unsigned int *window_max,
+ unsigned int *lfc_mid_point_in_us,
+ unsigned int *inserted_frames,
+ unsigned int *inserted_duration_in_us)
+{
+ unsigned int stream_index, map_index;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ for (stream_index = 0; stream_index < num_streams; stream_index++) {
+
+ map_index = map_index_from_stream(core_freesync,
+ streams[stream_index]);
+
+ if (core_freesync->map[map_index].caps->supported) {
+ struct freesync_state state =
+ core_freesync->map[map_index].state;
+ *v_total_min = state.vmin;
+ *v_total_max = state.vmax;
+ *event_triggers = 0;
+ *window_min = state.time.min_window;
+ *window_max = state.time.max_window;
+ *lfc_mid_point_in_us = state.btr.mid_point_in_us;
+ *inserted_frames = state.btr.frames_to_insert;
+ *inserted_duration_in_us =
+ state.btr.inserted_frame_duration_in_us;
+ }
+
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index 84b53425f2c8..f083e1619dbe 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -164,4 +164,13 @@ void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
struct dc_stream_state **streams, int num_streams,
unsigned int curr_time_stamp);
+void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ unsigned int *v_total_min, unsigned int *v_total_max,
+ unsigned int *event_triggers,
+ unsigned int *window_min, unsigned int *window_max,
+ unsigned int *lfc_mid_point_in_us,
+ unsigned int *inserted_frames,
+ unsigned int *inserted_duration_in_us);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
index 0c1593e53654..3230e2adb870 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,29 +19,47 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: AMD
+ *
*/
-#ifndef PP_ASICBLOCKS_H
-#define PP_ASICBLOCKS_H
+#ifndef MODULES_INC_MOD_STATS_H_
+#define MODULES_INC_MOD_STATS_H_
-enum PHM_AsicBlock {
- PHM_AsicBlock_GFX,
- PHM_AsicBlock_UVD_MVC,
- PHM_AsicBlock_UVD,
- PHM_AsicBlock_UVD_HD,
- PHM_AsicBlock_UVD_SD,
- PHM_AsicBlock_Count
-};
+#include "dm_services.h"
-enum PHM_ClockGateSetting {
- PHM_ClockGateSetting_StaticOn,
- PHM_ClockGateSetting_StaticOff,
- PHM_ClockGateSetting_Dynamic
+struct mod_stats {
+ int dummy;
};
-struct phm_asic_blocks {
- bool gfx : 1;
- bool uvd : 1;
+struct mod_stats_caps {
+ bool dummy;
};
-#endif
+struct mod_stats *mod_stats_create(struct dc *dc);
+
+void mod_stats_destroy(struct mod_stats *mod_stats);
+
+bool mod_stats_init(struct mod_stats *mod_stats);
+
+void mod_stats_dump(struct mod_stats *mod_stats);
+
+void mod_stats_reset_data(struct mod_stats *mod_stats);
+
+void mod_stats_update_flip(struct mod_stats *mod_stats,
+ unsigned long timestamp_in_ns);
+
+void mod_stats_update_vupdate(struct mod_stats *mod_stats,
+ unsigned long timestamp_in_ns);
+
+void mod_stats_update_freesync(struct mod_stats *mod_stats,
+ unsigned int v_total_min,
+ unsigned int v_total_max,
+ unsigned int event_triggers,
+ unsigned int window_min,
+ unsigned int window_max,
+ unsigned int lfc_mid_point_in_us,
+ unsigned int inserted_frames,
+ unsigned int inserted_frame_duration_in_us);
+
+#endif /* MODULES_INC_MOD_STATS_H_ */
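
A hypothetical consumer tying the mod_freesync_get_settings() interface added earlier to this stats interface; the function name and call site are invented for illustration, but both prototypes are exactly as declared in the two headers:

static void dm_log_freesync_state(struct mod_freesync *freesync,
				  struct mod_stats *stats,
				  struct dc_stream_state *stream)
{
	unsigned int vmin = 0, vmax = 0, triggers = 0;
	unsigned int wmin = 0, wmax = 0, mid = 0;
	unsigned int frames = 0, dur = 0;

	mod_freesync_get_settings(freesync, &stream, 1,
				  &vmin, &vmax, &triggers,
				  &wmin, &wmax, &mid,
				  &frames, &dur);
	mod_stats_update_freesync(stats, vmin, vmax, triggers,
				  wmin, wmax, mid, frames, dur);
}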
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
new file mode 100644
index 000000000000..041f87b73d5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "mod_stats.h"
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+
+#define DAL_STATS_ENABLE_REGKEY "DalStatsEnable"
+#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000001
+#define DAL_STATS_ENABLE_REGKEY_ENABLED 0x00000001
+
+#define DAL_STATS_ENTRIES_REGKEY "DalStatsEntries"
+#define DAL_STATS_ENTRIES_REGKEY_DEFAULT 0x00350000
+#define DAL_STATS_ENTRIES_REGKEY_MAX 0x01000000
+
+#define MOD_STATS_NUM_VSYNCS 5
+
+struct stats_time_cache {
+ unsigned long flip_timestamp_in_ns;
+ unsigned long vupdate_timestamp_in_ns;
+
+ unsigned int render_time_in_us;
+ unsigned int avg_render_time_in_us_last_ten;
+ unsigned int v_sync_time_in_us[MOD_STATS_NUM_VSYNCS];
+ unsigned int num_vsync_between_flips;
+
+ unsigned int flip_to_vsync_time_in_us;
+ unsigned int vsync_to_flip_time_in_us;
+
+ unsigned int min_window;
+ unsigned int max_window;
+ unsigned int v_total_min;
+ unsigned int v_total_max;
+ unsigned int event_triggers;
+
+ unsigned int lfc_mid_point_in_us;
+ unsigned int num_frames_inserted;
+ unsigned int inserted_duration_in_us;
+
+ unsigned int flags;
+};
+
+struct core_stats {
+ struct mod_stats public;
+ struct dc *dc;
+
+ struct stats_time_cache *time;
+ unsigned int index;
+
+ bool enabled;
+ unsigned int entries;
+};
+
+#define MOD_STATS_TO_CORE(mod_stats)\
+ container_of(mod_stats, struct core_stats, public)
+
+bool mod_stats_init(struct mod_stats *mod_stats)
+{
+ bool result = false;
+ struct core_stats *core_stats = NULL;
+ struct dc *dc = NULL;
+
+ if (mod_stats == NULL)
+ return false;
+
+ core_stats = MOD_STATS_TO_CORE(mod_stats);
+ dc = core_stats->dc;
+
+ return result;
+}
+
+struct mod_stats *mod_stats_create(struct dc *dc)
+{
+ struct core_stats *core_stats = NULL;
+ struct persistent_data_flag flag;
+ unsigned int reg_data;
+
+ core_stats = kzalloc(sizeof(struct core_stats), GFP_KERNEL);
+
+ if (core_stats == NULL)
+ goto fail_alloc_context;
+
+ if (dc == NULL)
+ goto fail_construct;
+
+ core_stats->dc = dc;
+
+ core_stats->enabled = DAL_STATS_ENABLE_REGKEY_DEFAULT;
+ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+ DAL_STATS_ENABLE_REGKEY,
+ &reg_data, sizeof(unsigned int), &flag))
+ core_stats->enabled = reg_data;
+
+ core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
+ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+ DAL_STATS_ENTRIES_REGKEY,
+ &reg_data, sizeof(unsigned int), &flag)) {
+ if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
+ core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
+ else
+ core_stats->entries = reg_data;
+ }
+
+ core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries,
+ GFP_KERNEL);
+
+ if (core_stats->time == NULL)
+ goto fail_construct;
+
+ /* Purposely leave index 0 unused so we don't need special logic to
+ * handle calculation cases that depend on previous flip data.
+ */
+ core_stats->index = 1;
+
+ return &core_stats->public;
+
+fail_construct:
+ kfree(core_stats);
+
+fail_alloc_context:
+ return NULL;
+}
+
+void mod_stats_destroy(struct mod_stats *mod_stats)
+{
+ if (mod_stats != NULL) {
+ struct core_stats *core_stats = MOD_STATS_TO_CORE(mod_stats);
+
+ kfree(core_stats->time);
+
+ kfree(core_stats);
+ }
+}
+
+void mod_stats_dump(struct mod_stats *mod_stats)
+{
+ struct dc *dc = NULL;
+ struct dal_logger *logger = NULL;
+ struct core_stats *core_stats = NULL;
+ struct stats_time_cache *time = NULL;
+ unsigned int i;
+
+ if (mod_stats == NULL)
+ return;
+
+ core_stats = MOD_STATS_TO_CORE(mod_stats);
+ dc = core_stats->dc;
+ logger = dc->ctx->logger;
+ time = core_stats->time;
+
+ //LogEntry* pLog = GetLog()->Open(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);
+
+ //if (!pLog->IsDummyEntry())
+ {
+ dm_logger_write(logger, LOG_PROFILING, "==Display Caps==\n");
+ dm_logger_write(logger, LOG_PROFILING, "\n");
+ dm_logger_write(logger, LOG_PROFILING, "\n");
+
+ dm_logger_write(logger, LOG_PROFILING, "==Stats==\n");
+ dm_logger_write(logger, LOG_PROFILING,
+ "render avgRender minWindow midPoint maxWindow vsyncToFlip flipToVsync #vsyncBetweenFlip #frame insertDuration vTotalMin vTotalMax eventTrigs vSyncTime1 vSyncTime2 vSyncTime3 vSyncTime4 vSyncTime5 flags\n");
+
+ for (i = 0; i < core_stats->index && i < core_stats->entries; i++) {
+ dm_logger_write(logger, LOG_PROFILING,
+ "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u\n",
+ time[i].render_time_in_us,
+ time[i].avg_render_time_in_us_last_ten,
+ time[i].min_window,
+ time[i].lfc_mid_point_in_us,
+ time[i].max_window,
+ time[i].vsync_to_flip_time_in_us,
+ time[i].flip_to_vsync_time_in_us,
+ time[i].num_vsync_between_flips,
+ time[i].num_frames_inserted,
+ time[i].inserted_duration_in_us,
+ time[i].v_total_min,
+ time[i].v_total_max,
+ time[i].event_triggers,
+ time[i].v_sync_time_in_us[0],
+ time[i].v_sync_time_in_us[1],
+ time[i].v_sync_time_in_us[2],
+ time[i].v_sync_time_in_us[3],
+ time[i].v_sync_time_in_us[4],
+ time[i].flags);
+ }
+ }
+ //GetLog()->Close(pLog);
+ //GetLog()->UnSetLogMask(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);
+}
+
+void mod_stats_reset_data(struct mod_stats *mod_stats)
+{
+ struct core_stats *core_stats = NULL;
+
+ if (mod_stats == NULL)
+ return;
+
+ core_stats = MOD_STATS_TO_CORE(mod_stats);
+
+ memset(core_stats->time, 0,
+ sizeof(struct stats_time_cache) * core_stats->entries);
+
+ core_stats->index = 0;
+}
+
+void mod_stats_update_flip(struct mod_stats *mod_stats,
+ unsigned long timestamp_in_ns)
+{
+ struct core_stats *core_stats = NULL;
+ struct stats_time_cache *time = NULL;
+ unsigned int index = 0;
+ unsigned int i;
+
+ if (mod_stats == NULL)
+ return;
+
+ core_stats = MOD_STATS_TO_CORE(mod_stats);
+
+ if (core_stats->index >= core_stats->entries)
+ return;
+
+ time = core_stats->time;
+ index = core_stats->index;
+
+ time[index].flip_timestamp_in_ns = timestamp_in_ns;
+ time[index].render_time_in_us =
+ timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;
+
+ if (index >= 10) {
+ for (i = 0; i < 10; i++)
+ time[index].avg_render_time_in_us_last_ten +=
+ time[index - i].render_time_in_us;
+ time[index].avg_render_time_in_us_last_ten /= 10;
+ }
+
+ if (time[index].num_vsync_between_flips > 0)
+ time[index].vsync_to_flip_time_in_us =
+ timestamp_in_ns - time[index].vupdate_timestamp_in_ns;
+ else
+ time[index].vsync_to_flip_time_in_us =
+ timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
+
+ core_stats->index++;
+}
+
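
The index-0 reservation made in mod_stats_create() is what lets the code above read entry index - 1 unconditionally. A compressed userspace model of that bookkeeping, with invented timestamps:

#include <stdio.h>

#define ENTRIES 8

static unsigned long flip_ts[ENTRIES];   /* entry 0 stays zeroed */
static unsigned long render_ns[ENTRIES];
static unsigned int idx = 1;             /* start at 1, like the module */

static void update_flip(unsigned long ts_ns)
{
	if (idx >= ENTRIES)
		return;
	flip_ts[idx] = ts_ns;
	render_ns[idx] = ts_ns - flip_ts[idx - 1]; /* idx is never 0 */
	idx++;
}

int main(void)
{
	update_flip(16666000);   /* first flip */
	update_flip(33332000);   /* ~16.7 ms later */
	printf("render[2] = %lu ns\n", render_ns[2]);
	return 0;
}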
+void mod_stats_update_vupdate(struct mod_stats *mod_stats,
+ unsigned long timestamp_in_ns)
+{
+ struct core_stats *core_stats = NULL;
+ struct stats_time_cache *time = NULL;
+ unsigned int index = 0;
+
+ if (mod_stats == NULL)
+ return;
+
+ core_stats = MOD_STATS_TO_CORE(mod_stats);
+
+ if (core_stats->index >= core_stats->entries)
+ return;
+
+ time = core_stats->time;
+ index = core_stats->index;
+
+ time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
+ if (time[index].num_vsync_between_flips < MOD_STATS_NUM_VSYNCS)
+ time[index].v_sync_time_in_us[time[index].num_vsync_between_flips] =
+ timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
+ time[index].flip_to_vsync_time_in_us =
+ timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;
+
+ time[index].num_vsync_between_flips++;
+}
+
+void mod_stats_update_freesync(struct mod_stats *mod_stats,
+ unsigned int v_total_min,
+ unsigned int v_total_max,
+ unsigned int event_triggers,
+ unsigned int window_min,
+ unsigned int window_max,
+ unsigned int lfc_mid_point_in_us,
+ unsigned int inserted_frames,
+ unsigned int inserted_duration_in_us)
+{
+ struct core_stats *core_stats = NULL;
+ struct stats_time_cache *time = NULL;
+ unsigned int index = 0;
+
+ if (mod_stats == NULL)
+ return;
+
+ core_stats = MOD_STATS_TO_CORE(mod_stats);
+
+ if (core_stats->index >= core_stats->entries)
+ return;
+
+ time = core_stats->time;
+ index = core_stats->index;
+
+ time[index].v_total_min = v_total_min;
+ time[index].v_total_max = v_total_max;
+ time[index].event_triggers = event_triggers;
+ time[index].min_window = window_min;
+ time[index].max_window = window_max;
+ time[index].lfc_mid_point_in_us = lfc_mid_point_in_us;
+ time[index].num_frames_inserted = inserted_frames;
+ time[index].inserted_duration_in_us = inserted_duration_in_us;
+}
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 15bd0f9acf73..5c840c022b52 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -24,8 +24,7 @@
#ifndef __KGD_PP_INTERFACE_H__
#define __KGD_PP_INTERFACE_H__
-extern const struct amd_ip_funcs pp_ip_funcs;
-extern const struct amd_pm_funcs pp_dpm_funcs;
+extern const struct amdgpu_ip_block_version pp_smu_ip_block;
struct amd_vce_state {
/* vce clocks */
diff --git a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
new file mode 100644
index 000000000000..a12d4f27cfa4
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SOC15_IH_CLIENTID_H__
+#define __SOC15_IH_CLIENTID_H__
+
+ /*
+ * vega10+ IH clients
+ */
+enum soc15_ih_clientid {
+ SOC15_IH_CLIENTID_IH = 0x00,
+ SOC15_IH_CLIENTID_ACP = 0x01,
+ SOC15_IH_CLIENTID_ATHUB = 0x02,
+ SOC15_IH_CLIENTID_BIF = 0x03,
+ SOC15_IH_CLIENTID_DCE = 0x04,
+ SOC15_IH_CLIENTID_ISP = 0x05,
+ SOC15_IH_CLIENTID_PCIE0 = 0x06,
+ SOC15_IH_CLIENTID_RLC = 0x07,
+ SOC15_IH_CLIENTID_SDMA0 = 0x08,
+ SOC15_IH_CLIENTID_SDMA1 = 0x09,
+ SOC15_IH_CLIENTID_SE0SH = 0x0a,
+ SOC15_IH_CLIENTID_SE1SH = 0x0b,
+ SOC15_IH_CLIENTID_SE2SH = 0x0c,
+ SOC15_IH_CLIENTID_SE3SH = 0x0d,
+ SOC15_IH_CLIENTID_SYSHUB = 0x0e,
+ SOC15_IH_CLIENTID_THM = 0x0f,
+ SOC15_IH_CLIENTID_UVD = 0x10,
+ SOC15_IH_CLIENTID_VCE0 = 0x11,
+ SOC15_IH_CLIENTID_VMC = 0x12,
+ SOC15_IH_CLIENTID_XDMA = 0x13,
+ SOC15_IH_CLIENTID_GRBM_CP = 0x14,
+ SOC15_IH_CLIENTID_ATS = 0x15,
+ SOC15_IH_CLIENTID_ROM_SMUIO = 0x16,
+ SOC15_IH_CLIENTID_DF = 0x17,
+ SOC15_IH_CLIENTID_VCE1 = 0x18,
+ SOC15_IH_CLIENTID_PWR = 0x19,
+ SOC15_IH_CLIENTID_UTCL2 = 0x1b,
+ SOC15_IH_CLIENTID_EA = 0x1c,
+ SOC15_IH_CLIENTID_UTCL2LOG = 0x1d,
+ SOC15_IH_CLIENTID_MP0 = 0x1e,
+ SOC15_IH_CLIENTID_MP1 = 0x1f,
+
+ SOC15_IH_CLIENTID_MAX,
+
+ SOC15_IH_CLIENTID_VCN = SOC15_IH_CLIENTID_UVD
+};
+
+#endif
+
+
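Note (illustrative, not part of the patch): IP blocks consume these client IDs when registering interrupt sources. A sketch assuming the amdgpu_irq_add_id() helper; the SDMA trap src_id of 224 matches what sdma_v4_0 uses elsewhere in the driver but is shown here only as an example:

static int example_sdma_irq_init(struct amdgpu_device *adev)
{
	/* interrupts are routed by (client id, source id) pairs */
	return amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, 224,
				 &adev->sdma.trap_irq);
}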
diff --git a/drivers/gpu/drm/amd/include/vega10_ip_offset.h b/drivers/gpu/drm/amd/include/vega10_ip_offset.h
index 4c78dba5cf25..976dd2d565ba 100644
--- a/drivers/gpu/drm/amd/include/vega10_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/vega10_ip_offset.h
@@ -24,191 +24,191 @@
#define MAX_INSTANCE 5
#define MAX_SEGMENT 5
-struct IP_BASE_INSTANCE
+struct IP_BASE_INSTANCE
{
unsigned int segment[MAX_SEGMENT];
};
-
-struct IP_BASE
+
+struct IP_BASE
{
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
-static const struct IP_BASE NBIF_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE NBIF_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE DCE_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DCE_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MP2_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE MP2_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE DF_BASE = { { { { 0x00007000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DF_BASE = { { { { 0x00007000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE UVD_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE UVD_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; //note: GLN does not use the first segment
-static const struct IP_BASE VCN_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE VCN_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; //note: GLN does not use the first segment
-static const struct IP_BASE DBGU_BASE = { { { { 0x00000180, 0x000001A0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DBGU_BASE = { { { { 0x00000180, 0x000001A0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; // not exist
-static const struct IP_BASE DBGU_NBIO_BASE = { { { { 0x000001C0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DBGU_NBIO_BASE = { { { { 0x000001C0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; // not exist
-static const struct IP_BASE DBGU_IO_BASE = { { { { 0x000001E0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DBGU_IO_BASE = { { { { 0x000001E0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; // not exist
-static const struct IP_BASE DFX_DAP_BASE = { { { { 0x000005A0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DFX_DAP_BASE = { { { { 0x000005A0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; // not exist
-static const struct IP_BASE DFX_BASE = { { { { 0x00000580, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DFX_BASE = { { { { 0x00000580, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; // this file does not contain registers
-static const struct IP_BASE ISP_BASE = { { { { 0x00018000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE ISP_BASE = { { { { 0x00018000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; // not exist
-static const struct IP_BASE SYSTEMHUB_BASE = { { { { 0x00000EA0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE SYSTEMHUB_BASE = { { { { 0x00000EA0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; // not exist
-static const struct IP_BASE L2IMU_BASE = { { { { 0x00007DC0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE L2IMU_BASE = { { { { 0x00007DC0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE IOHC_BASE = { { { { 0x00010000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE IOHC_BASE = { { { { 0x00010000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE ATHUB_BASE = { { { { 0x00000C20, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE ATHUB_BASE = { { { { 0x00000C20, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE VCE_BASE = { { { { 0x00007E00, 0x00048800, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE VCE_BASE = { { { { 0x00007E00, 0x00048800, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE GC_BASE = { { { { 0x00002000, 0x0000A000, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE GC_BASE = { { { { 0x00002000, 0x0000A000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MMHUB_BASE = { { { { 0x0001A000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE MMHUB_BASE = { { { { 0x0001A000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE RSMU_BASE = { { { { 0x00012000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE RSMU_BASE = { { { { 0x00012000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE HDP_BASE = { { { { 0x00000F20, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE HDP_BASE = { { { { 0x00000F20, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE OSSSYS_BASE = { { { { 0x000010A0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE OSSSYS_BASE = { { { { 0x000010A0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE SDMA0_BASE = { { { { 0x00001260, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE SDMA0_BASE = { { { { 0x00001260, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE SDMA1_BASE = { { { { 0x00001460, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE SDMA1_BASE = { { { { 0x00001460, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE XDMA_BASE = { { { { 0x00003400, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE XDMA_BASE = { { { { 0x00003400, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE UMC_BASE = { { { { 0x00014000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE UMC_BASE = { { { { 0x00014000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE THM_BASE = { { { { 0x00016600, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE THM_BASE = { { { { 0x00016600, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE SMUIO_BASE = { { { { 0x00016800, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE SMUIO_BASE = { { { { 0x00016800, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE PWR_BASE = { { { { 0x00016A00, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE PWR_BASE = { { { { 0x00016A00, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0, 0, 0, 0 } },
- { { 0x00016E00, 0, 0, 0, 0 } },
- { { 0x00017000, 0, 0, 0, 0 } },
- { { 0x00017200, 0, 0, 0, 0 } },
- { { 0x00017E00, 0, 0, 0, 0 } } } };
-static const struct IP_BASE FUSE_BASE = { { { { 0x00017400, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+ { { 0x00016E00, 0, 0, 0, 0 } },
+ { { 0x00017000, 0, 0, 0, 0 } },
+ { { 0x00017200, 0, 0, 0, 0 } },
+ { { 0x00017E00, 0, 0, 0, 0 } } } };
+static const struct IP_BASE FUSE_BASE = { { { { 0x00017400, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
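Note: the hunk above only strips trailing whitespace. For orientation, each table is indexed by IP instance and by address-space segment; the real lookups go through the soc15 register helpers, but the effect is roughly (illustrative only):

static inline unsigned int example_ip_base(const struct IP_BASE *ip,
					   unsigned int inst,
					   unsigned int seg)
{
	/* e.g. example_ip_base(&SDMA0_BASE, 0, 0) == 0x00001260 */
	return ip->instance[inst].segment[seg];
}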
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index b989bf3542d6..3da3dccd13e2 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include "amd_shared.h"
#include "amd_powerplay.h"
-#include "pp_instance.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"
@@ -37,18 +36,14 @@
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
enum amd_pm_state_type *user_state);
-static inline int pp_check(struct pp_instance *handle)
-{
- if (handle == NULL)
- return -EINVAL;
+static const struct amd_pm_funcs pp_dpm_funcs;
- if (handle->hwmgr == NULL || handle->hwmgr->smumgr_funcs == NULL)
+static inline int pp_check(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
return -EINVAL;
- if (handle->pm_en == 0)
- return PP_DPM_DISABLED;
-
- if (handle->hwmgr->hwmgr_func == NULL)
+ if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
return PP_DPM_DISABLED;
return 0;
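Note (illustrative helpers, not part of the patch): the simplified pp_check() above stays tri-state, which is why the callers below deliberately test it two different ways:

/*
 * pp_check() returns:
 *   -EINVAL         - no usable hwmgr/smumgr funcs, bail out entirely
 *   PP_DPM_DISABLED - SMU usable but DPM disabled (a positive status)
 *   0               - fully enabled
 *
 * so "ret >= 0" guards SMU-level work (pp_sw_init/pp_sw_fini) while
 * "ret == 0" guards DPM-level work (pp_hw_fini, pp_suspend, ...).
 */
static bool example_smu_usable(struct pp_hwmgr *hwmgr)
{
	return pp_check(hwmgr) >= 0;
}

static bool example_dpm_enabled(struct pp_hwmgr *hwmgr)
{
	return pp_check(hwmgr) == 0;
}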
@@ -56,54 +51,52 @@ static inline int pp_check(struct pp_instance *handle)
static int amd_powerplay_create(struct amdgpu_device *adev)
{
- struct pp_instance *instance;
+ struct pp_hwmgr *hwmgr;
if (adev == NULL)
return -EINVAL;
- instance = kzalloc(sizeof(struct pp_instance), GFP_KERNEL);
- if (instance == NULL)
+ hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
+ if (hwmgr == NULL)
return -ENOMEM;
- instance->parent = adev;
- instance->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
- instance->device = adev->powerplay.cgs_device;
- mutex_init(&instance->pp_lock);
- adev->powerplay.pp_handle = instance;
-
+ hwmgr->adev = adev;
+ hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
+ hwmgr->device = amdgpu_cgs_create_device(adev);
+ mutex_init(&hwmgr->smu_lock);
+ hwmgr->chip_family = adev->family;
+ hwmgr->chip_id = adev->asic_type;
+ hwmgr->feature_mask = amdgpu_pp_feature_mask;
+ adev->powerplay.pp_handle = hwmgr;
+ adev->powerplay.pp_funcs = &pp_dpm_funcs;
return 0;
}
-static int amd_powerplay_destroy(void *handle)
+static int amd_powerplay_destroy(struct amdgpu_device *adev)
{
- struct pp_instance *instance = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- kfree(instance->hwmgr->hardcode_pp_table);
- instance->hwmgr->hardcode_pp_table = NULL;
+ kfree(hwmgr->hardcode_pp_table);
+ hwmgr->hardcode_pp_table = NULL;
- kfree(instance->hwmgr);
- instance->hwmgr = NULL;
+ kfree(hwmgr);
+ hwmgr = NULL;
- kfree(instance);
- instance = NULL;
return 0;
}
static int pp_early_init(void *handle)
{
int ret;
- struct pp_instance *pp_handle = NULL;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = handle;
ret = amd_powerplay_create(adev);
if (ret != 0)
return ret;
- pp_handle = adev->powerplay.pp_handle;
-
- ret = hwmgr_early_init(pp_handle);
+ ret = hwmgr_early_init(adev->powerplay.pp_handle);
if (ret)
return -EINVAL;
@@ -112,15 +105,13 @@ static int pp_early_init(void *handle)
static int pp_sw_init(void *handle)
{
- struct pp_hwmgr *hwmgr;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret >= 0) {
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->smumgr_funcs->smu_init == NULL)
return -EINVAL;
@@ -128,55 +119,57 @@ static int pp_sw_init(void *handle)
pr_debug("amdgpu: powerplay sw initialized\n");
}
+
return ret;
}
static int pp_sw_fini(void *handle)
{
- struct pp_hwmgr *hwmgr;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret >= 0) {
- hwmgr = pp_handle->hwmgr;
+ if (hwmgr->smumgr_funcs->smu_fini != NULL)
+ hwmgr->smumgr_funcs->smu_fini(hwmgr);
+ }
- if (hwmgr->smumgr_funcs->smu_fini == NULL)
- return -EINVAL;
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+ amdgpu_ucode_fini_bo(adev);
- ret = hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
- }
- return ret;
+ return 0;
}
static int pp_hw_init(void *handle)
{
int ret = 0;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
- struct pp_hwmgr *hwmgr;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- ret = pp_check(pp_handle);
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+ amdgpu_ucode_init_bo(adev);
- if (ret >= 0) {
- hwmgr = pp_handle->hwmgr;
+ ret = pp_check(hwmgr);
+ if (ret >= 0) {
if (hwmgr->smumgr_funcs->start_smu == NULL)
return -EINVAL;
- if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
+ if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
pr_err("smc start failed\n");
- hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
+ hwmgr->smumgr_funcs->smu_fini(hwmgr);
return -EINVAL;
}
if (ret == PP_DPM_DISABLED)
goto exit;
- ret = hwmgr_hw_init(pp_handle);
+ ret = hwmgr_hw_init(hwmgr);
if (ret)
goto exit;
}
return ret;
exit:
- pp_handle->pm_en = 0;
+ hwmgr->pm_en = 0;
cgs_notify_dpm_enabled(hwmgr->device, false);
return 0;
@@ -184,24 +177,27 @@ exit:
static int pp_hw_fini(void *handle)
{
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret == 0)
- hwmgr_hw_fini(pp_handle);
+ hwmgr_hw_fini(hwmgr);
return 0;
}
static int pp_late_init(void *handle)
{
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
+
if (ret == 0)
- pp_dpm_dispatch_tasks(pp_handle,
+ pp_dpm_dispatch_tasks(hwmgr,
AMD_PP_TASK_COMPLETE_INIT, NULL);
return 0;
@@ -209,7 +205,9 @@ static int pp_late_init(void *handle)
static void pp_late_fini(void *handle)
{
- amd_powerplay_destroy(handle);
+ struct amdgpu_device *adev = handle;
+
+ amd_powerplay_destroy(adev);
}
@@ -231,17 +229,15 @@ static int pp_sw_reset(void *handle)
static int pp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
@@ -254,44 +250,43 @@ static int pp_set_powergating_state(void *handle,
static int pp_suspend(void *handle)
{
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret == 0)
- hwmgr_hw_suspend(pp_handle);
+ hwmgr_hw_suspend(hwmgr);
return 0;
}
static int pp_resume(void *handle)
{
- struct pp_hwmgr *hwmgr;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret < 0)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->smumgr_funcs->start_smu == NULL)
return -EINVAL;
- if (hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
+ if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
pr_err("smc start failed\n");
- hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
+ hwmgr->smumgr_funcs->smu_fini(hwmgr);
return -EINVAL;
}
if (ret == PP_DPM_DISABLED)
return 0;
- return hwmgr_hw_resume(pp_handle);
+ return hwmgr_hw_resume(hwmgr);
}
-const struct amd_ip_funcs pp_ip_funcs = {
+static const struct amd_ip_funcs pp_ip_funcs = {
.name = "powerplay",
.early_init = pp_early_init,
.late_init = pp_late_init,
@@ -309,6 +304,15 @@ const struct amd_ip_funcs pp_ip_funcs = {
.set_powergating_state = pp_set_powergating_state,
};
+const struct amdgpu_ip_block_version pp_smu_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &pp_ip_funcs,
+};
+
static int pp_dpm_load_fw(void *handle)
{
return 0;
@@ -321,17 +325,14 @@ static int pp_dpm_fw_loading_complete(void *handle)
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
@@ -379,25 +380,22 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
static int pp_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (level == hwmgr->dpm_level)
return 0;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
pp_dpm_en_umd_pstate(hwmgr, &level);
hwmgr->request_dpm_level = level;
- hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
- mutex_unlock(&pp_handle->pp_lock);
+ hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+ mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -405,152 +403,135 @@ static int pp_dpm_force_performance_level(void *handle,
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
void *handle)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
enum amd_dpm_forced_level level;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
level = hwmgr->dpm_level;
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return level;
}
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
uint32_t clk = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->get_sclk == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return clk;
}
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
uint32_t clk = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->get_mclk == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return clk;
}
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->powergate_vce == NULL) {
pr_info("%s was not implemented.\n", __func__);
return;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
}
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
pr_info("%s was not implemented.\n", __func__);
return;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
}
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
enum amd_pm_state_type *user_state)
{
int ret = 0;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr_handle_task(pp_handle, task_id, user_state);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr_handle_task(hwmgr, task_id, user_state);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
- struct pp_hwmgr *hwmgr;
+ struct pp_hwmgr *hwmgr = handle;
struct pp_power_state *state;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
enum amd_pm_state_type pm_type;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->current_ps == NULL)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
state = hwmgr->current_ps;
@@ -571,147 +552,129 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
pm_type = POWER_STATE_TYPE_DEFAULT;
break;
}
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return pm_type;
}
static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
return;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
}
static uint32_t pp_dpm_get_fan_control_mode(void *handle)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
uint32_t mode = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return mode;
}
static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_get_pp_num_states(void *handle,
struct pp_states_info *data)
{
- struct pp_hwmgr *hwmgr;
+ struct pp_hwmgr *hwmgr = handle;
int i;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
memset(data, 0, sizeof(*data));
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->ps == NULL)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
data->nums = hwmgr->num_ps;
@@ -735,73 +698,68 @@ static int pp_dpm_get_pp_num_states(void *handle,
data->states[i] = POWER_STATE_TYPE_DEFAULT;
}
}
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return 0;
}
static int pp_dpm_get_pp_table(void *handle, char **table)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
int size = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (!hwmgr->soft_pp_table)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
*table = (char *)hwmgr->soft_pp_table;
size = hwmgr->soft_pp_table_size;
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return size;
}
static int amd_powerplay_reset(void *handle)
{
- struct pp_instance *instance = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret;
- ret = pp_check(instance);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- ret = pp_hw_fini(instance);
+ ret = pp_hw_fini(hwmgr);
if (ret)
return ret;
- ret = hwmgr_hw_init(instance);
+ ret = hwmgr_hw_init(hwmgr);
if (ret)
return ret;
- return hwmgr_handle_task(instance, AMD_PP_TASK_COMPLETE_INIT, NULL);
+ return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
if (!hwmgr->hardcode_pp_table) {
hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
hwmgr->soft_pp_table_size,
GFP_KERNEL);
if (!hwmgr->hardcode_pp_table) {
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return -ENOMEM;
}
}
@@ -809,7 +767,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
memcpy(hwmgr->hardcode_pp_table, buf, size);
hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
ret = amd_powerplay_reset(handle);
if (ret)
@@ -827,163 +785,142 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
static int pp_dpm_force_clock_level(void *handle,
enum pp_clock_type type, uint32_t mask)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->force_clock_level == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
else
ret = -EINVAL;
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_print_clock_levels(void *handle,
enum pp_clock_type type, char *buf)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_get_sclk_od(void *handle)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_get_mclk_od(void *handle)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
if (value == NULL)
return -EINVAL;
- hwmgr = pp_handle->hwmgr;
-
switch (idx) {
case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
*((uint32_t *)value) = hwmgr->pstate_sclk;
@@ -992,9 +929,9 @@ static int pp_dpm_read_sensor(void *handle, int idx,
*((uint32_t *)value) = hwmgr->pstate_mclk;
return 0;
default:
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
}
@@ -1002,17 +939,14 @@ static int pp_dpm_read_sensor(void *handle, int idx,
static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return NULL;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr && idx < hwmgr->num_vce_state_tables)
return &hwmgr->vce_states[idx];
return NULL;
@@ -1020,14 +954,11 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
static int pp_get_power_profile_mode(void *handle, char *buf)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
- if (!buf || pp_check(pp_handle))
+ if (!buf || pp_check(hwmgr))
return -EINVAL;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
return snprintf(buf, PAGE_SIZE, "\n");
@@ -1038,36 +969,30 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = -EINVAL;
- if (pp_check(pp_handle))
+ if (pp_check(hwmgr))
return -EINVAL;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
return -EINVAL;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
- if (pp_check(pp_handle))
+ if (pp_check(hwmgr))
return -EINVAL;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
pr_info("%s was not implemented.\n", __func__);
return -EINVAL;
@@ -1079,16 +1004,13 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
static int pp_dpm_switch_power_profile(void *handle,
enum PP_SMC_POWER_PROFILE type, bool en)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
long workload;
uint32_t index;
- if (pp_check(pp_handle))
+ if (pp_check(hwmgr))
return -EINVAL;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
return -EINVAL;
@@ -1097,7 +1019,7 @@ static int pp_dpm_switch_power_profile(void *handle,
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
if (!en) {
hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
@@ -1113,7 +1035,7 @@ static int pp_dpm_switch_power_profile(void *handle,
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1125,46 +1047,40 @@ static int pp_dpm_notify_smu_memory_info(void *handle,
uint32_t mc_addr_hi,
uint32_t size)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
pr_info("%s was not implemented.\n", __func__);
return -EINVAL;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
virtual_addr_hi, mc_addr_low, mc_addr_hi,
size);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_set_power_limit(void *handle, uint32_t limit)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->set_power_limit == NULL) {
pr_info("%s was not implemented.\n", __func__);
return -EINVAL;
@@ -1176,20 +1092,19 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
if (limit > hwmgr->default_power_limit)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
hwmgr->power_limit = limit;
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
@@ -1197,16 +1112,14 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
if (limit == NULL)
return -EINVAL;
- hwmgr = pp_handle->hwmgr;
-
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
if (default_limit)
*limit = hwmgr->default_power_limit;
else
*limit = hwmgr->power_limit;
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
@@ -1214,42 +1127,37 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
static int pp_display_configuration_change(void *handle,
const struct amd_pp_display_configuration *display_config)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
phm_store_dal_configuration_data(hwmgr, display_config);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return 0;
}
static int pp_get_display_power_level(void *handle,
struct amd_pp_simple_clock_info *output)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (output == NULL)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = phm_get_dal_power_level(hwmgr, output);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
@@ -1258,18 +1166,15 @@ static int pp_get_current_clocks(void *handle,
{
struct amd_pp_simple_clock_info simple_clocks;
struct pp_clock_info hw_clocks;
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
phm_get_dal_power_level(hwmgr, &simple_clocks);
@@ -1283,7 +1188,7 @@ static int pp_get_current_clocks(void *handle,
if (ret) {
pr_info("Error in phm_get_clock_info \n");
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return -EINVAL;
}
@@ -1303,29 +1208,26 @@ static int pp_get_current_clocks(void *handle,
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
}
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return 0;
}
static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (clocks == NULL)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = phm_get_clock_by_type(hwmgr, type, clocks);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
@@ -1333,21 +1235,19 @@ static int pp_get_clock_by_type_with_latency(void *handle,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_latency *clocks)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
if (!clocks)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ mutex_lock(&hwmgr->smu_lock);
ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
@@ -1355,47 +1255,41 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
if (!clocks)
return -EINVAL;
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
-
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_set_watermarks_for_clocks_ranges(void *handle,
struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
if (!wm_with_clock_ranges)
return -EINVAL;
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
-
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
wm_with_clock_ranges);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
@@ -1403,22 +1297,19 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle,
static int pp_display_clock_voltage_request(void *handle,
struct pp_display_clock_request *clock)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
if (!clock)
return -EINVAL;
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
-
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = phm_display_clock_voltage_request(hwmgr, clock);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
@@ -1426,42 +1317,36 @@ static int pp_display_clock_voltage_request(void *handle,
static int pp_get_display_mode_validation_clocks(void *handle,
struct amd_pp_simple_clock_info *clocks)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (clocks == NULL)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
ret = phm_get_max_high_clocks(hwmgr, clocks);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_set_mmhub_powergating_by_smu(void *handle)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
@@ -1470,7 +1355,7 @@ static int pp_set_mmhub_powergating_by_smu(void *handle)
return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
}
-const struct amd_pm_funcs pp_dpm_funcs = {
+static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
.force_performance_level = pp_dpm_force_performance_level,
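Note (sketch, not part of the patch): the net effect of the refactor above is that the opaque handle passed to every pp_dpm_* callback is now the hwmgr itself, with both pp_handle and pp_funcs wired up in amd_powerplay_create(). How the rest of amdgpu reaches these callbacks, roughly as the wrappers in the amdgpu dpm headers do:

static uint32_t example_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	/* pp_handle is a struct pp_hwmgr *, pp_funcs is &pp_dpm_funcs */
	return adev->powerplay.pp_funcs->get_sclk(
			adev->powerplay.pp_handle, low);
}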
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index e8c5a4f84324..f868b955da92 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -24,14 +24,14 @@
# It provides the hardware management services for the driver.
HARDWARE_MGR = hwmgr.o processpptables.o \
- hardwaremanager.o cz_hwmgr.o \
- cz_clockpowergating.o pppcielanes.o\
+ hardwaremanager.o smu8_hwmgr.o \
+ pppcielanes.o\
process_pptables_v1_0.o ppatomctrl.o ppatomfwctrl.o \
smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
smu7_clockpowergating.o \
vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
- vega10_thermal.o rv_hwmgr.o pp_psm.o\
- pp_overdriver.o
+ vega10_thermal.o smu10_hwmgr.o pp_psm.o\
+ pp_overdriver.o smu_helper.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
deleted file mode 100644
index 416abebb8b86..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "cz_clockpowergating.h"
-#include "cz_ppsmc.h"
-
-/* PhyID -> Status Mapping in DDI_PHY_GEN_STATUS
- 0 GFX0L (3:0), (27:24),
- 1 GFX0H (7:4), (31:28),
- 2 GFX1L (3:0), (19:16),
- 3 GFX1H (7:4), (23:20),
- 4 DDIL (3:0), (11: 8),
- 5 DDIH (7:4), (15:12),
- 6 DDI2L (3:0), ( 3: 0),
- 7 DDI2H (7:4), ( 7: 4),
-*/
-#define DDI_PHY_GEN_STATUS_VAL(phyID) (1 << ((3 - ((phyID & 0x07)/2))*8 + (phyID & 0x01)*4))
-#define IS_PHY_ID_USED_BY_PLL(PhyID) (((0xF3 & (1 << PhyID)) & 0xFF) ? true : false)
-
-
-int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
-{
- int ret = 0;
-
- switch (block) {
- case PHM_AsicBlock_UVD_MVC:
- case PHM_AsicBlock_UVD:
- case PHM_AsicBlock_UVD_HD:
- case PHM_AsicBlock_UVD_SD:
- if (gating == PHM_ClockGateSetting_StaticOff)
- ret = cz_dpm_powerdown_uvd(hwmgr);
- else
- ret = cz_dpm_powerup_uvd(hwmgr);
- break;
- case PHM_AsicBlock_GFX:
- default:
- break;
- }
-
- return ret;
-}
-
-
-bool cz_phm_is_safe_for_asic_block(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, enum PHM_AsicBlock block)
-{
- return true;
-}
-
-
-int cz_phm_enable_disable_gfx_power_gating(struct pp_hwmgr *hwmgr, bool enable)
-{
- return 0;
-}
-
-int cz_phm_smu_power_up_down_pcie(struct pp_hwmgr *hwmgr, uint32_t target, bool up, uint32_t args)
-{
- /* TODO */
- return 0;
-}
-
-int cz_phm_initialize_display_phy_access(struct pp_hwmgr *hwmgr, bool initialize, bool accesshw)
-{
- /* TODO */
- return 0;
-}
-
-int cz_phm_get_display_phy_access_info(struct pp_hwmgr *hwmgr)
-{
- /* TODO */
- return 0;
-}
-
-int cz_phm_gate_unused_display_phys(struct pp_hwmgr *hwmgr)
-{
- /* TODO */
- return 0;
-}
-
-int cz_phm_ungate_all_display_phys(struct pp_hwmgr *hwmgr)
-{
- /* TODO */
- return 0;
-}
-
-int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- uint32_t dpm_features = 0;
-
- if (enable &&
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM)) {
- cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled;
- dpm_features |= UVD_DPM_MASK;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
- } else {
- dpm_features |= UVD_DPM_MASK;
- cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
- }
- return 0;
-}
-
-int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- uint32_t dpm_features = 0;
-
- if (enable && phm_cap_enabled(
- hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEDPM)) {
- cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled;
- dpm_features |= VCE_DPM_MASK;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
- } else {
- dpm_features |= VCE_DPM_MASK;
- cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
- }
-
- return 0;
-}
-
-
-void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
- cz_hwmgr->uvd_power_gated = bgate;
-
- if (bgate) {
- cgs_set_powergating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
- cz_dpm_update_uvd_dpm(hwmgr, true);
- cz_dpm_powerdown_uvd(hwmgr);
- } else {
- cz_dpm_powerup_uvd(hwmgr);
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
- cgs_set_powergating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
- cz_dpm_update_uvd_dpm(hwmgr, false);
- }
-
-}
-
-void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
- if (bgate) {
- cgs_set_powergating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- cgs_set_clockgating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
- cz_enable_disable_vce_dpm(hwmgr, false);
- cz_dpm_powerdown_vce(hwmgr);
- cz_hwmgr->vce_power_gated = true;
- } else {
- cz_dpm_powerup_vce(hwmgr);
- cz_hwmgr->vce_power_gated = false;
- cgs_set_clockgating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- cgs_set_powergating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
- cz_dpm_update_vce_dpm(hwmgr);
- cz_enable_disable_vce_dpm(hwmgr, true);
- }
-}
-
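The DDI_PHY_GEN_STATUS_VAL macro deleted above packs the PhyID-to-bit-field
table from the preceding comment into a single shift expression. A minimal
standalone sketch (illustrative, not kernel code) that checks the macro
against that table:

#include <stdio.h>

#define DDI_PHY_GEN_STATUS_VAL(phyID) \
	(1 << ((3 - ((phyID & 0x07) / 2)) * 8 + (phyID & 0x01) * 4))

int main(void)
{
	/* low bit of each PhyID's field, per the deleted comment block */
	static const int expected_low_bit[8] = { 24, 28, 16, 20, 8, 12, 0, 4 };
	int phy;

	for (phy = 0; phy < 8; phy++)
		printf("PhyID %d -> 0x%08x (expected bit %d)\n", phy,
		       (unsigned int)DDI_PHY_GEN_STATUS_VAL(phy),
		       expected_low_bit[phy]);
	return 0;
}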
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
deleted file mode 100644
index 92f707bc46e7..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _CZ_CLOCK_POWER_GATING_H_
-#define _CZ_CLOCK_POWER_GATING_H_
-
-#include "cz_hwmgr.h"
-#include "pp_asicblocks.h"
-
-extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
-extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
-extern void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
-#endif /* _CZ_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index af1b22d964fd..229030027f3e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -30,22 +30,24 @@
#include <drm/amdgpu_drm.h>
#include "power_state.h"
#include "hwmgr.h"
-#include "pppcielanes.h"
-#include "ppatomctrl.h"
#include "ppsmc.h"
#include "amd_acpi.h"
#include "pp_psm.h"
extern const struct pp_smumgr_func ci_smu_funcs;
-extern const struct pp_smumgr_func cz_smu_funcs;
+extern const struct pp_smumgr_func smu8_smu_funcs;
extern const struct pp_smumgr_func iceland_smu_funcs;
extern const struct pp_smumgr_func tonga_smu_funcs;
extern const struct pp_smumgr_func fiji_smu_funcs;
extern const struct pp_smumgr_func polaris10_smu_funcs;
extern const struct pp_smumgr_func vega10_smu_funcs;
-extern const struct pp_smumgr_func rv_smu_funcs;
-extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);
+extern const struct pp_smumgr_func smu10_smu_funcs;
+
+extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
+extern int smu8_init_function_pointers(struct pp_hwmgr *hwmgr);
+extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
@@ -54,32 +56,6 @@ static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
-uint8_t convert_to_vid(uint16_t vddc)
-{
- return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
-
-uint16_t convert_to_vddc(uint8_t vid)
-{
- return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
-}
-
-uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
-{
- u32 mask = 0;
- u32 shift = 0;
-
- shift = (offset % 4) << 3;
- if (size == sizeof(uint8_t))
- mask = 0xFF << shift;
- else if (size == sizeof(uint16_t))
- mask = 0xFFFF << shift;
-
- original_data &= ~mask;
- original_data |= (field << shift);
- return original_data;
-}
-
static int phm_thermal_l2h_irq(void *private_data,
unsigned src_id, const uint32_t *iv_entry)
{
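The convert_to_vid()/convert_to_vddc() helpers removed above are inverses
over the SVI2 encoding vid = (6200 - vddc * VOLTAGE_SCALE) / 25. A small
standalone sketch of the round trip, assuming VOLTAGE_SCALE is 4 as defined
elsewhere in powerplay (an assumption, not visible in this diff):

#include <stdint.h>
#include <stdio.h>

#define VOLTAGE_SCALE 4	/* assumed value; taken from powerplay headers */

static uint8_t convert_to_vid(uint16_t vddc)
{
	return (uint8_t)((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}

static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t)((6200 - (vid * 25)) / VOLTAGE_SCALE);
}

int main(void)
{
	uint16_t vddc = 1100;			/* mV */
	uint8_t vid = convert_to_vid(vddc);	/* (6200 - 4400) / 25 = 72 */

	printf("vddc=%umV -> vid=%u -> vddc=%umV\n",
	       vddc, vid, convert_to_vddc(vid));	/* round-trips to 1100 */
	return 0;
}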
@@ -140,23 +116,11 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE;
}
-int hwmgr_early_init(struct pp_instance *handle)
+int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
- struct pp_hwmgr *hwmgr;
-
- if (handle == NULL)
+ if (hwmgr == NULL)
return -EINVAL;
- hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
- if (hwmgr == NULL)
- return -ENOMEM;
-
- handle->hwmgr = hwmgr;
- hwmgr->adev = handle->parent;
- hwmgr->device = handle->device;
- hwmgr->chip_family = ((struct amdgpu_device *)handle->parent)->family;
- hwmgr->chip_id = ((struct amdgpu_device *)handle->parent)->asic_type;
- hwmgr->feature_mask = amdgpu_pp_feature_mask;
hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
hwmgr->power_source = PP_PowerSource_AC;
hwmgr->pp_table_version = PP_TABLE_V1;
@@ -180,8 +144,8 @@ int hwmgr_early_init(struct pp_instance *handle)
break;
case AMDGPU_FAMILY_CZ:
hwmgr->od_enabled = false;
- hwmgr->smumgr_funcs = &cz_smu_funcs;
- cz_init_function_pointers(hwmgr);
+ hwmgr->smumgr_funcs = &smu8_smu_funcs;
+ smu8_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_VI:
switch (hwmgr->chip_id) {
@@ -230,8 +194,8 @@ int hwmgr_early_init(struct pp_instance *handle)
switch (hwmgr->chip_id) {
case CHIP_RAVEN:
hwmgr->od_enabled = false;
- hwmgr->smumgr_funcs = &rv_smu_funcs;
- rv_init_function_pointers(hwmgr);
+ hwmgr->smumgr_funcs = &smu10_smu_funcs;
+ smu10_init_function_pointers(hwmgr);
break;
default:
return -EINVAL;
@@ -244,16 +208,13 @@ int hwmgr_early_init(struct pp_instance *handle)
return 0;
}
-int hwmgr_hw_init(struct pp_instance *handle)
+int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
{
- struct pp_hwmgr *hwmgr;
int ret = 0;
- if (handle == NULL)
+ if (hwmgr == NULL)
return -EINVAL;
- hwmgr = handle->hwmgr;
-
if (hwmgr->pptable_func == NULL ||
hwmgr->pptable_func->pptable_init == NULL ||
hwmgr->hwmgr_func->backend_init == NULL)
@@ -299,15 +260,11 @@ err:
return ret;
}
-int hwmgr_hw_fini(struct pp_instance *handle)
+int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
{
- struct pp_hwmgr *hwmgr;
-
- if (handle == NULL || handle->hwmgr == NULL)
+ if (hwmgr == NULL)
return -EINVAL;
- hwmgr = handle->hwmgr;
-
phm_stop_thermal_controller(hwmgr);
psm_set_boot_states(hwmgr);
psm_adjust_power_state_dynamic(hwmgr, false, NULL);
@@ -321,15 +278,13 @@ int hwmgr_hw_fini(struct pp_instance *handle)
return psm_fini_power_state_table(hwmgr);
}
-int hwmgr_hw_suspend(struct pp_instance *handle)
+int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr)
{
- struct pp_hwmgr *hwmgr;
int ret = 0;
- if (handle == NULL || handle->hwmgr == NULL)
+ if (hwmgr == NULL)
return -EINVAL;
- hwmgr = handle->hwmgr;
phm_disable_smc_firmware_ctf(hwmgr);
ret = psm_set_boot_states(hwmgr);
if (ret)
@@ -342,15 +297,13 @@ int hwmgr_hw_suspend(struct pp_instance *handle)
return ret;
}
-int hwmgr_hw_resume(struct pp_instance *handle)
+int hwmgr_hw_resume(struct pp_hwmgr *hwmgr)
{
- struct pp_hwmgr *hwmgr;
int ret = 0;
- if (handle == NULL || handle->hwmgr == NULL)
+ if (hwmgr == NULL)
return -EINVAL;
- hwmgr = handle->hwmgr;
ret = phm_setup_asic(hwmgr);
if (ret)
return ret;
@@ -385,17 +338,14 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
}
}
-int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
+int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
enum amd_pm_state_type *user_state)
{
int ret = 0;
- struct pp_hwmgr *hwmgr;
- if (handle == NULL || handle->hwmgr == NULL)
+ if (hwmgr == NULL)
return -EINVAL;
- hwmgr = handle->hwmgr;
-
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
ret = phm_set_cpu_power_state(hwmgr);
@@ -432,468 +382,6 @@ int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
}
return ret;
}
-/**
- * Returns once the part of the register indicated by the mask has
- * reached the given value.
- */
-int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
- uint32_t value, uint32_t mask)
-{
- uint32_t i;
- uint32_t cur_value;
-
- if (hwmgr == NULL || hwmgr->device == NULL) {
- pr_err("Invalid Hardware Manager!");
- return -EINVAL;
- }
-
- for (i = 0; i < hwmgr->usec_timeout; i++) {
- cur_value = cgs_read_register(hwmgr->device, index);
- if ((cur_value & mask) == (value & mask))
- break;
- udelay(1);
- }
-
- /* timeout means wrong logic*/
- if (i == hwmgr->usec_timeout)
- return -1;
- return 0;
-}
-
-
-/**
- * Returns once the part of the register indicated by the mask has
- * reached the given value.The indirect space is described by giving
- * the memory-mapped index of the indirect index register.
- */
-int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask)
-{
- if (hwmgr == NULL || hwmgr->device == NULL) {
- pr_err("Invalid Hardware Manager!");
- return -EINVAL;
- }
-
- cgs_write_register(hwmgr->device, indirect_port, index);
- return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
-}
-
-int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
- uint32_t index,
- uint32_t value, uint32_t mask)
-{
- uint32_t i;
- uint32_t cur_value;
-
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- for (i = 0; i < hwmgr->usec_timeout; i++) {
- cur_value = cgs_read_register(hwmgr->device,
- index);
- if ((cur_value & mask) != (value & mask))
- break;
- udelay(1);
- }
-
- /* timeout means wrong logic */
- if (i == hwmgr->usec_timeout)
- return -ETIME;
- return 0;
-}
-
-int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask)
-{
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- cgs_write_register(hwmgr->device, indirect_port, index);
- return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
- value, mask);
-}
-
-bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
-{
- return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
-}
-
-bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
-{
- return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
-}
-
-
-int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
-{
- uint32_t i, j;
- uint16_t vvalue;
- bool found = false;
- struct pp_atomctrl_voltage_table *table;
-
- PP_ASSERT_WITH_CODE((NULL != vol_table),
- "Voltage Table empty.", return -EINVAL);
-
- table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
- GFP_KERNEL);
-
- if (NULL == table)
- return -EINVAL;
-
- table->mask_low = vol_table->mask_low;
- table->phase_delay = vol_table->phase_delay;
-
- for (i = 0; i < vol_table->count; i++) {
- vvalue = vol_table->entries[i].value;
- found = false;
-
- for (j = 0; j < table->count; j++) {
- if (vvalue == table->entries[j].value) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- table->entries[table->count].value = vvalue;
- table->entries[table->count].smio_low =
- vol_table->entries[i].smio_low;
- table->count++;
- }
- }
-
- memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
- kfree(table);
- table = NULL;
- return 0;
-}
-
-int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
- phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
- uint32_t i;
- int result;
-
- PP_ASSERT_WITH_CODE((0 != dep_table->count),
- "Voltage Dependency Table empty.", return -EINVAL);
-
- PP_ASSERT_WITH_CODE((NULL != vol_table),
- "vol_table empty.", return -EINVAL);
-
- vol_table->mask_low = 0;
- vol_table->phase_delay = 0;
- vol_table->count = dep_table->count;
-
- for (i = 0; i < dep_table->count; i++) {
- vol_table->entries[i].value = dep_table->entries[i].mvdd;
- vol_table->entries[i].smio_low = 0;
- }
-
- result = phm_trim_voltage_table(vol_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to trim MVDD table.", return result);
-
- return 0;
-}
-
-int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
- phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
- uint32_t i;
- int result;
-
- PP_ASSERT_WITH_CODE((0 != dep_table->count),
- "Voltage Dependency Table empty.", return -EINVAL);
-
- PP_ASSERT_WITH_CODE((NULL != vol_table),
- "vol_table empty.", return -EINVAL);
-
- vol_table->mask_low = 0;
- vol_table->phase_delay = 0;
- vol_table->count = dep_table->count;
-
- for (i = 0; i < dep_table->count; i++) {
- vol_table->entries[i].value = dep_table->entries[i].vddci;
- vol_table->entries[i].smio_low = 0;
- }
-
- result = phm_trim_voltage_table(vol_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to trim VDDCI table.", return result);
-
- return 0;
-}
-
-int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
- phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
- int i = 0;
-
- PP_ASSERT_WITH_CODE((0 != lookup_table->count),
- "Voltage Lookup Table empty.", return -EINVAL);
-
- PP_ASSERT_WITH_CODE((NULL != vol_table),
- "vol_table empty.", return -EINVAL);
-
- vol_table->mask_low = 0;
- vol_table->phase_delay = 0;
-
- vol_table->count = lookup_table->count;
-
- for (i = 0; i < vol_table->count; i++) {
- vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
- vol_table->entries[i].smio_low = 0;
- }
-
- return 0;
-}
-
-void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
- struct pp_atomctrl_voltage_table *vol_table)
-{
- unsigned int i, diff;
-
- if (vol_table->count <= max_vol_steps)
- return;
-
- diff = vol_table->count - max_vol_steps;
-
- for (i = 0; i < max_vol_steps; i++)
- vol_table->entries[i] = vol_table->entries[i + diff];
-
- vol_table->count = max_vol_steps;
-
- return;
-}
-
-int phm_reset_single_dpm_table(void *table,
- uint32_t count, int max)
-{
- int i;
-
- struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
-
- dpm_table->count = count > max ? max : count;
-
- for (i = 0; i < dpm_table->count; i++)
- dpm_table->dpm_level[i].enabled = false;
-
- return 0;
-}
-
-void phm_setup_pcie_table_entry(
- void *table,
- uint32_t index, uint32_t pcie_gen,
- uint32_t pcie_lanes)
-{
- struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
- dpm_table->dpm_level[index].value = pcie_gen;
- dpm_table->dpm_level[index].param1 = pcie_lanes;
- dpm_table->dpm_level[index].enabled = 1;
-}
-
-int32_t phm_get_dpm_level_enable_mask_value(void *table)
-{
- int32_t i;
- int32_t mask = 0;
- struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
-
- for (i = dpm_table->count; i > 0; i--) {
- mask = mask << 1;
- if (dpm_table->dpm_level[i - 1].enabled)
- mask |= 0x1;
- else
- mask &= 0xFFFFFFFE;
- }
-
- return mask;
-}
-
-uint8_t phm_get_voltage_index(
- struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
-{
- uint8_t count = (uint8_t) (lookup_table->count);
- uint8_t i;
-
- PP_ASSERT_WITH_CODE((NULL != lookup_table),
- "Lookup Table empty.", return 0);
- PP_ASSERT_WITH_CODE((0 != count),
- "Lookup Table empty.", return 0);
-
- for (i = 0; i < lookup_table->count; i++) {
- /* find first voltage equal or bigger than requested */
- if (lookup_table->entries[i].us_vdd >= voltage)
- return i;
- }
- /* voltage is bigger than max voltage in the table */
- return i - 1;
-}
-
-uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
- uint32_t voltage)
-{
- uint8_t count = (uint8_t) (voltage_table->count);
- uint8_t i = 0;
-
- PP_ASSERT_WITH_CODE((NULL != voltage_table),
- "Voltage Table empty.", return 0;);
- PP_ASSERT_WITH_CODE((0 != count),
- "Voltage Table empty.", return 0;);
-
- for (i = 0; i < count; i++) {
- /* find first voltage bigger than requested */
- if (voltage_table->entries[i].value >= voltage)
- return i;
- }
-
- /* voltage is bigger than max voltage in the table */
- return i - 1;
-}
-
-uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
-{
- uint32_t i;
-
- for (i = 0; i < vddci_table->count; i++) {
- if (vddci_table->entries[i].value >= vddci)
- return vddci_table->entries[i].value;
- }
-
- pr_debug("vddci is larger than max value in vddci_table\n");
- return vddci_table->entries[i-1].value;
-}
-
-int phm_find_boot_level(void *table,
- uint32_t value, uint32_t *boot_level)
-{
- int result = -EINVAL;
- uint32_t i;
- struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
-
- for (i = 0; i < dpm_table->count; i++) {
- if (value == dpm_table->dpm_level[i].value) {
- *boot_level = i;
- result = 0;
- }
- }
-
- return result;
-}
-
-int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table,
- uint16_t virtual_voltage_id, int32_t *sclk)
-{
- uint8_t entry_id;
- uint8_t voltage_id;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
-
- /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
- for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
- voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
- if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
- break;
- }
-
- if (entry_id >= table_info->vdd_dep_on_sclk->count) {
- pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
- return -EINVAL;
- }
-
- *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
-
- return 0;
-}
-
-/**
- * Initialize Dynamic State Adjustment Rule Settings
- *
- * @param hwmgr the address of the powerplay hardware manager.
- */
-int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
-{
- uint32_t table_size;
- struct phm_clock_voltage_dependency_table *table_clk_vlt;
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- /* initialize vddc_dep_on_dal_pwrl table */
- table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
- table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
-
- if (NULL == table_clk_vlt) {
- pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
- return -ENOMEM;
- } else {
- table_clk_vlt->count = 4;
- table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
- table_clk_vlt->entries[0].v = 0;
- table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
- table_clk_vlt->entries[1].v = 720;
- table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
- table_clk_vlt->entries[2].v = 810;
- table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
- table_clk_vlt->entries[3].v = 900;
- if (pptable_info != NULL)
- pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
- }
-
- return 0;
-}
-
-uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
-{
- uint32_t level = 0;
-
- while (0 == (mask & (1 << level)))
- level++;
-
- return level;
-}
-
-void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_clock_voltage_dependency_table *table =
- table_info->vddc_dep_on_dal_pwrl;
- struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
- enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
- uint32_t req_vddc = 0, req_volt, i;
-
- if (!table || table->count <= 0
- || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
- || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
- return;
-
- for (i = 0; i < table->count; i++) {
- if (dal_power_level == table->entries[i].clk) {
- req_vddc = table->entries[i].v;
- break;
- }
- }
-
- vddc_table = table_info->vdd_dep_on_sclk;
- for (i = 0; i < vddc_table->count; i++) {
- if (req_vddc <= vddc_table->entries[i].vddc) {
- req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_VddC_Request, req_volt);
- return;
- }
- }
- pr_err("DAL requested level can not"
- " found a available voltage in VDDC DPM Table \n");
-}
void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
{
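Among the helpers removed above, phm_get_dpm_level_enable_mask_value()
builds a level-enable bitmask by scanning the DPM table from the top entry
down, so that bit i of the result matches dpm_level[i].enabled; note the
original else branch (mask &= 0xFFFFFFFE) is a no-op after the shift. A
standalone sketch of the same construction:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dpm_level { bool enabled; };

static int32_t get_enable_mask(const struct dpm_level *lvl, int count)
{
	int32_t mask = 0;
	int i;

	/* highest level first, so bit i ends up matching lvl[i].enabled */
	for (i = count; i > 0; i--) {
		mask <<= 1;
		if (lvl[i - 1].enabled)
			mask |= 0x1;
	}
	return mask;
}

int main(void)
{
	struct dpm_level levels[4] = { {true}, {false}, {true}, {true} };

	/* levels 0, 2 and 3 enabled -> binary 1101 -> 0xd */
	printf("mask = 0x%x\n", (unsigned int)get_enable_mask(levels, 4));
	return 0;
}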
@@ -954,25 +442,6 @@ int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
return 0;
}
-int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
- uint32_t sclk, uint16_t id, uint16_t *voltage)
-{
- uint32_t vol;
- int ret = 0;
-
- if (hwmgr->chip_id < CHIP_TONGA) {
- ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
- } else if (hwmgr->chip_id < CHIP_POLARIS10) {
- ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
- if (*voltage >= 2000 || *voltage == 0)
- *voltage = 1150;
- } else {
- ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
- *voltage = (uint16_t)(vol/100);
- }
- return ret;
-}
-
int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
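The phm_wait_on_register() family removed from this file shares one idiom:
poll (reg & mask) against (value & mask) once per microsecond until
hwmgr->usec_timeout expires, treating a timeout as a logic error. A
standalone sketch of that loop, with a mocked register read standing in for
cgs_read_register()/udelay():

#include <stdint.h>
#include <stdio.h>

static uint32_t mock_read_register(uint32_t index)
{
	static uint32_t ticks;

	/* pretend the hardware raises bit 0 of register 7 after a few reads */
	return (index == 7 && ++ticks > 3) ? 0x1 : 0x0;
}

static int wait_on_register(uint32_t index, uint32_t value, uint32_t mask,
			    uint32_t usec_timeout)
{
	uint32_t i;

	for (i = 0; i < usec_timeout; i++) {
		if ((mock_read_register(index) & mask) == (value & mask))
			return 0;	/* matched within the budget */
		/* in kernel context: udelay(1); */
	}
	return -1;			/* timeout */
}

int main(void)
{
	printf("wait result: %d\n", wait_on_register(7, 0x1, 0x1, 100));
	return 0;
}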
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 8ddfb78f28cc..10253b89b3d8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -32,53 +32,52 @@
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "rv_ppsmc.h"
-#include "rv_hwmgr.h"
+#include "smu10_hwmgr.h"
#include "power_state.h"
-#include "rv_smumgr.h"
#include "pp_soc15.h"
-#define RAVEN_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define RAVEN_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */
+#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8 MHz, the lowest engine clock allowed on this chip */
#define SCLK_MIN_DIV_INTV_SHIFT 12
-#define RAVEN_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */
+#define SMU10_DISPCLK_BYPASS_THRESHOLD 10000 /* 100 MHz */
#define SMC_RAM_END 0x40000
-static const unsigned long PhwRaven_Magic = (unsigned long) PHM_Rv_Magic;
+static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
-int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
+static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock_req);
-static struct rv_power_state *cast_rv_ps(struct pp_hw_power_state *hw_ps)
+static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
{
- if (PhwRaven_Magic != hw_ps->magic)
+ if (SMU10_Magic != hw_ps->magic)
return NULL;
- return (struct rv_power_state *)hw_ps;
+ return (struct smu10_power_state *)hw_ps;
}
-static const struct rv_power_state *cast_const_rv_ps(
+static const struct smu10_power_state *cast_const_smu10_ps(
const struct pp_hw_power_state *hw_ps)
{
- if (PhwRaven_Magic != hw_ps->magic)
+ if (SMU10_Magic != hw_ps->magic)
return NULL;
- return (struct rv_power_state *)hw_ps;
+ return (struct smu10_power_state *)hw_ps;
}
-static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
+static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
- struct rv_hwmgr *rv_hwmgr = (struct rv_hwmgr *)(hwmgr->backend);
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
- rv_hwmgr->dce_slow_sclk_threshold = 30000;
- rv_hwmgr->thermal_auto_throttling_treshold = 0;
- rv_hwmgr->is_nb_dpm_enabled = 1;
- rv_hwmgr->dpm_flags = 1;
- rv_hwmgr->gfx_off_controled_by_driver = false;
- rv_hwmgr->need_min_deep_sleep_dcefclk = true;
- rv_hwmgr->num_active_display = 0;
- rv_hwmgr->deep_sleep_dcefclk = 0;
+ smu10_data->dce_slow_sclk_threshold = 30000;
+ smu10_data->thermal_auto_throttling_treshold = 0;
+ smu10_data->is_nb_dpm_enabled = 1;
+ smu10_data->dpm_flags = 1;
+ smu10_data->gfx_off_controled_by_driver = false;
+ smu10_data->need_min_deep_sleep_dcefclk = true;
+ smu10_data->num_active_display = 0;
+ smu10_data->deep_sleep_dcefclk = 0;
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
@@ -91,13 +90,13 @@ static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
return 0;
}
-static int rv_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
+static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
struct phm_clock_and_voltage_limits *table)
{
return 0;
}
-static int rv_init_dynamic_state_adjustment_rule_settings(
+static int smu10_init_dynamic_state_adjustment_rule_settings(
struct pp_hwmgr *hwmgr)
{
uint32_t table_size =
@@ -134,30 +133,30 @@ static int rv_init_dynamic_state_adjustment_rule_settings(
return 0;
}
-static int rv_get_system_info_data(struct pp_hwmgr *hwmgr)
+static int smu10_get_system_info_data(struct pp_hwmgr *hwmgr)
{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)hwmgr->backend;
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)hwmgr->backend;
- rv_data->sys_info.htc_hyst_lmt = 5;
- rv_data->sys_info.htc_tmp_lmt = 203;
+ smu10_data->sys_info.htc_hyst_lmt = 5;
+ smu10_data->sys_info.htc_tmp_lmt = 203;
- if (rv_data->thermal_auto_throttling_treshold == 0)
- rv_data->thermal_auto_throttling_treshold = 203;
+ if (smu10_data->thermal_auto_throttling_treshold == 0)
+ smu10_data->thermal_auto_throttling_treshold = 203;
- rv_construct_max_power_limits_table (hwmgr,
+ smu10_construct_max_power_limits_table(hwmgr,
&hwmgr->dyn_state.max_clock_voltage_on_ac);
- rv_init_dynamic_state_adjustment_rule_settings(hwmgr);
+ smu10_init_dynamic_state_adjustment_rule_settings(hwmgr);
return 0;
}
-static int rv_construct_boot_state(struct pp_hwmgr *hwmgr)
+static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
{
return 0;
}
-static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
+static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
{
struct PP_Clocks clocks = {0};
struct pp_display_clock_request clock_req;
@@ -166,111 +165,109 @@ static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
clock_req.clock_type = amd_pp_dcf_clock;
clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
- PP_ASSERT_WITH_CODE(!rv_display_clock_voltage_request(hwmgr, &clock_req),
+ PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr, &clock_req),
"Attempt to set DCF Clock Failed!", return -EINVAL);
return 0;
}
-static int rv_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
- if (rv_data->need_min_deep_sleep_dcefclk && rv_data->deep_sleep_dcefclk != clock/100) {
- rv_data->deep_sleep_dcefclk = clock/100;
+ if (smu10_data->need_min_deep_sleep_dcefclk && smu10_data->deep_sleep_dcefclk != clock/100) {
+ smu10_data->deep_sleep_dcefclk = clock/100;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
- rv_data->deep_sleep_dcefclk);
+ smu10_data->deep_sleep_dcefclk);
}
return 0;
}
-static int rv_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
+static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
- if (rv_data->num_active_display != count) {
- rv_data->num_active_display = count;
+ if (smu10_data->num_active_display != count) {
+ smu10_data->num_active_display = count;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDisplayCount,
- rv_data->num_active_display);
+ smu10_data->num_active_display);
}
return 0;
}
-static int rv_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
- return rv_set_clock_limit(hwmgr, input);
+ return smu10_set_clock_limit(hwmgr, input);
}
-static int rv_init_power_gate_state(struct pp_hwmgr *hwmgr)
+static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
- rv_data->vcn_power_gated = true;
- rv_data->isp_tileA_power_gated = true;
- rv_data->isp_tileB_power_gated = true;
+ smu10_data->vcn_power_gated = true;
+ smu10_data->isp_tileA_power_gated = true;
+ smu10_data->isp_tileB_power_gated = true;
return 0;
}
-static int rv_setup_asic_task(struct pp_hwmgr *hwmgr)
+static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
- return rv_init_power_gate_state(hwmgr);
+ return smu10_init_power_gate_state(hwmgr);
}
-static int rv_reset_cc6_data(struct pp_hwmgr *hwmgr)
+static int smu10_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
- rv_data->separation_time = 0;
- rv_data->cc6_disable = false;
- rv_data->pstate_disable = false;
- rv_data->cc6_setting_changed = false;
+ smu10_data->separation_time = 0;
+ smu10_data->cc6_disable = false;
+ smu10_data->pstate_disable = false;
+ smu10_data->cc6_setting_changed = false;
return 0;
}
-static int rv_power_off_asic(struct pp_hwmgr *hwmgr)
+static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
{
- return rv_reset_cc6_data(hwmgr);
+ return smu10_reset_cc6_data(hwmgr);
}
-static int rv_disable_gfx_off(struct pp_hwmgr *hwmgr)
+static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
- if (rv_data->gfx_off_controled_by_driver)
- smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_DisableGfxOff);
+ if (smu10_data->gfx_off_controled_by_driver)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
return 0;
}
-static int rv_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
- return rv_disable_gfx_off(hwmgr);
+ return smu10_disable_gfx_off(hwmgr);
}
-static int rv_enable_gfx_off(struct pp_hwmgr *hwmgr)
+static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
- if (rv_data->gfx_off_controled_by_driver)
- smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_EnableGfxOff);
+ if (smu10_data->gfx_off_controled_by_driver)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);
return 0;
}
-static int rv_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
- return rv_enable_gfx_off(hwmgr);
+ return smu10_enable_gfx_off(hwmgr);
}
-static int rv_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *prequest_ps,
const struct pp_power_state *pcurrent_ps)
{
@@ -314,14 +311,14 @@ static const DpmClock_t VddPhyClk[]= {
{ 810, 3600},
};
-static int rv_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
- struct rv_voltage_dependency_table **pptable,
+static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
+ struct smu10_voltage_dependency_table **pptable,
uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
{
uint32_t table_size, i;
- struct rv_voltage_dependency_table *ptable;
+ struct smu10_voltage_dependency_table *ptable;
- table_size = sizeof(uint32_t) + sizeof(struct rv_voltage_dependency_table) * num_entry;
+ table_size = sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table) * num_entry;
ptable = kzalloc(table_size, GFP_KERNEL);
if (NULL == ptable)
@@ -341,107 +338,95 @@ static int rv_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
}
-static int rv_populate_clock_table(struct pp_hwmgr *hwmgr)
+static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
{
int result;
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
- DpmClocks_t *table = &(rv_data->clock_table);
- struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ DpmClocks_t *table = &(smu10_data->clock_table);
+ struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
- result = rv_copy_table_from_smc(hwmgr, (uint8_t *)table, CLOCKTABLE);
+ result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true);
PP_ASSERT_WITH_CODE((0 == result),
"Attempt to copy clock table from smc failed",
return result);
if (0 == result && table->DcefClocks[0].Freq != 0) {
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
NUM_DCEFCLK_DPM_LEVELS,
- &rv_data->clock_table.DcefClocks[0]);
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
+ &smu10_data->clock_table.DcefClocks[0]);
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
NUM_SOCCLK_DPM_LEVELS,
- &rv_data->clock_table.SocClocks[0]);
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
+ &smu10_data->clock_table.SocClocks[0]);
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
NUM_FCLK_DPM_LEVELS,
- &rv_data->clock_table.FClocks[0]);
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
+ &smu10_data->clock_table.FClocks[0]);
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
NUM_MEMCLK_DPM_LEVELS,
- &rv_data->clock_table.MemClocks[0]);
+ &smu10_data->clock_table.MemClocks[0]);
} else {
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
ARRAY_SIZE(VddDcfClk),
&VddDcfClk[0]);
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
ARRAY_SIZE(VddSocClk),
&VddSocClk[0]);
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
ARRAY_SIZE(VddFClk),
&VddFClk[0]);
}
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
ARRAY_SIZE(VddDispClk),
&VddDispClk[0]);
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetMinGfxclkFrequency),
- "Attempt to get min GFXCLK Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
- &result),
- "Attempt to get min GFXCLK Failed!",
- return -1);
- rv_data->gfx_min_freq_limit = result * 100;
-
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetMaxGfxclkFrequency),
- "Attempt to get max GFXCLK Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
- &result),
- "Attempt to get max GFXCLK Failed!",
- return -1);
- rv_data->gfx_max_freq_limit = result * 100;
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
+ result = smum_get_argument(hwmgr);
+ smu10_data->gfx_min_freq_limit = result * 100;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
+ result = smum_get_argument(hwmgr);
+ smu10_data->gfx_max_freq_limit = result * 100;
return 0;
}
-static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct rv_hwmgr *data;
+ struct smu10_hwmgr *data;
- data = kzalloc(sizeof(struct rv_hwmgr), GFP_KERNEL);
+ data = kzalloc(sizeof(struct smu10_hwmgr), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
hwmgr->backend = data;
- result = rv_initialize_dpm_defaults(hwmgr);
+ result = smu10_initialize_dpm_defaults(hwmgr);
if (result != 0) {
- pr_err("rv_initialize_dpm_defaults failed\n");
+ pr_err("smu10_initialize_dpm_defaults failed\n");
return result;
}
- rv_populate_clock_table(hwmgr);
+ smu10_populate_clock_table(hwmgr);
- result = rv_get_system_info_data(hwmgr);
+ result = smu10_get_system_info_data(hwmgr);
if (result != 0) {
- pr_err("rv_get_system_info_data failed\n");
+ pr_err("smu10_get_system_info_data failed\n");
return result;
}
- rv_construct_boot_state(hwmgr);
+ smu10_construct_boot_state(hwmgr);
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
- RAVEN_MAX_HARDWARE_POWERLEVELS;
+ SMU10_MAX_HARDWARE_POWERLEVELS;
hwmgr->platform_descriptor.hardwarePerformanceLevels =
- RAVEN_MAX_HARDWARE_POWERLEVELS;
+ SMU10_MAX_HARDWARE_POWERLEVELS;
hwmgr->platform_descriptor.vbiosInterruptId = 0;
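The hunks in this file replace the PP_ASSERT-wrapped
smum_send_msg_to_smc()/rv_read_arg_from_smc() pairs with a plain send
followed by smum_get_argument(): the SMU latches its reply in an argument
register that the driver reads back afterwards. A standalone mock of that
request/reply shape (names illustrative, not the real smumgr API); per the
scaling in the code above, the SMU replies in MHz and callers multiply by
100 to get units of 10 kHz:

#include <stdint.h>
#include <stdio.h>

#define MSG_GET_GFXCLK_FREQUENCY 42	/* illustrative message id */

static uint32_t smu_arg_reg;	/* mock of the SMU argument register */

static void send_msg_to_smc(uint32_t msg)
{
	/* pretend firmware handled the query and latched its answer */
	if (msg == MSG_GET_GFXCLK_FREQUENCY)
		smu_arg_reg = 1100;	/* MHz */
}

static uint32_t get_argument(void)
{
	return smu_arg_reg;
}

int main(void)
{
	send_msg_to_smc(MSG_GET_GFXCLK_FREQUENCY);
	printf("gfxclk = %u (10 kHz units)\n", get_argument() * 100);
	return 0;
}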
@@ -451,16 +436,16 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
- hwmgr->pstate_sclk = RAVEN_UMD_PSTATE_GFXCLK;
- hwmgr->pstate_mclk = RAVEN_UMD_PSTATE_FCLK;
+ hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
+ hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;
return result;
}
-static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
- struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
kfree(pinfo->vdd_dep_on_dcefclk);
pinfo->vdd_dep_on_dcefclk = NULL;
@@ -484,7 +469,7 @@ static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
return 0;
}
-static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
if (hwmgr->smu_version < 0x1E3700) {
@@ -497,113 +482,113 @@ static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- RAVEN_UMD_PSTATE_PEAK_GFXCLK);
+ SMU10_UMD_PSTATE_PEAK_GFXCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- RAVEN_UMD_PSTATE_PEAK_FCLK);
+ SMU10_UMD_PSTATE_PEAK_FCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
- RAVEN_UMD_PSTATE_PEAK_SOCCLK);
+ SMU10_UMD_PSTATE_PEAK_SOCCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
- RAVEN_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- RAVEN_UMD_PSTATE_PEAK_GFXCLK);
+ SMU10_UMD_PSTATE_PEAK_GFXCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- RAVEN_UMD_PSTATE_PEAK_FCLK);
+ SMU10_UMD_PSTATE_PEAK_FCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
- RAVEN_UMD_PSTATE_PEAK_SOCCLK);
+ SMU10_UMD_PSTATE_PEAK_SOCCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
- RAVEN_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- RAVEN_UMD_PSTATE_MIN_GFXCLK);
+ SMU10_UMD_PSTATE_MIN_GFXCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- RAVEN_UMD_PSTATE_MIN_GFXCLK);
+ SMU10_UMD_PSTATE_MIN_GFXCLK);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- RAVEN_UMD_PSTATE_MIN_FCLK);
+ SMU10_UMD_PSTATE_MIN_FCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- RAVEN_UMD_PSTATE_MIN_FCLK);
+ SMU10_UMD_PSTATE_MIN_FCLK);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- RAVEN_UMD_PSTATE_GFXCLK);
+ SMU10_UMD_PSTATE_GFXCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- RAVEN_UMD_PSTATE_FCLK);
+ SMU10_UMD_PSTATE_FCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
- RAVEN_UMD_PSTATE_SOCCLK);
+ SMU10_UMD_PSTATE_SOCCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
- RAVEN_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- RAVEN_UMD_PSTATE_GFXCLK);
+ SMU10_UMD_PSTATE_GFXCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- RAVEN_UMD_PSTATE_FCLK);
+ SMU10_UMD_PSTATE_FCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
- RAVEN_UMD_PSTATE_SOCCLK);
+ SMU10_UMD_PSTATE_SOCCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
- RAVEN_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- RAVEN_UMD_PSTATE_MIN_GFXCLK);
+ SMU10_UMD_PSTATE_MIN_GFXCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- RAVEN_UMD_PSTATE_MIN_FCLK);
+ SMU10_UMD_PSTATE_MIN_FCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
- RAVEN_UMD_PSTATE_MIN_SOCCLK);
+ SMU10_UMD_PSTATE_MIN_SOCCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
- RAVEN_UMD_PSTATE_MIN_VCE);
+ SMU10_UMD_PSTATE_MIN_VCE);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- RAVEN_UMD_PSTATE_PEAK_GFXCLK);
+ SMU10_UMD_PSTATE_PEAK_GFXCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- RAVEN_UMD_PSTATE_PEAK_FCLK);
+ SMU10_UMD_PSTATE_PEAK_FCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
- RAVEN_UMD_PSTATE_PEAK_SOCCLK);
+ SMU10_UMD_PSTATE_PEAK_SOCCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
- RAVEN_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- RAVEN_UMD_PSTATE_MIN_GFXCLK);
+ SMU10_UMD_PSTATE_MIN_GFXCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- RAVEN_UMD_PSTATE_MIN_GFXCLK);
+ SMU10_UMD_PSTATE_MIN_GFXCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- RAVEN_UMD_PSTATE_MIN_FCLK);
+ SMU10_UMD_PSTATE_MIN_FCLK);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- RAVEN_UMD_PSTATE_MIN_FCLK);
+ SMU10_UMD_PSTATE_MIN_FCLK);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -613,14 +598,14 @@ static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
return 0;
}
-static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
- struct rv_hwmgr *data;
+ struct smu10_hwmgr *data;
if (hwmgr == NULL)
return -EINVAL;
- data = (struct rv_hwmgr *)(hwmgr->backend);
+ data = (struct smu10_hwmgr *)(hwmgr->backend);
if (low)
return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
@@ -629,14 +614,14 @@ static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
}
-static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
- struct rv_hwmgr *data;
+ struct smu10_hwmgr *data;
if (hwmgr == NULL)
return -EINVAL;
- data = (struct rv_hwmgr *)(hwmgr->backend);
+ data = (struct smu10_hwmgr *)(hwmgr->backend);
if (low)
return data->gfx_min_freq_limit;
@@ -644,34 +629,34 @@ static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
return data->gfx_max_freq_limit;
}
-static int rv_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
+static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps)
{
return 0;
}
-static int rv_dpm_get_pp_table_entry_callback(
+static int smu10_dpm_get_pp_table_entry_callback(
struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps,
unsigned int index,
const void *clock_info)
{
- struct rv_power_state *rv_ps = cast_rv_ps(hw_ps);
+ struct smu10_power_state *smu10_ps = cast_smu10_ps(hw_ps);
- rv_ps->levels[index].engine_clock = 0;
+ smu10_ps->levels[index].engine_clock = 0;
- rv_ps->levels[index].vddc_index = 0;
- rv_ps->level = index + 1;
+ smu10_ps->levels[index].vddc_index = 0;
+ smu10_ps->level = index + 1;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
- rv_ps->levels[index].ds_divider_index = 5;
- rv_ps->levels[index].ss_divider_index = 5;
+ smu10_ps->levels[index].ds_divider_index = 5;
+ smu10_ps->levels[index].ss_divider_index = 5;
}
return 0;
}
-static int rv_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
+static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
int result;
unsigned long ret = 0;
@@ -681,72 +666,66 @@ static int rv_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
return result ? 0 : ret;
}
-static int rv_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
+static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
unsigned long entry, struct pp_power_state *ps)
{
int result;
- struct rv_power_state *rv_ps;
+ struct smu10_power_state *smu10_ps;
- ps->hardware.magic = PhwRaven_Magic;
+ ps->hardware.magic = SMU10_Magic;
- rv_ps = cast_rv_ps(&(ps->hardware));
+ smu10_ps = cast_smu10_ps(&(ps->hardware));
result = pp_tables_get_entry(hwmgr, entry, ps,
- rv_dpm_get_pp_table_entry_callback);
+ smu10_dpm_get_pp_table_entry_callback);
- rv_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
- rv_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
+ smu10_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
+ smu10_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
return result;
}
-static int rv_get_power_state_size(struct pp_hwmgr *hwmgr)
+static int smu10_get_power_state_size(struct pp_hwmgr *hwmgr)
{
- return sizeof(struct rv_power_state);
+ return sizeof(struct smu10_power_state);
}
-static int rv_set_cpu_power_state(struct pp_hwmgr *hwmgr)
+static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
return 0;
}
-static int rv_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
+static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
{
return 0;
}
-static int rv_get_dal_power_level(struct pp_hwmgr *hwmgr,
+static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
struct amd_pp_simple_clock_info *info)
{
return -EINVAL;
}
-static int rv_force_clock_level(struct pp_hwmgr *hwmgr,
+static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
return 0;
}
-static int rv_print_clock_levels(struct pp_hwmgr *hwmgr,
+static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
- struct rv_hwmgr *data = (struct rv_hwmgr *)(hwmgr->backend);
- struct rv_voltage_dependency_table *mclk_table =
+ struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
+ struct smu10_voltage_dependency_table *mclk_table =
data->clock_vol_info.vdd_dep_on_fclk;
int i, now, size = 0;
switch (type) {
case PP_SCLK:
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetGfxclkFrequency),
- "Attempt to get current GFXCLK Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
- &now),
- "Attempt to get current GFXCLK Failed!",
- return -1);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
+ now = smum_get_argument(hwmgr);
size += sprintf(buf + size, "0: %uMhz %s\n",
data->gfx_min_freq_limit / 100,
@@ -758,14 +737,8 @@ static int rv_print_clock_levels(struct pp_hwmgr *hwmgr,
== now) ? "*" : "");
break;
case PP_MCLK:
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetFclkFrequency),
- "Attempt to get current MEMCLK Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
- &now),
- "Attempt to get current MEMCLK Failed!",
- return -1);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
+ now = smum_get_argument(hwmgr);
for (i = 0; i < mclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -781,16 +754,16 @@ static int rv_print_clock_levels(struct pp_hwmgr *hwmgr,
return size;
}
-static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+static int smu10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
PHM_PerformanceLevelDesignation designation, uint32_t index,
PHM_PerformanceLevel *level)
{
- struct rv_hwmgr *data;
+ struct smu10_hwmgr *data;
if (level == NULL || hwmgr == NULL || state == NULL)
return -EINVAL;
- data = (struct rv_hwmgr *)(hwmgr->backend);
+ data = (struct smu10_hwmgr *)(hwmgr->backend);
if (index == 0) {
level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
@@ -807,10 +780,10 @@ static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p
return 0;
}
-static int rv_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
+static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
{
- const struct rv_power_state *ps = cast_const_rv_ps(state);
+ const struct smu10_power_state *ps = cast_const_smu10_ps(state);
clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));
@@ -825,7 +798,7 @@ static int rv_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
#define MEM_LATENCY_ERR 0xFFFF
-static uint32_t rv_get_mem_latency(struct pp_hwmgr *hwmgr,
+static uint32_t smu10_get_mem_latency(struct pp_hwmgr *hwmgr,
uint32_t clock)
{
if (clock >= MEM_FREQ_LOW_LATENCY &&
@@ -837,14 +810,14 @@ static uint32_t rv_get_mem_latency(struct pp_hwmgr *hwmgr,
return MEM_LATENCY_ERR;
}
-static int rv_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
+static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_latency *clocks)
{
uint32_t i;
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
- struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
- struct rv_voltage_dependency_table *pclk_vol_table;
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
+ struct smu10_voltage_dependency_table *pclk_vol_table;
bool latency_required = false;
if (pinfo == NULL)
@@ -881,7 +854,7 @@ static int rv_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
for (i = 0; i < pclk_vol_table->count; i++) {
clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
clocks->data[i].latency_in_us = latency_required ?
- rv_get_mem_latency(hwmgr,
+ smu10_get_mem_latency(hwmgr,
pclk_vol_table->entries[i].clk) :
0;
clocks->num_levels++;
@@ -890,14 +863,14 @@ static int rv_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
return 0;
}
-static int rv_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
+static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks)
{
uint32_t i;
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
- struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
- struct rv_voltage_dependency_table *pclk_vol_table = NULL;
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
+ struct smu10_voltage_dependency_table *pclk_vol_table = NULL;
if (pinfo == NULL)
return -EINVAL;
@@ -932,29 +905,28 @@ static int rv_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
return 0;
}
-int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
+static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock_req)
{
- int result = 0;
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
enum amd_pp_clock_type clk_type = clock_req->clock_type;
uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
PPSMC_Msg msg;
switch (clk_type) {
case amd_pp_dcf_clock:
- if (clk_freq == rv_data->dcf_actual_hard_min_freq)
+ if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
return 0;
msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
- rv_data->dcf_actual_hard_min_freq = clk_freq;
+ smu10_data->dcf_actual_hard_min_freq = clk_freq;
break;
case amd_pp_soc_clock:
msg = PPSMC_MSG_SetHardMinSocclkByFreq;
break;
case amd_pp_f_clock:
- if (clk_freq == rv_data->f_actual_hard_min_freq)
+ if (clk_freq == smu10_data->f_actual_hard_min_freq)
return 0;
- rv_data->f_actual_hard_min_freq = clk_freq;
+ smu10_data->f_actual_hard_min_freq = clk_freq;
msg = PPSMC_MSG_SetHardMinFclkByFreq;
break;
default:
@@ -962,19 +934,18 @@ int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
- result = smum_send_msg_to_smc_with_parameter(hwmgr, msg,
- clk_freq);
+ smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);
- return result;
+ return 0;
}
-static int rv_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
+static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */
return 0;
}
-static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0,
mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP);
@@ -990,7 +961,7 @@ static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr)
return cur_temp;
}
-static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
uint32_t sclk, mclk;
@@ -998,25 +969,21 @@ static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
- if (!ret) {
- rv_read_arg_from_smc(hwmgr, &sclk);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
+ sclk = smum_get_argument(hwmgr);
/* in units of 10KHZ */
- *((uint32_t *)value) = sclk * 100;
- *size = 4;
- }
+ *((uint32_t *)value) = sclk * 100;
+ *size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
- if (!ret) {
- rv_read_arg_from_smc(hwmgr, &mclk);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
+ mclk = smum_get_argument(hwmgr);
/* in units of 10KHZ */
- *((uint32_t *)value) = mclk * 100;
- *size = 4;
- }
+ *((uint32_t *)value) = mclk * 100;
+ *size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_TEMP:
- *((uint32_t *)value) = rv_thermal_get_temperature(hwmgr);
+ *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
break;
default:
ret = -EINVAL;
@@ -1026,50 +993,50 @@ static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx,
return ret;
}
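
The "* 100" in the sensor path above is a unit conversion: assuming the SMC replies in MHz (the hunk itself only shows the multiply and the "in units of 10KHZ" comment), the reported value lands in the 10 kHz units the sensor interface expects:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sclk_mhz = 800;		/* hypothetical SMC reply */
	uint32_t sclk_10khz = sclk_mhz * 100;	/* 800 MHz -> 80000 */

	printf("%u\n", sclk_10khz);
	return 0;
}

The same convention explains the hard-coded 80000 (800 MHz) in smu10_get_max_high_clocks below.
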
-static int rv_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
+static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
{
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
}
-static const struct pp_hwmgr_func rv_hwmgr_funcs = {
- .backend_init = rv_hwmgr_backend_init,
- .backend_fini = rv_hwmgr_backend_fini,
+static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
+ .backend_init = smu10_hwmgr_backend_init,
+ .backend_fini = smu10_hwmgr_backend_fini,
.asic_setup = NULL,
- .apply_state_adjust_rules = rv_apply_state_adjust_rules,
- .force_dpm_level = rv_dpm_force_dpm_level,
- .get_power_state_size = rv_get_power_state_size,
+ .apply_state_adjust_rules = smu10_apply_state_adjust_rules,
+ .force_dpm_level = smu10_dpm_force_dpm_level,
+ .get_power_state_size = smu10_get_power_state_size,
.powerdown_uvd = NULL,
.powergate_uvd = NULL,
.powergate_vce = NULL,
- .get_mclk = rv_dpm_get_mclk,
- .get_sclk = rv_dpm_get_sclk,
- .patch_boot_state = rv_dpm_patch_boot_state,
- .get_pp_table_entry = rv_dpm_get_pp_table_entry,
- .get_num_of_pp_table_entries = rv_dpm_get_num_of_pp_table_entries,
- .set_cpu_power_state = rv_set_cpu_power_state,
- .store_cc6_data = rv_store_cc6_data,
- .force_clock_level = rv_force_clock_level,
- .print_clock_levels = rv_print_clock_levels,
- .get_dal_power_level = rv_get_dal_power_level,
- .get_performance_level = rv_get_performance_level,
- .get_current_shallow_sleep_clocks = rv_get_current_shallow_sleep_clocks,
- .get_clock_by_type_with_latency = rv_get_clock_by_type_with_latency,
- .get_clock_by_type_with_voltage = rv_get_clock_by_type_with_voltage,
- .get_max_high_clocks = rv_get_max_high_clocks,
- .read_sensor = rv_read_sensor,
- .set_active_display_count = rv_set_active_display_count,
- .set_deep_sleep_dcefclk = rv_set_deep_sleep_dcefclk,
- .dynamic_state_management_enable = rv_enable_dpm_tasks,
- .power_off_asic = rv_power_off_asic,
- .asic_setup = rv_setup_asic_task,
- .power_state_set = rv_set_power_state_tasks,
- .dynamic_state_management_disable = rv_disable_dpm_tasks,
- .set_mmhub_powergating_by_smu = rv_set_mmhub_powergating_by_smu,
+ .get_mclk = smu10_dpm_get_mclk,
+ .get_sclk = smu10_dpm_get_sclk,
+ .patch_boot_state = smu10_dpm_patch_boot_state,
+ .get_pp_table_entry = smu10_dpm_get_pp_table_entry,
+ .get_num_of_pp_table_entries = smu10_dpm_get_num_of_pp_table_entries,
+ .set_cpu_power_state = smu10_set_cpu_power_state,
+ .store_cc6_data = smu10_store_cc6_data,
+ .force_clock_level = smu10_force_clock_level,
+ .print_clock_levels = smu10_print_clock_levels,
+ .get_dal_power_level = smu10_get_dal_power_level,
+ .get_performance_level = smu10_get_performance_level,
+ .get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
+ .get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
+ .get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
+ .get_max_high_clocks = smu10_get_max_high_clocks,
+ .read_sensor = smu10_read_sensor,
+ .set_active_display_count = smu10_set_active_display_count,
+ .set_deep_sleep_dcefclk = smu10_set_deep_sleep_dcefclk,
+ .dynamic_state_management_enable = smu10_enable_dpm_tasks,
+ .power_off_asic = smu10_power_off_asic,
+ .asic_setup = smu10_setup_asic_task,
+ .power_state_set = smu10_set_power_state_tasks,
+ .dynamic_state_management_disable = smu10_disable_dpm_tasks,
+ .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
};
-int rv_init_function_pointers(struct pp_hwmgr *hwmgr)
+int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
{
- hwmgr->hwmgr_func = &rv_hwmgr_funcs;
+ hwmgr->hwmgr_func = &smu10_hwmgr_funcs;
hwmgr->pptable_func = &pptable_funcs;
return 0;
}
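
What makes a rename of this size low-risk is visible in the table above: nothing outside this file calls the smu10_* symbols directly; everything dispatches through the pp_hwmgr_func pointer table that smu10_init_function_pointers installs. A reduced sketch of that pattern, with a hypothetical single-slot table (the real struct carries dozens of slots):

#include <stdio.h>

struct hwmgr;

struct hwmgr_func_demo {
	int (*read_sensor)(struct hwmgr *hwmgr, int idx,
			   void *value, int *size);
};

struct hwmgr {
	const struct hwmgr_func_demo *hwmgr_func;
};

static int smu10_read_sensor_demo(struct hwmgr *hwmgr, int idx,
				  void *value, int *size)
{
	*(unsigned int *)value = 800 * 100;	/* 800 MHz in 10 kHz units */
	*size = 4;
	return 0;
}

static const struct hwmgr_func_demo smu10_funcs_demo = {
	.read_sensor = smu10_read_sensor_demo,
};

int main(void)
{
	struct hwmgr hw = { .hwmgr_func = &smu10_funcs_demo };
	unsigned int val;
	int size;

	hw.hwmgr_func->read_sensor(&hw, 0, &val, &size);
	printf("sensor: %u\n", val);
	return 0;
}
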
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
index c3bc311dc59f..175c3a592b6c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
@@ -21,17 +21,17 @@
*
*/
-#ifndef RAVEN_HWMGR_H
-#define RAVEN_HWMGR_H
+#ifndef SMU10_HWMGR_H
+#define SMU10_HWMGR_H
#include "hwmgr.h"
-#include "rv_inc.h"
+#include "smu10_inc.h"
#include "smu10_driver_if.h"
#include "rv_ppsmc.h"
-#define RAVEN_MAX_HARDWARE_POWERLEVELS 8
-#define PHMRAVEN_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
+#define SMU10_MAX_HARDWARE_POWERLEVELS 8
+#define SMU10_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
#define DPMFlags_SCLK_Enabled 0x00000001
#define DPMFlags_UVD_Enabled 0x00000002
@@ -47,10 +47,10 @@
#define SMU_PHYID_SHIFT 8
-#define RAVEN_PCIE_POWERGATING_TARGET_GFX 0
-#define RAVEN_PCIE_POWERGATING_TARGET_DDI 1
-#define RAVEN_PCIE_POWERGATING_TARGET_PLLCASCADE 2
-#define RAVEN_PCIE_POWERGATING_TARGET_PHY 3
+#define SMU10_PCIE_POWERGATING_TARGET_GFX 0
+#define SMU10_PCIE_POWERGATING_TARGET_DDI 1
+#define SMU10_PCIE_POWERGATING_TARGET_PLLCASCADE 2
+#define SMU10_PCIE_POWERGATING_TARGET_PHY 3
enum VQ_TYPE {
CLOCK_TYPE_DCLK = 0L,
@@ -65,14 +65,14 @@ enum VQ_TYPE {
#define SUSTAINABLE_CU_MASK 0xff000000
#define SUSTAINABLE_CU_SHIFT 24
-struct rv_dpm_entry {
+struct smu10_dpm_entry {
uint32_t soft_min_clk;
uint32_t hard_min_clk;
uint32_t soft_max_clk;
uint32_t hard_max_clk;
};
-struct rv_power_level {
+struct smu10_power_level {
uint32_t engine_clock;
uint8_t vddc_index;
uint8_t ds_divider_index;
@@ -86,14 +86,14 @@ struct rv_power_level {
uint8_t rsv[3];
};
-/*used for the nbpsFlags field in rv_power state*/
-#define RAVEN_POWERSTATE_FLAGS_NBPS_FORCEHIGH (1<<0)
-#define RAVEN_POWERSTATE_FLAGS_NBPS_LOCKTOHIGH (1<<1)
-#define RAVEN_POWERSTATE_FLAGS_NBPS_LOCKTOLOW (1<<2)
+/*used for the nbpsFlags field in smu10_power state*/
+#define SMU10_POWERSTATE_FLAGS_NBPS_FORCEHIGH (1<<0)
+#define SMU10_POWERSTATE_FLAGS_NBPS_LOCKTOHIGH (1<<1)
+#define SMU10_POWERSTATE_FLAGS_NBPS_LOCKTOLOW (1<<2)
-#define RAVEN_POWERSTATE_FLAGS_BAPM_DISABLE (1<<0)
+#define SMU10_POWERSTATE_FLAGS_BAPM_DISABLE (1<<0)
-struct rv_uvd_clocks {
+struct smu10_uvd_clocks {
uint32_t vclk;
uint32_t dclk;
uint32_t vclk_low_divider;
@@ -118,16 +118,16 @@ struct pp_disable_nbpslo_flags {
};
-enum rv_pstate_previous_action {
+enum smu10_pstate_previous_action {
DO_NOTHING = 1,
FORCE_HIGH,
CANCEL_FORCE_HIGH
};
-struct rv_power_state {
+struct smu10_power_state {
unsigned int magic;
uint32_t level;
- struct rv_uvd_clocks uvd_clocks;
+ struct smu10_uvd_clocks uvd_clocks;
uint32_t evclk;
uint32_t ecclk;
uint32_t samclk;
@@ -141,79 +141,79 @@ struct rv_power_state {
uint8_t dpm_x_nbps_low;
uint8_t dpm_x_nbps_high;
- enum rv_pstate_previous_action action;
+ enum smu10_pstate_previous_action action;
- struct rv_power_level levels[RAVEN_MAX_HARDWARE_POWERLEVELS];
+ struct smu10_power_level levels[SMU10_MAX_HARDWARE_POWERLEVELS];
struct pp_disable_nbpslo_flags nbpslo_flags;
};
-#define RAVEN_NUM_NBPSTATES 4
-#define RAVEN_NUM_NBPMEMORYCLOCK 2
+#define SMU10_NUM_NBPSTATES 4
+#define SMU10_NUM_NBPMEMORYCLOCK 2
-struct rv_display_phy_info_entry {
+struct smu10_display_phy_info_entry {
uint8_t phy_present;
uint8_t active_lane_mapping;
uint8_t display_config_type;
uint8_t active_num_of_lanes;
};
-#define RAVEN_MAX_DISPLAYPHY_IDS 10
+#define SMU10_MAX_DISPLAYPHY_IDS 10
-struct rv_display_phy_info {
+struct smu10_display_phy_info {
bool display_phy_access_initialized;
- struct rv_display_phy_info_entry entries[RAVEN_MAX_DISPLAYPHY_IDS];
+ struct smu10_display_phy_info_entry entries[SMU10_MAX_DISPLAYPHY_IDS];
};
#define MAX_DISPLAY_CLOCK_LEVEL 8
-struct rv_system_info{
+struct smu10_system_info{
uint8_t htc_tmp_lmt;
uint8_t htc_hyst_lmt;
};
#define MAX_REGULAR_DPM_NUMBER 8
-struct rv_mclk_latency_entries {
+struct smu10_mclk_latency_entries {
uint32_t frequency;
uint32_t latency;
};
-struct rv_mclk_latency_table {
+struct smu10_mclk_latency_table {
uint32_t count;
- struct rv_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER];
+ struct smu10_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER];
};
-struct rv_clock_voltage_dependency_record {
+struct smu10_clock_voltage_dependency_record {
uint32_t clk;
uint32_t vol;
};
-struct rv_voltage_dependency_table {
+struct smu10_voltage_dependency_table {
uint32_t count;
- struct rv_clock_voltage_dependency_record entries[1];
+ struct smu10_clock_voltage_dependency_record entries[1];
};
-struct rv_clock_voltage_information {
- struct rv_voltage_dependency_table *vdd_dep_on_dcefclk;
- struct rv_voltage_dependency_table *vdd_dep_on_socclk;
- struct rv_voltage_dependency_table *vdd_dep_on_fclk;
- struct rv_voltage_dependency_table *vdd_dep_on_mclk;
- struct rv_voltage_dependency_table *vdd_dep_on_dispclk;
- struct rv_voltage_dependency_table *vdd_dep_on_dppclk;
- struct rv_voltage_dependency_table *vdd_dep_on_phyclk;
+struct smu10_clock_voltage_information {
+ struct smu10_voltage_dependency_table *vdd_dep_on_dcefclk;
+ struct smu10_voltage_dependency_table *vdd_dep_on_socclk;
+ struct smu10_voltage_dependency_table *vdd_dep_on_fclk;
+ struct smu10_voltage_dependency_table *vdd_dep_on_mclk;
+ struct smu10_voltage_dependency_table *vdd_dep_on_dispclk;
+ struct smu10_voltage_dependency_table *vdd_dep_on_dppclk;
+ struct smu10_voltage_dependency_table *vdd_dep_on_phyclk;
};
-struct rv_hwmgr {
+struct smu10_hwmgr {
uint32_t disable_driver_thermal_policy;
uint32_t thermal_auto_throttling_treshold;
- struct rv_system_info sys_info;
- struct rv_mclk_latency_table mclk_latency_table;
+ struct smu10_system_info sys_info;
+ struct smu10_mclk_latency_table mclk_latency_table;
uint32_t ddi_power_gating_disabled;
- struct rv_display_phy_info_entry display_phy_info;
+ struct smu10_display_phy_info_entry display_phy_info;
uint32_t dce_slow_sclk_threshold;
bool disp_clk_bypass;
@@ -255,10 +255,10 @@ struct rv_hwmgr {
uint32_t fps_low_threshold;
uint32_t dpm_flags;
- struct rv_dpm_entry sclk_dpm;
- struct rv_dpm_entry uvd_dpm;
- struct rv_dpm_entry vce_dpm;
- struct rv_dpm_entry acp_dpm;
+ struct smu10_dpm_entry sclk_dpm;
+ struct smu10_dpm_entry uvd_dpm;
+ struct smu10_dpm_entry vce_dpm;
+ struct smu10_dpm_entry acp_dpm;
bool acp_power_up_no_dsp;
uint32_t max_sclk_level;
@@ -291,7 +291,7 @@ struct rv_hwmgr {
bool gfx_off_controled_by_driver;
Watermarks_t water_marks_table;
- struct rv_clock_voltage_information clock_vol_info;
+ struct smu10_clock_voltage_information clock_vol_info;
DpmClocks_t clock_table;
uint32_t active_process_mask;
@@ -302,21 +302,21 @@ struct rv_hwmgr {
struct pp_hwmgr;
-int rv_init_function_pointers(struct pp_hwmgr *hwmgr);
+int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
-/* UMD PState Raven Msg Parameters in MHz */
-#define RAVEN_UMD_PSTATE_GFXCLK 700
-#define RAVEN_UMD_PSTATE_SOCCLK 626
-#define RAVEN_UMD_PSTATE_FCLK 933
-#define RAVEN_UMD_PSTATE_VCE 0x03C00320
+/* UMD PState SMU10 Msg Parameters in MHz */
+#define SMU10_UMD_PSTATE_GFXCLK 700
+#define SMU10_UMD_PSTATE_SOCCLK 626
+#define SMU10_UMD_PSTATE_FCLK 933
+#define SMU10_UMD_PSTATE_VCE 0x03C00320
-#define RAVEN_UMD_PSTATE_PEAK_GFXCLK 1100
-#define RAVEN_UMD_PSTATE_PEAK_SOCCLK 757
-#define RAVEN_UMD_PSTATE_PEAK_FCLK 1200
+#define SMU10_UMD_PSTATE_PEAK_GFXCLK 1100
+#define SMU10_UMD_PSTATE_PEAK_SOCCLK 757
+#define SMU10_UMD_PSTATE_PEAK_FCLK 1200
-#define RAVEN_UMD_PSTATE_MIN_GFXCLK 200
-#define RAVEN_UMD_PSTATE_MIN_FCLK 400
-#define RAVEN_UMD_PSTATE_MIN_SOCCLK 200
-#define RAVEN_UMD_PSTATE_MIN_VCE 0x0190012C
+#define SMU10_UMD_PSTATE_MIN_GFXCLK 200
+#define SMU10_UMD_PSTATE_MIN_FCLK 400
+#define SMU10_UMD_PSTATE_MIN_SOCCLK 200
+#define SMU10_UMD_PSTATE_MIN_VCE 0x0190012C
#endif
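
One structural detail worth noting in the header above: smu10_voltage_dependency_table keeps the old one-element trailing array (entries[1]) instead of a C99 flexible array member, so a table holding count records is allocated as the header plus count - 1 extra entries. A hedged sketch of that sizing, assuming count >= 1 (the allocation helper is illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dep_record {
	uint32_t clk;
	uint32_t vol;
};

struct dep_table {
	uint32_t count;
	struct dep_record entries[1];	/* 'count' records really follow */
};

static struct dep_table *dep_table_alloc(uint32_t count)
{
	/* one entry is already inside sizeof(*t), so add count - 1 more */
	struct dep_table *t = malloc(sizeof(*t) +
				     (count - 1) * sizeof(t->entries[0]));

	if (t)
		t->count = count;
	return t;
}

int main(void)
{
	struct dep_table *t = dep_table_alloc(3);

	if (!t)
		return 1;
	t->entries[2].clk = 933000;	/* kHz, hypothetical fclk level */
	printf("count=%u last=%u\n", t->count, t->entries[2].clk);
	free(t);
	return 0;
}
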
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h
index ae59a3fdea8a..edb68e302f6f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h
@@ -21,8 +21,8 @@
*
*/
-#ifndef RAVEN_INC_H
-#define RAVEN_INC_H
+#ifndef SMU10_INC_H
+#define SMU10_INC_H
#include "asic_reg/mp/mp_10_0_default.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index 7b54d48b2ce2..1ddce023218a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -25,7 +25,6 @@
#define _SMU7_CLOCK_POWER_GATING_H_
#include "smu7_hwmgr.h"
-#include "pp_asicblocks.h"
void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index df2a312ca6c9..7a87209f7258 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -40,7 +40,6 @@
#include "hwmgr.h"
#include "smu7_hwmgr.h"
-#include "smu7_smumgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
@@ -1353,12 +1352,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
-
- if (smu_data == NULL)
- return -EINVAL;
-
- if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ if (!hwmgr->avfs_supported)
return 0;
if (enable) {
@@ -1382,13 +1376,9 @@ static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- if (smu_data == NULL)
- return -EINVAL;
-
- if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ if (!hwmgr->avfs_supported)
return 0;
if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
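
The two smu7 hunks above make the same simplification: instead of reaching into the smumgr backend to inspect an AVFS status enum, callers test a single avfs_supported flag on the shared hwmgr. A minimal restatement of the resulting guard, with hypothetical reduced types:

#include <stdbool.h>
#include <stdio.h>

struct hwmgr_demo {
	bool avfs_supported;	/* stands in for the old backend status */
};

static int avfs_control_demo(struct hwmgr_demo *hwmgr, bool enable)
{
	if (!hwmgr->avfs_supported)
		return 0;	/* silently succeed on parts without AVFS */

	printf("AVFS %s\n", enable ? "on" : "off");
	return 0;
}

int main(void)
{
	struct hwmgr_demo hw = { .avfs_supported = true };

	return avfs_control_demo(&hw, true);
}
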
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 8c1f884ae555..75a465f771f0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -35,9 +35,8 @@
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "cz_ppsmc.h"
-#include "cz_hwmgr.h"
+#include "smu8_hwmgr.h"
#include "power_state.h"
-#include "cz_clockpowergating.h"
#include "pp_thermal.h"
#define ixSMUSVI_NB_CURRENTVID 0xD8230044
@@ -47,26 +46,26 @@
#define CURRENT_GFX_VID_MASK 0xff000000
#define CURRENT_GFX_VID__SHIFT 24
-static const unsigned long PhwCz_Magic = (unsigned long) PHM_Cz_Magic;
+static const unsigned long smu8_magic = (unsigned long) PHM_Cz_Magic;
-static struct cz_power_state *cast_PhwCzPowerState(struct pp_hw_power_state *hw_ps)
+static struct smu8_power_state *cast_smu8_power_state(struct pp_hw_power_state *hw_ps)
{
- if (PhwCz_Magic != hw_ps->magic)
+ if (smu8_magic != hw_ps->magic)
return NULL;
- return (struct cz_power_state *)hw_ps;
+ return (struct smu8_power_state *)hw_ps;
}
-static const struct cz_power_state *cast_const_PhwCzPowerState(
+static const struct smu8_power_state *cast_const_smu8_power_state(
const struct pp_hw_power_state *hw_ps)
{
- if (PhwCz_Magic != hw_ps->magic)
+ if (smu8_magic != hw_ps->magic)
return NULL;
- return (struct cz_power_state *)hw_ps;
+ return (struct smu8_power_state *)hw_ps;
}
-static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
+static uint32_t smu8_get_eclk_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
@@ -97,7 +96,7 @@ static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
return i;
}
-static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr,
+static uint32_t smu8_get_sclk_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
@@ -127,7 +126,7 @@ static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr,
return i;
}
-static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr,
+static uint32_t smu8_get_uvd_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
@@ -158,42 +157,42 @@ static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr,
return i;
}
-static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr)
+static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
- if (cz_hwmgr->max_sclk_level == 0) {
+ if (data->max_sclk_level == 0) {
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
- cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr) + 1;
+ data->max_sclk_level = smum_get_argument(hwmgr) + 1;
}
- return cz_hwmgr->max_sclk_level;
+ return data->max_sclk_level;
}
-static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
+static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct amdgpu_device *adev = hwmgr->adev;
- cz_hwmgr->gfx_ramp_step = 256*25/100;
- cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */
-
- cz_hwmgr->mgcg_cgtt_local0 = 0x00000000;
- cz_hwmgr->mgcg_cgtt_local1 = 0x00000000;
- cz_hwmgr->clock_slow_down_freq = 25000;
- cz_hwmgr->skip_clock_slow_down = 1;
- cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
- cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
- cz_hwmgr->voting_rights_clients = 0x00C00033;
- cz_hwmgr->static_screen_threshold = 8;
- cz_hwmgr->ddi_power_gating_disabled = 0;
- cz_hwmgr->bapm_enabled = 1;
- cz_hwmgr->voltage_drop_threshold = 0;
- cz_hwmgr->gfx_power_gating_threshold = 500;
- cz_hwmgr->vce_slow_sclk_threshold = 20000;
- cz_hwmgr->dce_slow_sclk_threshold = 30000;
- cz_hwmgr->disable_driver_thermal_policy = 1;
- cz_hwmgr->disable_nb_ps3_in_battery = 0;
+ data->gfx_ramp_step = 256*25/100;
+ data->gfx_ramp_delay = 1; /* by default, we delay 1us */
+
+ data->mgcg_cgtt_local0 = 0x00000000;
+ data->mgcg_cgtt_local1 = 0x00000000;
+ data->clock_slow_down_freq = 25000;
+ data->skip_clock_slow_down = 1;
+ data->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
+ data->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
+ data->voting_rights_clients = 0x00C00033;
+ data->static_screen_threshold = 8;
+ data->ddi_power_gating_disabled = 0;
+ data->bapm_enabled = 1;
+ data->voltage_drop_threshold = 0;
+ data->gfx_power_gating_threshold = 500;
+ data->vce_slow_sclk_threshold = 20000;
+ data->dce_slow_sclk_threshold = 30000;
+ data->disable_driver_thermal_policy = 1;
+ data->disable_nb_ps3_in_battery = 0;
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ABM);
@@ -204,14 +203,14 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicM3Arbiter);
- cz_hwmgr->override_dynamic_mgpg = 1;
+ data->override_dynamic_mgpg = 1;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicPatchPowerState);
- cz_hwmgr->thermal_auto_throttling_treshold = 0;
- cz_hwmgr->tdr_clock = 0;
- cz_hwmgr->disable_gfx_power_gating_in_uvd = 0;
+ data->thermal_auto_throttling_treshold = 0;
+ data->tdr_clock = 0;
+ data->disable_gfx_power_gating_in_uvd = 0;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicUVDState);
@@ -221,10 +220,10 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEDPM);
- cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
- cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
- cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
- cz_hwmgr->cc6_settings.cpu_pstate_separation_time = 0;
+ data->cc6_settings.cpu_cc6_disable = false;
+ data->cc6_settings.cpu_pstate_disable = false;
+ data->cc6_settings.nb_pstate_switch_disable = false;
+ data->cc6_settings.cpu_pstate_separation_time = 0;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableVoltageIsland);
@@ -245,30 +244,30 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
return 0;
}
-static uint32_t cz_convert_8Bit_index_to_voltage(
+static uint32_t smu8_convert_8Bit_index_to_voltage(
struct pp_hwmgr *hwmgr, uint16_t voltage)
{
return 6200 - (voltage * 25);
}
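
The decode above is linear: each 8-bit index steps 25 units down from a 6200 ceiling. Assuming the result is in millivolts (the function itself carries no unit), index 0 maps to 6200 mV and index 64 to 6200 - 64 * 25 = 4600 mV:

#include <stdio.h>

static unsigned int index_to_voltage(unsigned int idx)
{
	return 6200 - idx * 25;	/* idx 0 -> 6200, idx 64 -> 4600 */
}

int main(void)
{
	printf("%u %u\n", index_to_voltage(0), index_to_voltage(64));
	return 0;
}
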
-static int cz_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
+static int smu8_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
struct phm_clock_and_voltage_limits *table)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend;
- struct cz_sys_info *sys_info = &cz_hwmgr->sys_info;
+ struct smu8_hwmgr *data = hwmgr->backend;
+ struct smu8_sys_info *sys_info = &data->sys_info;
struct phm_clock_voltage_dependency_table *dep_table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
if (dep_table->count > 0) {
table->sclk = dep_table->entries[dep_table->count-1].clk;
- table->vddc = cz_convert_8Bit_index_to_voltage(hwmgr,
+ table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr,
(uint16_t)dep_table->entries[dep_table->count-1].v);
}
table->mclk = sys_info->nbp_memory_clock[0];
return 0;
}
-static int cz_init_dynamic_state_adjustment_rule_settings(
+static int smu8_init_dynamic_state_adjustment_rule_settings(
struct pp_hwmgr *hwmgr,
ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
{
@@ -306,9 +305,9 @@ static int cz_init_dynamic_state_adjustment_rule_settings(
return 0;
}
-static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
+static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend;
+ struct smu8_hwmgr *data = hwmgr->backend;
ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
uint32_t i;
int result = 0;
@@ -330,67 +329,67 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
return -EINVAL;
}
- cz_hwmgr->sys_info.bootup_uma_clock =
+ data->sys_info.bootup_uma_clock =
le32_to_cpu(info->ulBootUpUMAClock);
- cz_hwmgr->sys_info.bootup_engine_clock =
+ data->sys_info.bootup_engine_clock =
le32_to_cpu(info->ulBootUpEngineClock);
- cz_hwmgr->sys_info.dentist_vco_freq =
+ data->sys_info.dentist_vco_freq =
le32_to_cpu(info->ulDentistVCOFreq);
- cz_hwmgr->sys_info.system_config =
+ data->sys_info.system_config =
le32_to_cpu(info->ulSystemConfig);
- cz_hwmgr->sys_info.bootup_nb_voltage_index =
+ data->sys_info.bootup_nb_voltage_index =
le16_to_cpu(info->usBootUpNBVoltage);
- cz_hwmgr->sys_info.htc_hyst_lmt =
+ data->sys_info.htc_hyst_lmt =
(info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;
- cz_hwmgr->sys_info.htc_tmp_lmt =
+ data->sys_info.htc_tmp_lmt =
(info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;
- if (cz_hwmgr->sys_info.htc_tmp_lmt <=
- cz_hwmgr->sys_info.htc_hyst_lmt) {
+ if (data->sys_info.htc_tmp_lmt <=
+ data->sys_info.htc_hyst_lmt) {
pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
return -EINVAL;
}
- cz_hwmgr->sys_info.nb_dpm_enable =
- cz_hwmgr->enable_nb_ps_policy &&
+ data->sys_info.nb_dpm_enable =
+ data->enable_nb_ps_policy &&
(le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);
- for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
- if (i < CZ_NUM_NBPMEMORYCLOCK) {
- cz_hwmgr->sys_info.nbp_memory_clock[i] =
+ for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
+ if (i < SMU8_NUM_NBPMEMORYCLOCK) {
+ data->sys_info.nbp_memory_clock[i] =
le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
}
- cz_hwmgr->sys_info.nbp_n_clock[i] =
+ data->sys_info.nbp_n_clock[i] =
le32_to_cpu(info->ulNbpStateNClkFreq[i]);
}
for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
- cz_hwmgr->sys_info.display_clock[i] =
+ data->sys_info.display_clock[i] =
le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
}
/* Here use 4 levels, make sure not exceed */
- for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
- cz_hwmgr->sys_info.nbp_voltage_index[i] =
+ for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
+ data->sys_info.nbp_voltage_index[i] =
le16_to_cpu(info->usNBPStateVoltage[i]);
}
- if (!cz_hwmgr->sys_info.nb_dpm_enable) {
- for (i = 1; i < CZ_NUM_NBPSTATES; i++) {
- if (i < CZ_NUM_NBPMEMORYCLOCK) {
- cz_hwmgr->sys_info.nbp_memory_clock[i] =
- cz_hwmgr->sys_info.nbp_memory_clock[0];
+ if (!data->sys_info.nb_dpm_enable) {
+ for (i = 1; i < SMU8_NUM_NBPSTATES; i++) {
+ if (i < SMU8_NUM_NBPMEMORYCLOCK) {
+ data->sys_info.nbp_memory_clock[i] =
+ data->sys_info.nbp_memory_clock[0];
}
- cz_hwmgr->sys_info.nbp_n_clock[i] =
- cz_hwmgr->sys_info.nbp_n_clock[0];
- cz_hwmgr->sys_info.nbp_voltage_index[i] =
- cz_hwmgr->sys_info.nbp_voltage_index[0];
+ data->sys_info.nbp_n_clock[i] =
+ data->sys_info.nbp_n_clock[0];
+ data->sys_info.nbp_voltage_index[i] =
+ data->sys_info.nbp_voltage_index[0];
}
}
@@ -400,40 +399,40 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_EnableDFSBypass);
}
- cz_hwmgr->sys_info.uma_channel_number = info->ucUMAChannelNumber;
+ data->sys_info.uma_channel_number = info->ucUMAChannelNumber;
- cz_construct_max_power_limits_table (hwmgr,
+ smu8_construct_max_power_limits_table (hwmgr,
&hwmgr->dyn_state.max_clock_voltage_on_ac);
- cz_init_dynamic_state_adjustment_rule_settings(hwmgr,
+ smu8_init_dynamic_state_adjustment_rule_settings(hwmgr,
&info->sDISPCLK_Voltage[0]);
return result;
}
-static int cz_construct_boot_state(struct pp_hwmgr *hwmgr)
+static int smu8_construct_boot_state(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
- cz_hwmgr->boot_power_level.engineClock =
- cz_hwmgr->sys_info.bootup_engine_clock;
+ data->boot_power_level.engineClock =
+ data->sys_info.bootup_engine_clock;
- cz_hwmgr->boot_power_level.vddcIndex =
- (uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index;
+ data->boot_power_level.vddcIndex =
+ (uint8_t)data->sys_info.bootup_nb_voltage_index;
- cz_hwmgr->boot_power_level.dsDividerIndex = 0;
- cz_hwmgr->boot_power_level.ssDividerIndex = 0;
- cz_hwmgr->boot_power_level.allowGnbSlow = 1;
- cz_hwmgr->boot_power_level.forceNBPstate = 0;
- cz_hwmgr->boot_power_level.hysteresis_up = 0;
- cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0;
- cz_hwmgr->boot_power_level.display_wm = 0;
- cz_hwmgr->boot_power_level.vce_wm = 0;
+ data->boot_power_level.dsDividerIndex = 0;
+ data->boot_power_level.ssDividerIndex = 0;
+ data->boot_power_level.allowGnbSlow = 1;
+ data->boot_power_level.forceNBPstate = 0;
+ data->boot_power_level.hysteresis_up = 0;
+ data->boot_power_level.numSIMDToPowerDown = 0;
+ data->boot_power_level.display_wm = 0;
+ data->boot_power_level.vce_wm = 0;
return 0;
}
-static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
+static int smu8_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
{
struct SMU8_Fusion_ClkTable *clock_table;
int ret;
@@ -463,18 +462,18 @@ static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
clock_table = (struct SMU8_Fusion_ClkTable *)table;
/* patch clock table */
- PP_ASSERT_WITH_CODE((vddc_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
+ PP_ASSERT_WITH_CODE((vddc_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
"Dependency table entry exceeds max limit!", return -EINVAL;);
- PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
+ PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
"Dependency table entry exceeds max limit!", return -EINVAL;);
- PP_ASSERT_WITH_CODE((acp_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
+ PP_ASSERT_WITH_CODE((acp_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
"Dependency table entry exceeds max limit!", return -EINVAL;);
- PP_ASSERT_WITH_CODE((uvd_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
+ PP_ASSERT_WITH_CODE((uvd_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
"Dependency table entry exceeds max limit!", return -EINVAL;);
- PP_ASSERT_WITH_CODE((vce_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
+ PP_ASSERT_WITH_CODE((vce_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
"Dependency table entry exceeds max limit!", return -EINVAL;);
- for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) {
+ for (i = 0; i < SMU8_MAX_HARDWARE_POWERLEVELS; i++) {
/* vddc_sclk */
clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
@@ -552,9 +551,9 @@ static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
return ret;
}
-static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr)
+static int smu8_init_sclk_limit(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
unsigned long clock = 0, level;
@@ -562,25 +561,25 @@ static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr)
if (NULL == table || table->count <= 0)
return -EINVAL;
- cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
- cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk;
+ data->sclk_dpm.soft_min_clk = table->entries[0].clk;
+ data->sclk_dpm.hard_min_clk = table->entries[0].clk;
- level = cz_get_max_sclk_level(hwmgr) - 1;
+ level = smu8_get_max_sclk_level(hwmgr) - 1;
if (level < table->count)
clock = table->entries[level].clk;
else
clock = table->entries[table->count - 1].clk;
- cz_hwmgr->sclk_dpm.soft_max_clk = clock;
- cz_hwmgr->sclk_dpm.hard_max_clk = clock;
+ data->sclk_dpm.soft_max_clk = clock;
+ data->sclk_dpm.hard_max_clk = clock;
return 0;
}
-static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
+static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_uvd_clock_voltage_dependency_table *table =
hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
unsigned long clock = 0, level;
@@ -588,8 +587,8 @@ static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
if (NULL == table || table->count <= 0)
return -EINVAL;
- cz_hwmgr->uvd_dpm.soft_min_clk = 0;
- cz_hwmgr->uvd_dpm.hard_min_clk = 0;
+ data->uvd_dpm.soft_min_clk = 0;
+ data->uvd_dpm.hard_min_clk = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
level = smum_get_argument(hwmgr);
@@ -599,15 +598,15 @@ static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
else
clock = table->entries[table->count - 1].vclk;
- cz_hwmgr->uvd_dpm.soft_max_clk = clock;
- cz_hwmgr->uvd_dpm.hard_max_clk = clock;
+ data->uvd_dpm.soft_max_clk = clock;
+ data->uvd_dpm.hard_max_clk = clock;
return 0;
}
-static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
+static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_vce_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
unsigned long clock = 0, level;
@@ -615,8 +614,8 @@ static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
if (NULL == table || table->count <= 0)
return -EINVAL;
- cz_hwmgr->vce_dpm.soft_min_clk = 0;
- cz_hwmgr->vce_dpm.hard_min_clk = 0;
+ data->vce_dpm.soft_min_clk = 0;
+ data->vce_dpm.hard_min_clk = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
level = smum_get_argument(hwmgr);
@@ -626,15 +625,15 @@ static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
else
clock = table->entries[table->count - 1].ecclk;
- cz_hwmgr->vce_dpm.soft_max_clk = clock;
- cz_hwmgr->vce_dpm.hard_max_clk = clock;
+ data->vce_dpm.soft_max_clk = clock;
+ data->vce_dpm.hard_max_clk = clock;
return 0;
}
-static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
+static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_acp_clock_voltage_dependency_table *table =
hwmgr->dyn_state.acp_clock_voltage_dependency_table;
unsigned long clock = 0, level;
@@ -642,8 +641,8 @@ static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
if (NULL == table || table->count <= 0)
return -EINVAL;
- cz_hwmgr->acp_dpm.soft_min_clk = 0;
- cz_hwmgr->acp_dpm.hard_min_clk = 0;
+ data->acp_dpm.soft_min_clk = 0;
+ data->acp_dpm.hard_min_clk = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
level = smum_get_argument(hwmgr);
@@ -653,32 +652,32 @@ static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
else
clock = table->entries[table->count - 1].acpclk;
- cz_hwmgr->acp_dpm.soft_max_clk = clock;
- cz_hwmgr->acp_dpm.hard_max_clk = clock;
+ data->acp_dpm.soft_max_clk = clock;
+ data->acp_dpm.hard_max_clk = clock;
return 0;
}
-static void cz_init_power_gate_state(struct pp_hwmgr *hwmgr)
+static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
- cz_hwmgr->uvd_power_gated = false;
- cz_hwmgr->vce_power_gated = false;
- cz_hwmgr->samu_power_gated = false;
- cz_hwmgr->acp_power_gated = false;
- cz_hwmgr->pgacpinit = true;
+ data->uvd_power_gated = false;
+ data->vce_power_gated = false;
+ data->samu_power_gated = false;
+ data->acp_power_gated = false;
+ data->pgacpinit = true;
}
-static void cz_init_sclk_threshold(struct pp_hwmgr *hwmgr)
+static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
- cz_hwmgr->low_sclk_interrupt_threshold = 0;
+ data->low_sclk_interrupt_threshold = 0;
}
-static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
+static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
@@ -687,29 +686,29 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
unsigned long stable_pstate_sclk;
unsigned long percentage;
- cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
- level = cz_get_max_sclk_level(hwmgr) - 1;
+ data->sclk_dpm.soft_min_clk = table->entries[0].clk;
+ level = smu8_get_max_sclk_level(hwmgr) - 1;
if (level < table->count)
- cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[level].clk;
+ data->sclk_dpm.soft_max_clk = table->entries[level].clk;
else
- cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
+ data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
clock = hwmgr->display_config.min_core_set_clock;
if (clock == 0)
pr_debug("min_core_set_clock not set\n");
- if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
- cz_hwmgr->sclk_dpm.hard_min_clk = clock;
+ if (data->sclk_dpm.hard_min_clk != clock) {
+ data->sclk_dpm.hard_min_clk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkHardMin,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.hard_min_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.hard_min_clk,
PPSMC_MSG_SetSclkHardMin));
}
- clock = cz_hwmgr->sclk_dpm.soft_min_clk;
+ clock = data->sclk_dpm.soft_min_clk;
/* update minimum clocks for Stable P-State feature */
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -723,36 +722,36 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
clock = stable_pstate_sclk;
}
- if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) {
- cz_hwmgr->sclk_dpm.soft_min_clk = clock;
+ if (data->sclk_dpm.soft_min_clk != clock) {
+ data->sclk_dpm.soft_min_clk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_min_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));
}
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState) &&
- cz_hwmgr->sclk_dpm.soft_max_clk != clock) {
- cz_hwmgr->sclk_dpm.soft_max_clk = clock;
+ data->sclk_dpm.soft_max_clk != clock) {
+ data->sclk_dpm.soft_max_clk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMax));
}
return 0;
}
-static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
+static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr;
if (clks == 0)
- clks = CZ_MIN_DEEP_SLEEP_SCLK;
+ clks = SMU8_MIN_DEEP_SLEEP_SCLK;
PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
@@ -764,21 +763,21 @@ static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
return 0;
}
-static int cz_set_watermark_threshold(struct pp_hwmgr *hwmgr)
+static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr =
- (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetWatermarkFrequency,
- cz_hwmgr->sclk_dpm.soft_max_clk);
+ data->sclk_dpm.soft_max_clk);
return 0;
}
-static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
+static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
{
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *hw_data = hwmgr->backend;
if (hw_data->is_nb_dpm_enabled) {
if (enable) {
@@ -799,35 +798,35 @@ static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, b
return 0;
}
-static int cz_disable_nb_dpm(struct pp_hwmgr *hwmgr)
+static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
unsigned long dpm_features = 0;
- if (cz_hwmgr->is_nb_dpm_enabled) {
- cz_nbdpm_pstate_enable_disable(hwmgr, true, true);
+ if (data->is_nb_dpm_enabled) {
+ smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
dpm_features |= NB_DPM_MASK;
ret = smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
dpm_features);
if (ret == 0)
- cz_hwmgr->is_nb_dpm_enabled = false;
+ data->is_nb_dpm_enabled = false;
}
return ret;
}
-static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr)
+static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
unsigned long dpm_features = 0;
- if (!cz_hwmgr->is_nb_dpm_enabled) {
+ if (!data->is_nb_dpm_enabled) {
PP_DBG_LOG("enabling ALL SMU features.\n");
dpm_features |= NB_DPM_MASK;
ret = smum_send_msg_to_smc_with_parameter(
@@ -835,94 +834,94 @@ static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr)
PPSMC_MSG_EnableAllSmuFeatures,
dpm_features);
if (ret == 0)
- cz_hwmgr->is_nb_dpm_enabled = true;
+ data->is_nb_dpm_enabled = true;
}
return ret;
}
-static int cz_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
+static int smu8_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
{
bool disable_switch;
bool enable_low_mem_state;
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *hw_data = hwmgr->backend;
const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
- const struct cz_power_state *pnew_state = cast_const_PhwCzPowerState(states->pnew_state);
+ const struct smu8_power_state *pnew_state = cast_const_smu8_power_state(states->pnew_state);
if (hw_data->sys_info.nb_dpm_enable) {
disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;
if (pnew_state->action == FORCE_HIGH)
- cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
+ smu8_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
else if (pnew_state->action == CANCEL_FORCE_HIGH)
- cz_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
+ smu8_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
else
- cz_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
+ smu8_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
}
return 0;
}
-static int cz_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
int ret = 0;
- cz_update_sclk_limit(hwmgr);
- cz_set_deep_sleep_sclk_threshold(hwmgr);
- cz_set_watermark_threshold(hwmgr);
- ret = cz_enable_nb_dpm(hwmgr);
+ smu8_update_sclk_limit(hwmgr);
+ smu8_set_deep_sleep_sclk_threshold(hwmgr);
+ smu8_set_watermark_threshold(hwmgr);
+ ret = smu8_enable_nb_dpm(hwmgr);
if (ret)
return ret;
- cz_update_low_mem_pstate(hwmgr, input);
+ smu8_update_low_mem_pstate(hwmgr, input);
return 0;
};
-static int cz_setup_asic_task(struct pp_hwmgr *hwmgr)
+static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr)
{
int ret;
- ret = cz_upload_pptable_to_smu(hwmgr);
+ ret = smu8_upload_pptable_to_smu(hwmgr);
if (ret)
return ret;
- ret = cz_init_sclk_limit(hwmgr);
+ ret = smu8_init_sclk_limit(hwmgr);
if (ret)
return ret;
- ret = cz_init_uvd_limit(hwmgr);
+ ret = smu8_init_uvd_limit(hwmgr);
if (ret)
return ret;
- ret = cz_init_vce_limit(hwmgr);
+ ret = smu8_init_vce_limit(hwmgr);
if (ret)
return ret;
- ret = cz_init_acp_limit(hwmgr);
+ ret = smu8_init_acp_limit(hwmgr);
if (ret)
return ret;
- cz_init_power_gate_state(hwmgr);
- cz_init_sclk_threshold(hwmgr);
+ smu8_init_power_gate_state(hwmgr);
+ smu8_init_sclk_threshold(hwmgr);
return 0;
}
-static void cz_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
+static void smu8_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *hw_data = hwmgr->backend;
hw_data->disp_clk_bypass_pending = false;
hw_data->disp_clk_bypass = false;
}
-static void cz_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
+static void smu8_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *hw_data = hwmgr->backend;
hw_data->is_nb_dpm_enabled = false;
}
-static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr)
+static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *hw_data = hwmgr->backend;
hw_data->cc6_settings.cc6_setting_changed = false;
hw_data->cc6_settings.cpu_pstate_separation_time = 0;
@@ -930,45 +929,47 @@ static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr)
hw_data->cc6_settings.cpu_pstate_disable = false;
}
-static int cz_power_off_asic(struct pp_hwmgr *hwmgr)
+static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
{
- cz_power_up_display_clock_sys_pll(hwmgr);
- cz_clear_nb_dpm_flag(hwmgr);
- cz_reset_cc6_data(hwmgr);
+ smu8_power_up_display_clock_sys_pll(hwmgr);
+ smu8_clear_nb_dpm_flag(hwmgr);
+ smu8_reset_cc6_data(hwmgr);
return 0;
};
-static void cz_program_voting_clients(struct pp_hwmgr *hwmgr)
+static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr)
{
- PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0,
- PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0,
+ SMU8_VOTINGRIGHTSCLIENTS_DFLT0);
}
-static void cz_clear_voting_clients(struct pp_hwmgr *hwmgr)
+static void smu8_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
- PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0, 0);
}
-static int cz_start_dpm(struct pp_hwmgr *hwmgr)
+static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
- cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled;
+ data->dpm_flags |= DPMFlags_SCLK_Enabled;
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures,
SCLK_DPM_MASK);
}
-static int cz_stop_dpm(struct pp_hwmgr *hwmgr)
+static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
unsigned long dpm_features = 0;
- if (cz_hwmgr->dpm_flags & DPMFlags_SCLK_Enabled) {
+ if (data->dpm_flags & DPMFlags_SCLK_Enabled) {
dpm_features |= SCLK_DPM_MASK;
- cz_hwmgr->dpm_flags &= ~DPMFlags_SCLK_Enabled;
+ data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
dpm_features);
@@ -976,80 +977,80 @@ static int cz_stop_dpm(struct pp_hwmgr *hwmgr)
return ret;
}
-static int cz_program_bootup_state(struct pp_hwmgr *hwmgr)
+static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
- cz_hwmgr->sclk_dpm.soft_min_clk = cz_hwmgr->sys_info.bootup_engine_clock;
- cz_hwmgr->sclk_dpm.soft_max_clk = cz_hwmgr->sys_info.bootup_engine_clock;
+ data->sclk_dpm.soft_min_clk = data->sys_info.bootup_engine_clock;
+ data->sclk_dpm.soft_max_clk = data->sys_info.bootup_engine_clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_min_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMax));
return 0;
}
-static void cz_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
+static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
- cz_hwmgr->acp_boot_level = 0xff;
+ data->acp_boot_level = 0xff;
}
-static int cz_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
- cz_disable_nb_dpm(hwmgr);
+ smu8_disable_nb_dpm(hwmgr);
- cz_clear_voting_clients(hwmgr);
- if (cz_stop_dpm(hwmgr))
+ smu8_clear_voting_clients(hwmgr);
+ if (smu8_stop_dpm(hwmgr))
return -EINVAL;
return 0;
};
-static int cz_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
- cz_program_voting_clients(hwmgr);
- if (cz_start_dpm(hwmgr))
+ smu8_program_voting_clients(hwmgr);
+ if (smu8_start_dpm(hwmgr))
return -EINVAL;
- cz_program_bootup_state(hwmgr);
- cz_reset_acp_boot_level(hwmgr);
+ smu8_program_bootup_state(hwmgr);
+ smu8_reset_acp_boot_level(hwmgr);
return 0;
};
-static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *prequest_ps,
const struct pp_power_state *pcurrent_ps)
{
- struct cz_power_state *cz_ps =
- cast_PhwCzPowerState(&prequest_ps->hardware);
+ struct smu8_power_state *smu8_ps =
+ cast_smu8_power_state(&prequest_ps->hardware);
- const struct cz_power_state *cz_current_ps =
- cast_const_PhwCzPowerState(&pcurrent_ps->hardware);
+ const struct smu8_power_state *smu8_current_ps =
+ cast_const_smu8_power_state(&pcurrent_ps->hardware);
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct PP_Clocks clocks = {0, 0, 0, 0};
bool force_high;
uint32_t num_of_active_displays = 0;
struct cgs_display_info info = {0};
- cz_ps->need_dfs_bypass = true;
+ smu8_ps->need_dfs_bypass = true;
- cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
+ data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ?
hwmgr->display_config.min_mem_set_clock :
- cz_hwmgr->sys_info.nbp_memory_clock[1];
+ data->sys_info.nbp_memory_clock[1];
cgs_get_active_displays_info(hwmgr->device, &info);
num_of_active_displays = info.display_count;
@@ -1057,56 +1058,56 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
- force_high = (clocks.memoryClock > cz_hwmgr->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1])
+ force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1])
|| (num_of_active_displays >= 3);
- cz_ps->action = cz_current_ps->action;
+ smu8_ps->action = smu8_current_ps->action;
if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- cz_nbdpm_pstate_enable_disable(hwmgr, false, false);
+ smu8_nbdpm_pstate_enable_disable(hwmgr, false, false);
else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
- cz_nbdpm_pstate_enable_disable(hwmgr, false, true);
- else if (!force_high && (cz_ps->action == FORCE_HIGH))
- cz_ps->action = CANCEL_FORCE_HIGH;
- else if (force_high && (cz_ps->action != FORCE_HIGH))
- cz_ps->action = FORCE_HIGH;
+ smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
+ else if (!force_high && (smu8_ps->action == FORCE_HIGH))
+ smu8_ps->action = CANCEL_FORCE_HIGH;
+ else if (force_high && (smu8_ps->action != FORCE_HIGH))
+ smu8_ps->action = FORCE_HIGH;
else
- cz_ps->action = DO_NOTHING;
+ smu8_ps->action = DO_NOTHING;
return 0;
}
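
The FORCE_HIGH logic above boils down to one predicate: request the high NB P-state when the needed memory clock exceeds the last NBP memory-clock level, or when three or more displays are active. A self-contained restatement with hypothetical clock values:

#include <stdbool.h>
#include <stdio.h>

static bool need_force_high(unsigned long mclk_req,
			    unsigned long nbp_last_mclk,
			    unsigned int num_displays)
{
	return mclk_req > nbp_last_mclk || num_displays >= 3;
}

int main(void)
{
	/* hypothetical: 1600 MHz request vs. a 1333 MHz last NBP level */
	printf("%d\n", need_force_high(1600, 1333, 1));	/* 1: clock-driven */
	printf("%d\n", need_force_high(800, 1333, 3));	/* 1: display-driven */
	return 0;
}
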
-static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+static int smu8_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct cz_hwmgr *data;
+ struct smu8_hwmgr *data;
- data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
+ data = kzalloc(sizeof(struct smu8_hwmgr), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
hwmgr->backend = data;
- result = cz_initialize_dpm_defaults(hwmgr);
+ result = smu8_initialize_dpm_defaults(hwmgr);
if (result != 0) {
- pr_err("cz_initialize_dpm_defaults failed\n");
+ pr_err("smu8_initialize_dpm_defaults failed\n");
return result;
}
- result = cz_get_system_info_data(hwmgr);
+ result = smu8_get_system_info_data(hwmgr);
if (result != 0) {
- pr_err("cz_get_system_info_data failed\n");
+ pr_err("smu8_get_system_info_data failed\n");
return result;
}
- cz_construct_boot_state(hwmgr);
+ smu8_construct_boot_state(hwmgr);
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS;
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = SMU8_MAX_HARDWARE_POWERLEVELS;
return result;
}
-static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+static int smu8_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
if (hwmgr != NULL) {
kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
@@ -1118,28 +1119,28 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
return 0;
}
-static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
+static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMin));
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMax));
return 0;
}
-static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
unsigned long clock = 0, level;
@@ -1147,56 +1148,56 @@ static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
if (NULL == table || table->count <= 0)
return -EINVAL;
- cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
- cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk;
+ data->sclk_dpm.soft_min_clk = table->entries[0].clk;
+ data->sclk_dpm.hard_min_clk = table->entries[0].clk;
hwmgr->pstate_sclk = table->entries[0].clk;
hwmgr->pstate_mclk = 0;
- level = cz_get_max_sclk_level(hwmgr) - 1;
+ level = smu8_get_max_sclk_level(hwmgr) - 1;
if (level < table->count)
clock = table->entries[level].clk;
else
clock = table->entries[table->count - 1].clk;
- cz_hwmgr->sclk_dpm.soft_max_clk = clock;
- cz_hwmgr->sclk_dpm.hard_max_clk = clock;
+ data->sclk_dpm.soft_max_clk = clock;
+ data->sclk_dpm.hard_max_clk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_min_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMax));
return 0;
}
-static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_min_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMax));
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_min_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));
return 0;
}
-static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
int ret = 0;
@@ -1204,15 +1205,15 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- ret = cz_phm_force_dpm_highest(hwmgr);
+ ret = smu8_phm_force_dpm_highest(hwmgr);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
- ret = cz_phm_force_dpm_lowest(hwmgr);
+ ret = smu8_phm_force_dpm_lowest(hwmgr);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
- ret = cz_phm_unforce_dpm_levels(hwmgr);
+ ret = smu8_phm_unforce_dpm_levels(hwmgr);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -1223,14 +1224,14 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
return ret;
}
-int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
+static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
return 0;
}
-int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
+static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
return smum_send_msg_to_smc_with_parameter(
@@ -1242,52 +1243,22 @@ int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+static int smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- struct phm_uvd_clock_voltage_dependency_table *ptable =
- hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
-
- if (!bgate) {
- /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
- if (PP_CAP(PHM_PlatformCaps_StablePState) ||
- hwmgr->en_umd_pstate) {
- cz_hwmgr->uvd_dpm.hard_min_clk =
- ptable->entries[ptable->count - 1].vclk;
-
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetUvdHardMin,
- cz_get_uvd_level(hwmgr,
- cz_hwmgr->uvd_dpm.hard_min_clk,
- PPSMC_MSG_SetUvdHardMin));
-
- cz_enable_disable_uvd_dpm(hwmgr, true);
- } else {
- cz_enable_disable_uvd_dpm(hwmgr, true);
- }
- } else {
- cz_enable_disable_uvd_dpm(hwmgr, false);
- }
-
- return 0;
-}
-
-int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
-{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_vce_clock_voltage_dependency_table *ptable =
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
if (PP_CAP(PHM_PlatformCaps_StablePState) ||
hwmgr->en_umd_pstate) {
- cz_hwmgr->vce_dpm.hard_min_clk =
+ data->vce_dpm.hard_min_clk =
ptable->entries[ptable->count - 1].ecclk;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkHardMin,
- cz_get_eclk_level(hwmgr,
- cz_hwmgr->vce_dpm.hard_min_clk,
+ smu8_get_eclk_level(hwmgr,
+ data->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
} else {
@@ -1301,7 +1272,7 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
+static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
return smum_send_msg_to_smc(hwmgr,
@@ -1309,7 +1280,7 @@ int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
+static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
return smum_send_msg_to_smc(hwmgr,
@@ -1317,17 +1288,17 @@ int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-static uint32_t cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t smu8_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
- return cz_hwmgr->sys_info.bootup_uma_clock;
+ return data->sys_info.bootup_uma_clock;
}
-static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t smu8_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
- struct cz_power_state *cz_ps;
+ struct smu8_power_state *smu8_ps;
if (hwmgr == NULL)
return -EINVAL;
@@ -1337,59 +1308,59 @@ static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
if (ps == NULL)
return -EINVAL;
- cz_ps = cast_PhwCzPowerState(&ps->hardware);
+ smu8_ps = cast_smu8_power_state(&ps->hardware);
if (low)
- return cz_ps->levels[0].engineClock;
+ return smu8_ps->levels[0].engineClock;
else
- return cz_ps->levels[cz_ps->level-1].engineClock;
+ return smu8_ps->levels[smu8_ps->level-1].engineClock;
}
-static int cz_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
+static int smu8_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps);
+ struct smu8_hwmgr *data = hwmgr->backend;
+ struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
- cz_ps->level = 1;
- cz_ps->nbps_flags = 0;
- cz_ps->bapm_flags = 0;
- cz_ps->levels[0] = cz_hwmgr->boot_power_level;
+ smu8_ps->level = 1;
+ smu8_ps->nbps_flags = 0;
+ smu8_ps->bapm_flags = 0;
+ smu8_ps->levels[0] = data->boot_power_level;
return 0;
}
-static int cz_dpm_get_pp_table_entry_callback(
+static int smu8_dpm_get_pp_table_entry_callback(
struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps,
unsigned int index,
const void *clock_info)
{
- struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps);
+ struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
- const ATOM_PPLIB_CZ_CLOCK_INFO *cz_clock_info = clock_info;
+ const ATOM_PPLIB_CZ_CLOCK_INFO *smu8_clock_info = clock_info;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
- uint8_t clock_info_index = cz_clock_info->index;
+ uint8_t clock_info_index = smu8_clock_info->index;
if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
- cz_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
- cz_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
+ smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
+ smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
- cz_ps->level = index + 1;
+ smu8_ps->level = index + 1;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
- cz_ps->levels[index].dsDividerIndex = 5;
- cz_ps->levels[index].ssDividerIndex = 5;
+ smu8_ps->levels[index].dsDividerIndex = 5;
+ smu8_ps->levels[index].ssDividerIndex = 5;
}
return 0;
}
-static int cz_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
+static int smu8_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
int result;
unsigned long ret = 0;
@@ -1399,31 +1370,31 @@ static int cz_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
return result ? 0 : ret;
}
-static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
+static int smu8_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
unsigned long entry, struct pp_power_state *ps)
{
int result;
- struct cz_power_state *cz_ps;
+ struct smu8_power_state *smu8_ps;
- ps->hardware.magic = PhwCz_Magic;
+ ps->hardware.magic = smu8_magic;
- cz_ps = cast_PhwCzPowerState(&(ps->hardware));
+ smu8_ps = cast_smu8_power_state(&(ps->hardware));
result = pp_tables_get_entry(hwmgr, entry, ps,
- cz_dpm_get_pp_table_entry_callback);
+ smu8_dpm_get_pp_table_entry_callback);
- cz_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
- cz_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
+ smu8_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
+ smu8_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
return result;
}
-static int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
+static int smu8_get_power_state_size(struct pp_hwmgr *hwmgr)
{
- return sizeof(struct cz_power_state);
+ return sizeof(struct smu8_power_state);
}
-static void cz_hw_print_display_cfg(
+static void smu8_hw_print_display_cfg(
const struct cc6_settings *cc6_settings)
{
PP_DBG_LOG("New Display Configuration:\n");
@@ -1438,16 +1409,16 @@ static void cz_hw_print_display_cfg(
cc6_settings->cpu_pstate_separation_time);
}
- static int cz_set_cpu_power_state(struct pp_hwmgr *hwmgr)
+ static int smu8_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *hw_data = hwmgr->backend;
uint32_t data = 0;
if (hw_data->cc6_settings.cc6_setting_changed) {
hw_data->cc6_settings.cc6_setting_changed = false;
- cz_hw_print_display_cfg(&hw_data->cc6_settings);
+ smu8_hw_print_display_cfg(&hw_data->cc6_settings);
data |= (hw_data->cc6_settings.cpu_pstate_separation_time
& PWRMGT_SEPARATION_TIME_MASK)
@@ -1471,10 +1442,10 @@ static void cz_hw_print_display_cfg(
}
-static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
+static int smu8_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
{
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *hw_data = hwmgr->backend;
if (separation_time !=
hw_data->cc6_settings.cpu_pstate_separation_time ||
@@ -1498,7 +1469,7 @@ static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
return 0;
}
-static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
+static int smu8_get_dal_power_level(struct pp_hwmgr *hwmgr,
struct amd_pp_simple_clock_info *info)
{
uint32_t i;
@@ -1519,7 +1490,7 @@ static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
-static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
+static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
switch (type) {
@@ -1538,10 +1509,10 @@ static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
return 0;
}
-static int cz_print_clock_levels(struct pp_hwmgr *hwmgr,
+static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
- struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *sclk_table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
int i, now, size = 0;
@@ -1566,10 +1537,10 @@ static int cz_print_clock_levels(struct pp_hwmgr *hwmgr,
TARGET_AND_CURRENT_PROFILE_INDEX,
CURR_MCLK_INDEX);
- for (i = CZ_NUM_NBPMEMORYCLOCK; i > 0; i--)
+ for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
size += sprintf(buf + size, "%d: %uMhz %s\n",
- CZ_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
- (CZ_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
+ SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
+ (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
break;
default:
break;
@@ -1577,20 +1548,20 @@ static int cz_print_clock_levels(struct pp_hwmgr *hwmgr,
return size;
}
-static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+static int smu8_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
PHM_PerformanceLevelDesignation designation, uint32_t index,
PHM_PerformanceLevel *level)
{
- const struct cz_power_state *ps;
- struct cz_hwmgr *data;
+ const struct smu8_power_state *ps;
+ struct smu8_hwmgr *data;
uint32_t level_index;
uint32_t i;
if (level == NULL || hwmgr == NULL || state == NULL)
return -EINVAL;
- data = (struct cz_hwmgr *)(hwmgr->backend);
- ps = cast_const_PhwCzPowerState(state);
+ data = hwmgr->backend;
+ ps = cast_const_smu8_power_state(state);
level_index = index > ps->level - 1 ? ps->level - 1 : index;
level->coreClock = ps->levels[level_index].engineClock;
@@ -1605,21 +1576,21 @@ static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p
}
if (level_index == 0)
- level->memory_clock = data->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1];
+ level->memory_clock = data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1];
else
level->memory_clock = data->sys_info.nbp_memory_clock[0];
- level->vddc = (cz_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
+ level->vddc = (smu8_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
level->nonLocalMemoryFreq = 0;
level->nonLocalMemoryWidth = 0;
return 0;
}
-static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
+static int smu8_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
{
- const struct cz_power_state *ps = cast_const_PhwCzPowerState(state);
+ const struct smu8_power_state *ps = cast_const_smu8_power_state(state);
clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));
@@ -1627,14 +1598,14 @@ static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
return 0;
}
-static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
+static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
struct amd_pp_clocks *clocks)
{
- struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
int i;
struct phm_clock_voltage_dependency_table *table;
- clocks->count = cz_get_max_sclk_level(hwmgr);
+ clocks->count = smu8_get_max_sclk_level(hwmgr);
switch (type) {
case amd_pp_disp_clock:
for (i = 0; i < clocks->count; i++)
@@ -1646,7 +1617,7 @@ static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type t
clocks->clock[i] = table->entries[i].clk;
break;
case amd_pp_mem_clock:
- clocks->count = CZ_NUM_NBPMEMORYCLOCK;
+ clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
for (i = 0; i < clocks->count; i++)
clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i];
break;
@@ -1657,7 +1628,7 @@ static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type t
return 0;
}
-static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
+static int smu8_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
@@ -1668,7 +1639,7 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
if ((NULL == table) || (table->count <= 0) || (clocks == NULL))
return -EINVAL;
- level = cz_get_max_sclk_level(hwmgr) - 1;
+ level = smu8_get_max_sclk_level(hwmgr) - 1;
if (level < table->count)
clocks->engine_max_clock = table->entries[level].clk;
@@ -1680,7 +1651,7 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
return 0;
}
-static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+static int smu8_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
int actual_temp = 0;
uint32_t val = cgs_read_ind_register(hwmgr->device,
@@ -1695,10 +1666,10 @@ static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
return actual_temp;
}
-static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
@@ -1736,18 +1707,18 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
case AMDGPU_PP_SENSOR_VDDNB:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
- vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
+ vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp);
*((uint32_t *)value) = vddnb;
return 0;
case AMDGPU_PP_SENSOR_VDDGFX:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
- vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
+ vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
*((uint32_t *)value) = vddgfx;
return 0;
case AMDGPU_PP_SENSOR_UVD_VCLK:
- if (!cz_hwmgr->uvd_power_gated) {
- if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ if (!data->uvd_power_gated) {
+ if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
return -EINVAL;
} else {
vclk = uvd_table->entries[uvd_index].vclk;
@@ -1758,8 +1729,8 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = 0;
return 0;
case AMDGPU_PP_SENSOR_UVD_DCLK:
- if (!cz_hwmgr->uvd_power_gated) {
- if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ if (!data->uvd_power_gated) {
+ if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
return -EINVAL;
} else {
dclk = uvd_table->entries[uvd_index].dclk;
@@ -1770,8 +1741,8 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = 0;
return 0;
case AMDGPU_PP_SENSOR_VCE_ECCLK:
- if (!cz_hwmgr->vce_power_gated) {
- if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ if (!data->vce_power_gated) {
+ if (vce_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
return -EINVAL;
} else {
ecclk = vce_table->entries[vce_index].ecclk;
@@ -1792,20 +1763,20 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = activity_percent;
return 0;
case AMDGPU_PP_SENSOR_UVD_POWER:
- *((uint32_t *)value) = cz_hwmgr->uvd_power_gated ? 0 : 1;
+ *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
return 0;
case AMDGPU_PP_SENSOR_VCE_POWER:
- *((uint32_t *)value) = cz_hwmgr->vce_power_gated ? 0 : 1;
+ *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
return 0;
case AMDGPU_PP_SENSOR_GPU_TEMP:
- *((uint32_t *)value) = cz_thermal_get_temperature(hwmgr);
+ *((uint32_t *)value) = smu8_thermal_get_temperature(hwmgr);
return 0;
default:
return -EINVAL;
}
}
-static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
uint32_t virtual_addr_low,
uint32_t virtual_addr_hi,
uint32_t mc_addr_low,
@@ -1831,56 +1802,190 @@ static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
return 0;
}
-static int cz_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+static int smu8_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *thermal_data)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
- thermal_data->max = (cz_hwmgr->thermal_auto_throttling_treshold +
- cz_hwmgr->sys_info.htc_hyst_lmt) *
+ thermal_data->max = (data->thermal_auto_throttling_treshold +
+ data->sys_info.htc_hyst_lmt) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return 0;
}
-static const struct pp_hwmgr_func cz_hwmgr_funcs = {
- .backend_init = cz_hwmgr_backend_init,
- .backend_fini = cz_hwmgr_backend_fini,
- .apply_state_adjust_rules = cz_apply_state_adjust_rules,
- .force_dpm_level = cz_dpm_force_dpm_level,
- .get_power_state_size = cz_get_power_state_size,
- .powerdown_uvd = cz_dpm_powerdown_uvd,
- .powergate_uvd = cz_dpm_powergate_uvd,
- .powergate_vce = cz_dpm_powergate_vce,
- .get_mclk = cz_dpm_get_mclk,
- .get_sclk = cz_dpm_get_sclk,
- .patch_boot_state = cz_dpm_patch_boot_state,
- .get_pp_table_entry = cz_dpm_get_pp_table_entry,
- .get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries,
- .set_cpu_power_state = cz_set_cpu_power_state,
- .store_cc6_data = cz_store_cc6_data,
- .force_clock_level = cz_force_clock_level,
- .print_clock_levels = cz_print_clock_levels,
- .get_dal_power_level = cz_get_dal_power_level,
- .get_performance_level = cz_get_performance_level,
- .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
- .get_clock_by_type = cz_get_clock_by_type,
- .get_max_high_clocks = cz_get_max_high_clocks,
- .read_sensor = cz_read_sensor,
- .power_off_asic = cz_power_off_asic,
- .asic_setup = cz_setup_asic_task,
- .dynamic_state_management_enable = cz_enable_dpm_tasks,
- .power_state_set = cz_set_power_state_tasks,
- .dynamic_state_management_disable = cz_disable_dpm_tasks,
- .notify_cac_buffer_info = cz_notify_cac_buffer_info,
- .get_thermal_temperature_range = cz_get_thermal_temperature_range,
+static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+ struct smu8_hwmgr *data = hwmgr->backend;
+ uint32_t dpm_features = 0;
+
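+ /*
+ * UVD DPM is enabled only when the platform cap allows it; the disable
+ * path always sends the feature mask so the SMU bit is cleared even if
+ * the cap was never set.
+ */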
+ if (enable &&
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM)) {
+ data->dpm_flags |= DPMFlags_UVD_Enabled;
+ dpm_features |= UVD_DPM_MASK;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+ } else {
+ dpm_features |= UVD_DPM_MASK;
+ data->dpm_flags &= ~DPMFlags_UVD_Enabled;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+ }
+ return 0;
+}
+
+int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ struct smu8_hwmgr *data = hwmgr->backend;
+ struct phm_uvd_clock_voltage_dependency_table *ptable =
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+
+ if (!bgate) {
+ /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
+ if (PP_CAP(PHM_PlatformCaps_StablePState) ||
+ hwmgr->en_umd_pstate) {
+ data->uvd_dpm.hard_min_clk =
+ ptable->entries[ptable->count - 1].vclk;
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetUvdHardMin,
+ smu8_get_uvd_level(hwmgr,
+ data->uvd_dpm.hard_min_clk,
+ PPSMC_MSG_SetUvdHardMin));
+
+ smu8_enable_disable_uvd_dpm(hwmgr, true);
+ } else {
+ smu8_enable_disable_uvd_dpm(hwmgr, true);
+ }
+ } else {
+ smu8_enable_disable_uvd_dpm(hwmgr, false);
+ }
+
+ return 0;
+}
+
+static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+ struct smu8_hwmgr *data = hwmgr->backend;
+ uint32_t dpm_features = 0;
+
+ if (enable && phm_cap_enabled(
+ hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEDPM)) {
+ data->dpm_flags |= DPMFlags_VCE_Enabled;
+ dpm_features |= VCE_DPM_MASK;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+ } else {
+ dpm_features |= VCE_DPM_MASK;
+ data->dpm_flags &= ~DPMFlags_VCE_Enabled;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+ }
+
+ return 0;
+}
+
+
+static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ struct smu8_hwmgr *data = hwmgr->backend;
+
+ data->uvd_power_gated = bgate;
+
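+ /*
+ * When gating, the IP block is power/clock gated before UVD DPM is
+ * dropped and the SMU powers the block off; ungating reverses the
+ * sequence so the block is powered and ungated before DPM is restored.
+ */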
+ if (bgate) {
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
+ smu8_dpm_update_uvd_dpm(hwmgr, true);
+ smu8_dpm_powerdown_uvd(hwmgr);
+ } else {
+ smu8_dpm_powerup_uvd(hwmgr);
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
+ smu8_dpm_update_uvd_dpm(hwmgr, false);
+ }
+
+}
+
+static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ struct smu8_hwmgr *data = hwmgr->backend;
+
+ if (bgate) {
+ cgs_set_powergating_state(
+ hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ cgs_set_clockgating_state(
+ hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
+ smu8_enable_disable_vce_dpm(hwmgr, false);
+ smu8_dpm_powerdown_vce(hwmgr);
+ data->vce_power_gated = true;
+ } else {
+ smu8_dpm_powerup_vce(hwmgr);
+ data->vce_power_gated = false;
+ cgs_set_clockgating_state(
+ hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ cgs_set_powergating_state(
+ hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
+ smu8_dpm_update_vce_dpm(hwmgr);
+ smu8_enable_disable_vce_dpm(hwmgr, true);
+ }
+}
+
+static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
+ .backend_init = smu8_hwmgr_backend_init,
+ .backend_fini = smu8_hwmgr_backend_fini,
+ .apply_state_adjust_rules = smu8_apply_state_adjust_rules,
+ .force_dpm_level = smu8_dpm_force_dpm_level,
+ .get_power_state_size = smu8_get_power_state_size,
+ .powerdown_uvd = smu8_dpm_powerdown_uvd,
+ .powergate_uvd = smu8_dpm_powergate_uvd,
+ .powergate_vce = smu8_dpm_powergate_vce,
+ .get_mclk = smu8_dpm_get_mclk,
+ .get_sclk = smu8_dpm_get_sclk,
+ .patch_boot_state = smu8_dpm_patch_boot_state,
+ .get_pp_table_entry = smu8_dpm_get_pp_table_entry,
+ .get_num_of_pp_table_entries = smu8_dpm_get_num_of_pp_table_entries,
+ .set_cpu_power_state = smu8_set_cpu_power_state,
+ .store_cc6_data = smu8_store_cc6_data,
+ .force_clock_level = smu8_force_clock_level,
+ .print_clock_levels = smu8_print_clock_levels,
+ .get_dal_power_level = smu8_get_dal_power_level,
+ .get_performance_level = smu8_get_performance_level,
+ .get_current_shallow_sleep_clocks = smu8_get_current_shallow_sleep_clocks,
+ .get_clock_by_type = smu8_get_clock_by_type,
+ .get_max_high_clocks = smu8_get_max_high_clocks,
+ .read_sensor = smu8_read_sensor,
+ .power_off_asic = smu8_power_off_asic,
+ .asic_setup = smu8_setup_asic_task,
+ .dynamic_state_management_enable = smu8_enable_dpm_tasks,
+ .power_state_set = smu8_set_power_state_tasks,
+ .dynamic_state_management_disable = smu8_disable_dpm_tasks,
+ .notify_cac_buffer_info = smu8_notify_cac_buffer_info,
+ .get_thermal_temperature_range = smu8_get_thermal_temperature_range,
};
-int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
+int smu8_init_function_pointers(struct pp_hwmgr *hwmgr)
{
- hwmgr->hwmgr_func = &cz_hwmgr_funcs;
+ hwmgr->hwmgr_func = &smu8_hwmgr_funcs;
hwmgr->pptable_func = &pptable_funcs;
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h
index b56720a3fc88..05a06083e1b8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h
@@ -21,18 +21,18 @@
*
*/
-#ifndef _CZ_HWMGR_H_
-#define _CZ_HWMGR_H_
+#ifndef _SMU8_HWMGR_H_
+#define _SMU8_HWMGR_H_
#include "cgs_common.h"
#include "ppatomctrl.h"
-#define CZ_NUM_NBPSTATES 4
-#define CZ_NUM_NBPMEMORYCLOCK 2
+#define SMU8_NUM_NBPSTATES 4
+#define SMU8_NUM_NBPMEMORYCLOCK 2
#define MAX_DISPLAY_CLOCK_LEVEL 8
-#define CZ_MAX_HARDWARE_POWERLEVELS 8
-#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
-#define CZ_MIN_DEEP_SLEEP_SCLK 800
+#define SMU8_MAX_HARDWARE_POWERLEVELS 8
+#define SMU8_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
+#define SMU8_MIN_DEEP_SLEEP_SCLK 800
/* Carrizo device IDs */
#define DEVICE_ID_CZ_9870 0x9870
@@ -41,24 +41,21 @@
#define DEVICE_ID_CZ_9876 0x9876
#define DEVICE_ID_CZ_9877 0x9877
-#define PHMCZ_WRITE_SMC_REGISTER(device, reg, value) \
- cgs_write_ind_register(device, CGS_IND_REG__SMC, ix##reg, value)
-
-struct cz_dpm_entry {
+struct smu8_dpm_entry {
uint32_t soft_min_clk;
uint32_t hard_min_clk;
uint32_t soft_max_clk;
uint32_t hard_max_clk;
};
-struct cz_sys_info {
+struct smu8_sys_info {
uint32_t bootup_uma_clock;
uint32_t bootup_engine_clock;
uint32_t dentist_vco_freq;
uint32_t nb_dpm_enable;
- uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK];
- uint32_t nbp_n_clock[CZ_NUM_NBPSTATES];
- uint16_t nbp_voltage_index[CZ_NUM_NBPSTATES];
+ uint32_t nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK];
+ uint32_t nbp_n_clock[SMU8_NUM_NBPSTATES];
+ uint16_t nbp_voltage_index[SMU8_NUM_NBPSTATES];
uint32_t display_clock[MAX_DISPLAY_CLOCK_LEVEL];
uint16_t bootup_nb_voltage_index;
uint8_t htc_tmp_lmt;
@@ -85,21 +82,21 @@ struct cz_sys_info {
((tx) ? DISPLAYPHY_TX_SELECT : 0) | \
((core) ? DISPLAYPHY_CORE_SELECT : 0))
-struct cz_display_phy_info_entry {
+struct smu8_display_phy_info_entry {
uint8_t phy_present;
uint8_t active_lane_mapping;
uint8_t display_config_type;
uint8_t active_number_of_lanes;
};
-#define CZ_MAX_DISPLAYPHY_IDS 10
+#define SMU8_MAX_DISPLAYPHY_IDS 10
-struct cz_display_phy_info {
+struct smu8_display_phy_info {
bool display_phy_access_initialized;
- struct cz_display_phy_info_entry entries[CZ_MAX_DISPLAYPHY_IDS];
+ struct smu8_display_phy_info_entry entries[SMU8_MAX_DISPLAYPHY_IDS];
};
-struct cz_power_level {
+struct smu8_power_level {
uint32_t engineClock;
uint8_t vddcIndex;
uint8_t dsDividerIndex;
@@ -113,7 +110,7 @@ struct cz_power_level {
uint8_t rsv[3];
};
-struct cz_uvd_clocks {
+struct smu8_uvd_clocks {
uint32_t vclk;
uint32_t dclk;
uint32_t vclk_low_divider;
@@ -122,7 +119,7 @@ struct cz_uvd_clocks {
uint32_t dclk_high_divider;
};
-enum cz_pstate_previous_action {
+enum smu8_pstate_previous_action {
DO_NOTHING = 1,
FORCE_HIGH,
CANCEL_FORCE_HIGH
@@ -143,10 +140,10 @@ struct pp_disable_nb_ps_flags {
};
};
-struct cz_power_state {
+struct smu8_power_state {
unsigned int magic;
uint32_t level;
- struct cz_uvd_clocks uvd_clocks;
+ struct smu8_uvd_clocks uvd_clocks;
uint32_t evclk;
uint32_t ecclk;
uint32_t samclk;
@@ -158,8 +155,8 @@ struct cz_power_state {
uint8_t dpm_0_pg_nb_ps_high;
uint8_t dpm_x_nb_ps_low;
uint8_t dpm_x_nb_ps_high;
- enum cz_pstate_previous_action action;
- struct cz_power_level levels[CZ_MAX_HARDWARE_POWERLEVELS];
+ enum smu8_pstate_previous_action action;
+ struct smu8_power_level levels[SMU8_MAX_HARDWARE_POWERLEVELS];
struct pp_disable_nb_ps_flags disable_nb_ps_flag;
};
@@ -182,7 +179,7 @@ struct cc6_settings {
uint32_t cpu_pstate_separation_time;
};
-struct cz_hwmgr {
+struct smu8_hwmgr {
uint32_t dpm_interval;
uint32_t voltage_drop_threshold;
@@ -202,11 +199,11 @@ struct cz_hwmgr {
uint32_t thermal_auto_throttling_treshold;
- struct cz_sys_info sys_info;
+ struct smu8_sys_info sys_info;
- struct cz_power_level boot_power_level;
- struct cz_power_state *cz_current_ps;
- struct cz_power_state *cz_requested_ps;
+ struct smu8_power_level boot_power_level;
+ struct smu8_power_state *smu8_current_ps;
+ struct smu8_power_state *smu8_requested_ps;
uint32_t mgcg_cgtt_local0;
uint32_t mgcg_cgtt_local1;
@@ -219,7 +216,7 @@ struct cz_hwmgr {
uint32_t lock_nb_ps_in_uvd_play_back;
- struct cz_display_phy_info display_phy_info;
+ struct smu8_display_phy_info display_phy_info;
uint32_t vce_slow_sclk_threshold; /* default 200mhz */
uint32_t dce_slow_sclk_threshold; /* default 300mhz */
uint32_t min_sclk_did; /* minimum sclk divider */
@@ -270,10 +267,10 @@ struct cz_hwmgr {
uint32_t fps_low_threshold;
uint32_t dpm_flags;
- struct cz_dpm_entry sclk_dpm;
- struct cz_dpm_entry uvd_dpm;
- struct cz_dpm_entry vce_dpm;
- struct cz_dpm_entry acp_dpm;
+ struct smu8_dpm_entry sclk_dpm;
+ struct smu8_dpm_entry uvd_dpm;
+ struct smu8_dpm_entry vce_dpm;
+ struct smu8_dpm_entry acp_dpm;
uint8_t uvd_boot_level;
uint8_t vce_boot_level;
@@ -311,12 +308,4 @@ struct cz_hwmgr {
uint32_t num_of_clk_entries;
};
-struct pp_hwmgr;
-
-int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr);
-int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr);
-int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr);
-int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr);
-int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr);
-#endif /* _CZ_HWMGR_H_ */
+#endif /* _SMU8_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
new file mode 100644
index 000000000000..e11daf5cbf80
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -0,0 +1,536 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "hwmgr.h"
+#include "pp_debug.h"
+#include "ppatomctrl.h"
+#include "ppsmc.h"
+
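+/*
+ * Assuming VOLTAGE_SCALE is 4, these helpers map between a VDDC value in
+ * mV and an SVI2 8-bit VID: VID 0 corresponds to 1.55 V and each VID step
+ * moves 6.25 mV, so the two conversions are inverses of each other.
+ */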
+uint8_t convert_to_vid(uint16_t vddc)
+{
+ return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
+}
+
+uint16_t convert_to_vddc(uint8_t vid)
+{
+ return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
+}
+
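+/*
+ * Patch an 8- or 16-bit field into a 32-bit register image: the byte lane
+ * is selected from (offset % 4) and only that lane is replaced, leaving
+ * the rest of original_data untouched.
+ */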
+uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
+{
+ u32 mask = 0;
+ u32 shift = 0;
+
+ shift = (offset % 4) << 3;
+ if (size == sizeof(uint8_t))
+ mask = 0xFF << shift;
+ else if (size == sizeof(uint16_t))
+ mask = 0xFFFF << shift;
+
+ original_data &= ~mask;
+ original_data |= (field << shift);
+ return original_data;
+}
+
+/**
+ * Returns once the part of the register indicated by the mask has
+ * reached the given value.
+ */
+int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
+ uint32_t value, uint32_t mask)
+{
+ uint32_t i;
+ uint32_t cur_value;
+
+ if (hwmgr == NULL || hwmgr->device == NULL) {
+ pr_err("Invalid Hardware Manager!");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < hwmgr->usec_timeout; i++) {
+ cur_value = cgs_read_register(hwmgr->device, index);
+ if ((cur_value & mask) == (value & mask))
+ break;
+ udelay(1);
+ }
+
+ /* timeout means wrong logic */
+ if (i == hwmgr->usec_timeout)
+ return -ETIME;
+ return 0;
+}
+
+
+/**
+ * Returns once the part of the register indicated by the mask has
+ * reached the given value. The indirect space is described by giving
+ * the memory-mapped index of the indirect index register.
+ */
+int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
+ uint32_t indirect_port,
+ uint32_t index,
+ uint32_t value,
+ uint32_t mask)
+{
+ if (hwmgr == NULL || hwmgr->device == NULL) {
+ pr_err("Invalid Hardware Manager!");
+ return -EINVAL;
+ }
+
+ cgs_write_register(hwmgr->device, indirect_port, index);
+ return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
+}
+
+int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t index,
+ uint32_t value, uint32_t mask)
+{
+ uint32_t i;
+ uint32_t cur_value;
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < hwmgr->usec_timeout; i++) {
+ cur_value = cgs_read_register(hwmgr->device,
+ index);
+ if ((cur_value & mask) != (value & mask))
+ break;
+ udelay(1);
+ }
+
+ /* timeout means wrong logic */
+ if (i == hwmgr->usec_timeout)
+ return -ETIME;
+ return 0;
+}
+
+int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t indirect_port,
+ uint32_t index,
+ uint32_t value,
+ uint32_t mask)
+{
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ cgs_write_register(hwmgr->device, indirect_port, index);
+ return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
+ value, mask);
+}
+
+bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
+{
+ return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
+}
+
+bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
+{
+ return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
+}
+
+
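+/*
+ * Order-preserving dedup: build a scratch table containing each voltage
+ * value once, then copy it back over the caller's table.
+ */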
+int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
+{
+ uint32_t i, j;
+ uint16_t vvalue;
+ bool found = false;
+ struct pp_atomctrl_voltage_table *table;
+
+ PP_ASSERT_WITH_CODE((NULL != vol_table),
+ "Voltage Table empty.", return -EINVAL);
+
+ table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
+ GFP_KERNEL);
+
+ if (!table)
+ return -ENOMEM;
+
+ table->mask_low = vol_table->mask_low;
+ table->phase_delay = vol_table->phase_delay;
+
+ for (i = 0; i < vol_table->count; i++) {
+ vvalue = vol_table->entries[i].value;
+ found = false;
+
+ for (j = 0; j < table->count; j++) {
+ if (vvalue == table->entries[j].value) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ table->entries[table->count].value = vvalue;
+ table->entries[table->count].smio_low =
+ vol_table->entries[i].smio_low;
+ table->count++;
+ }
+ }
+
+ memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
+ kfree(table);
+ table = NULL;
+ return 0;
+}
+
+int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
+ phm_ppt_v1_clock_voltage_dependency_table *dep_table)
+{
+ uint32_t i;
+ int result;
+
+ PP_ASSERT_WITH_CODE((0 != dep_table->count),
+ "Voltage Dependency Table empty.", return -EINVAL);
+
+ PP_ASSERT_WITH_CODE((NULL != vol_table),
+ "vol_table empty.", return -EINVAL);
+
+ vol_table->mask_low = 0;
+ vol_table->phase_delay = 0;
+ vol_table->count = dep_table->count;
+
+ for (i = 0; i < dep_table->count; i++) {
+ vol_table->entries[i].value = dep_table->entries[i].mvdd;
+ vol_table->entries[i].smio_low = 0;
+ }
+
+ result = phm_trim_voltage_table(vol_table);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to trim MVDD table.", return result);
+
+ return 0;
+}
+
+int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
+ phm_ppt_v1_clock_voltage_dependency_table *dep_table)
+{
+ uint32_t i;
+ int result;
+
+ PP_ASSERT_WITH_CODE((0 != dep_table->count),
+ "Voltage Dependency Table empty.", return -EINVAL);
+
+ PP_ASSERT_WITH_CODE((NULL != vol_table),
+ "vol_table empty.", return -EINVAL);
+
+ vol_table->mask_low = 0;
+ vol_table->phase_delay = 0;
+ vol_table->count = dep_table->count;
+
+ for (i = 0; i < dep_table->count; i++) {
+ vol_table->entries[i].value = dep_table->entries[i].vddci;
+ vol_table->entries[i].smio_low = 0;
+ }
+
+ result = phm_trim_voltage_table(vol_table);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to trim VDDCI table.", return result);
+
+ return 0;
+}
+
+int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
+ phm_ppt_v1_voltage_lookup_table *lookup_table)
+{
+ int i = 0;
+
+ PP_ASSERT_WITH_CODE((0 != lookup_table->count),
+ "Voltage Lookup Table empty.", return -EINVAL);
+
+ PP_ASSERT_WITH_CODE((NULL != vol_table),
+ "vol_table empty.", return -EINVAL);
+
+ vol_table->mask_low = 0;
+ vol_table->phase_delay = 0;
+
+ vol_table->count = lookup_table->count;
+
+ for (i = 0; i < vol_table->count; i++) {
+ vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
+ vol_table->entries[i].smio_low = 0;
+ }
+
+ return 0;
+}
+
+void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
+ struct pp_atomctrl_voltage_table *vol_table)
+{
+ unsigned int i, diff;
+
+ if (vol_table->count <= max_vol_steps)
+ return;
+
+ diff = vol_table->count - max_vol_steps;
+
+ for (i = 0; i < max_vol_steps; i++)
+ vol_table->entries[i] = vol_table->entries[i + diff];
+
+ vol_table->count = max_vol_steps;
+}
+
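+/* Clamp the level count to the table maximum and mark every level disabled. */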
+int phm_reset_single_dpm_table(void *table,
+ uint32_t count, int max)
+{
+ int i;
+
+ struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
+
+ dpm_table->count = count > max ? max : count;
+
+ for (i = 0; i < dpm_table->count; i++)
+ dpm_table->dpm_level[i].enabled = false;
+
+ return 0;
+}
+
+void phm_setup_pcie_table_entry(
+ void *table,
+ uint32_t index, uint32_t pcie_gen,
+ uint32_t pcie_lanes)
+{
+ struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
+
+ dpm_table->dpm_level[index].value = pcie_gen;
+ dpm_table->dpm_level[index].param1 = pcie_lanes;
+ dpm_table->dpm_level[index].enabled = true;
+}
+
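+/*
+ * Fold the per-level enabled flags into a bitmask, walking from the
+ * highest level down so that bit 0 ends up describing DPM level 0.
+ */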
+int32_t phm_get_dpm_level_enable_mask_value(void *table)
+{
+ int32_t i;
+ int32_t mask = 0;
+ struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
+
+ for (i = dpm_table->count; i > 0; i--) {
+ mask = mask << 1;
+ if (dpm_table->dpm_level[i - 1].enabled)
+ mask |= 0x1;
+ else
+ mask &= 0xFFFFFFFE;
+ }
+
+ return mask;
+}
+
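+/* Return the first lookup entry at or above the requested voltage, clamped to the last entry. */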
+uint8_t phm_get_voltage_index(
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
+{
+ uint8_t count = (uint8_t) (lookup_table->count);
+ uint8_t i;
+
+ PP_ASSERT_WITH_CODE((NULL != lookup_table),
+ "Lookup Table empty.", return 0);
+ PP_ASSERT_WITH_CODE((0 != count),
+ "Lookup Table empty.", return 0);
+
+ for (i = 0; i < lookup_table->count; i++) {
+ /* find first voltage equal or bigger than requested */
+ if (lookup_table->entries[i].us_vdd >= voltage)
+ return i;
+ }
+ /* voltage is bigger than max voltage in the table */
+ return i - 1;
+}
+
+uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
+ uint32_t voltage)
+{
+ uint8_t count = (uint8_t) (voltage_table->count);
+ uint8_t i = 0;
+
+ PP_ASSERT_WITH_CODE((NULL != voltage_table),
+ "Voltage Table empty.", return 0;);
+ PP_ASSERT_WITH_CODE((0 != count),
+ "Voltage Table empty.", return 0;);
+
+ for (i = 0; i < count; i++) {
+ /* find first voltage bigger than requested */
+ if (voltage_table->entries[i].value >= voltage)
+ return i;
+ }
+
+ /* voltage is bigger than max voltage in the table */
+ return i - 1;
+}
+
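+/* Smallest table voltage at or above the request; falls back to the table maximum with a debug note. */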
+uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
+{
+ uint32_t i;
+
+ for (i = 0; i < vddci_table->count; i++) {
+ if (vddci_table->entries[i].value >= vddci)
+ return vddci_table->entries[i].value;
+ }
+
+ pr_debug("vddci is larger than max value in vddci_table\n");
+ return vddci_table->entries[i-1].value;
+}
+
+int phm_find_boot_level(void *table,
+ uint32_t value, uint32_t *boot_level)
+{
+ int result = -EINVAL;
+ uint32_t i;
+ struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
+
+ for (i = 0; i < dpm_table->count; i++) {
+ if (value == dpm_table->dpm_level[i].value) {
+ *boot_level = i;
+ result = 0;
+ }
+ }
+
+ return result;
+}
+
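+/*
+ * Map a virtual (leakage) voltage ID back to an engine clock by finding
+ * the vdd_dep_on_sclk entry whose lookup-table voltage matches it.
+ */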
+int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
+ phm_ppt_v1_voltage_lookup_table *lookup_table,
+ uint16_t virtual_voltage_id, int32_t *sclk)
+{
+ uint8_t entry_id;
+ uint8_t voltage_id;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
+
+ /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
+ for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
+ voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
+ if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
+ break;
+ }
+
+ if (entry_id >= table_info->vdd_dep_on_sclk->count) {
+ pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
+ return -EINVAL;
+ }
+
+ *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
+
+ return 0;
+}
+
+/**
+ * Initialize Dynamic State Adjustment Rule Settings
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ */
+int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
+{
+ uint32_t table_size;
+ struct phm_clock_voltage_dependency_table *table_clk_vlt;
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ /* initialize vddc_dep_on_dal_pwrl table */
+ table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
+ table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
+
+ if (!table_clk_vlt) {
+ pr_err("Cannot allocate space for vddc_dep_on_dal_pwrl!\n");
+ return -ENOMEM;
+ } else {
+ table_clk_vlt->count = 4;
+ table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
+ table_clk_vlt->entries[0].v = 0;
+ table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
+ table_clk_vlt->entries[1].v = 720;
+ table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
+ table_clk_vlt->entries[2].v = 810;
+ table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
+ table_clk_vlt->entries[3].v = 900;
+ if (pptable_info != NULL)
+ pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
+ hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
+ }
+
+ return 0;
+}
+
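+/* Position of the lowest set bit; the mask must be non-zero or this loop never terminates. */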
+uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
+{
+ uint32_t level = 0;
+
+ while (0 == (mask & (1 << level)))
+ level++;
+
+ return level;
+}
+
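+/*
+ * Translate the current DAL power level into its minimum VDDC and then
+ * request, via PPSMC_MSG_VddC_Request, the first SCLK-table voltage that
+ * satisfies it (scaled by VOLTAGE_SCALE).
+ */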
+void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_clock_voltage_dependency_table *table =
+ table_info->vddc_dep_on_dal_pwrl;
+ struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
+ enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
+ uint32_t req_vddc = 0, req_volt, i;
+
+ if (!table || table->count <= 0
+ || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
+ || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
+ return;
+
+ for (i = 0; i < table->count; i++) {
+ if (dal_power_level == table->entries[i].clk) {
+ req_vddc = table->entries[i].v;
+ break;
+ }
+ }
+
+ vddc_table = table_info->vdd_dep_on_sclk;
+ for (i = 0; i < vddc_table->count; i++) {
+ if (req_vddc <= vddc_table->entries[i].vddc) {
+ req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_VddC_Request, req_volt);
+ return;
+ }
+ }
+ pr_err("DAL requested level can not"
+ " found a available voltage in VDDC DPM Table \n");
+}
+
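+/*
+ * EVV voltage lookup is chip-generation dependent: pre-Tonga parts use
+ * the plain atomctrl query, pre-Polaris10 parts add a sanity fallback to
+ * 1150 mV for implausible readings, and newer parts use the AI variant,
+ * whose finer-grained result is divided by 100 before use.
+ */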
+int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+ uint32_t sclk, uint16_t id, uint16_t *voltage)
+{
+ uint32_t vol;
+ int ret = 0;
+
+ if (hwmgr->chip_id < CHIP_TONGA) {
+ ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
+ } else if (hwmgr->chip_id < CHIP_POLARIS10) {
+ ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
+ if (*voltage >= 2000 || *voltage == 0)
+ *voltage = 1150;
+ } else {
+ ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
+ *voltage = (uint16_t)(vol/100);
+ }
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
new file mode 100644
index 000000000000..a1a491300348
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _SMU_HELPER_H_
+#define _SMU_HELPER_H_
+
+struct pp_atomctrl_voltage_table;
+struct pp_hwmgr;
+struct phm_ppt_v1_voltage_lookup_table;
+
+extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t index,
+ uint32_t value, uint32_t mask);
+extern int phm_wait_for_indirect_register_unequal(
+ struct pp_hwmgr *hwmgr,
+ uint32_t indirect_port, uint32_t index,
+ uint32_t value, uint32_t mask);
+
+
+extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
+extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
+extern bool phm_cf_want_microcode_fan_ctrl(struct pp_hwmgr *hwmgr);
+
+extern int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table);
+extern int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
+extern int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
+extern int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_voltage_lookup_table *lookup_table);
+extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table);
+extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
+extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
+extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
+extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
+ uint32_t voltage);
+extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
+extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
+extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
+extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table,
+ uint16_t virtual_voltage_id, int32_t *sclk);
+extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
+extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
+extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
+
+extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+ uint32_t sclk, uint16_t id, uint16_t *voltage);
+
+extern uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size);
+
+extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
+ uint32_t value, uint32_t mask);
+
+extern int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
+ uint32_t indirect_port,
+ uint32_t index,
+ uint32_t value,
+ uint32_t mask);
+
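+/*
+ * Field accessors built on the register headers' reg__field__SHIFT and
+ * reg__field_MASK definitions; the read/write/wait macros below expand
+ * to cgs register accesses using these.
+ */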
+#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
+#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
+
+#define PHM_SET_FIELD(origval, reg, field, fieldval) \
+ (((origval) & ~PHM_FIELD_MASK(reg, field)) | \
+ (PHM_FIELD_MASK(reg, field) & ((fieldval) << PHM_FIELD_SHIFT(reg, field))))
+
+#define PHM_GET_FIELD(value, reg, field) \
+ (((value) & PHM_FIELD_MASK(reg, field)) >> \
+ PHM_FIELD_SHIFT(reg, field))
+
+
+/* Operations on named fields. */
+
+#define PHM_READ_FIELD(device, reg, field) \
+ PHM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
+
+#define PHM_READ_INDIRECT_FIELD(device, port, reg, field) \
+ PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+ reg, field)
+
+#define PHM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \
+ PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+ reg, field)
+
+#define PHM_WRITE_FIELD(device, reg, field, fieldval) \
+ cgs_write_register(device, mm##reg, PHM_SET_FIELD( \
+ cgs_read_register(device, mm##reg), reg, field, fieldval))
+
+#define PHM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
+ cgs_write_ind_register(device, port, ix##reg, \
+ PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+ reg, field, fieldval))
+
+#define PHM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \
+ cgs_write_ind_register(device, port, ix##reg, \
+ PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+ reg, field, fieldval))
+
+#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
+ phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
+
+
+#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
+ << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
+
+#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
+ phm_wait_for_indirect_register_unequal(hwmgr, \
+ mm##port##_INDEX, index, value, mask)
+
+#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
+
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+ port, index, value, mask) \
+ phm_wait_for_indirect_register_unequal(hwmgr, \
+ mm##port##_INDEX_11, index, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
+
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \
+ port, index, value, mask) \
+ phm_wait_on_indirect_register(hwmgr, \
+ mm##port##_INDEX_11, index, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
+
+#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+ index, value, mask) \
+ phm_wait_for_register_unequal(hwmgr, \
+ index, value, mask)
+
+#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
+ PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+ mm##reg, value, mask)
+
+#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
+ PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
+
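+/*
+ * Usage sketch (FOO and BAR are hypothetical register and field names):
+ *
+ *   uint32_t v = PHM_READ_FIELD(device, FOO, BAR);
+ *   PHM_WRITE_FIELD(device, FOO, BAR, v + 1);
+ *   PHM_WAIT_FIELD_UNEQUAL(hwmgr, FOO, BAR, v);
+ *
+ * Each expands to masked cgs register accesses built from the
+ * FOO__BAR__SHIFT / FOO__BAR_MASK definitions.
+ */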
+#endif /* _SMU_HELPER_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index f23861f2c685..2fcbb17b794d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4874,12 +4874,12 @@ static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
hwmgr->thermal_controller.ucType ==
ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
- 0xf, /* AMDGPU_IH_CLIENTID_THM */
+ SOC15_IH_CLIENTID_THM,
0, 0, irq_src[0].set, irq_src[0].handler, hwmgr),
"Failed to register high thermal interrupt!",
return -EINVAL);
PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
- 0xf, /* AMDGPU_IH_CLIENTID_THM */
+ SOC15_IH_CLIENTID_THM,
1, 0, irq_src[1].set, irq_src[1].handler, hwmgr),
"Failed to register low thermal interrupt!",
return -EINVAL);
@@ -4887,7 +4887,7 @@ static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
/* Register CTF(GPIO_19) interrupt */
PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
- 0x16, /* AMDGPU_IH_CLIENTID_ROM_SMUIO, */
+ SOC15_IH_CLIENTID_ROM_SMUIO,
83, 0, irq_src[2].set, irq_src[2].handler, hwmgr),
"Failed to register CTF thermal interrupt!",
return -EINVAL);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index b151ad85666a..85b46ad68546 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -25,16 +25,14 @@
#include <linux/seq_file.h>
#include "amd_powerplay.h"
-#include "pp_instance.h"
#include "hardwaremanager.h"
#include "pp_power_source.h"
#include "hwmgr_ppt.h"
#include "ppatomctrl.h"
#include "hwmgr_ppt.h"
#include "power_state.h"
-#include "cgs_linux.h"
+#include "smu_helper.h"
-struct pp_instance;
struct pp_hwmgr;
struct phm_fan_speed_info;
struct pp_atomctrl_voltage_table;
@@ -237,6 +235,7 @@ struct pp_smumgr_func {
bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr);
int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting);
+ int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /* rw: true for read, false for write */
};
struct pp_hwmgr_func {
@@ -702,6 +701,8 @@ struct pp_hwmgr {
uint32_t chip_family;
uint32_t chip_id;
uint32_t smu_version;
+ bool pm_en;
+ struct mutex smu_lock;
uint32_t pp_table_version;
void *device;
@@ -748,7 +749,7 @@ struct pp_hwmgr {
struct pp_power_state *uvd_ps;
struct amd_pp_display_configuration display_config;
uint32_t feature_mask;
-
+ bool avfs_supported;
/* UMD Pstate */
bool en_umd_pstate;
uint32_t power_profile_mode;
@@ -768,168 +769,17 @@ struct cgs_irq_src_funcs {
cgs_irq_handler_func_t handler;
};
-extern int hwmgr_early_init(struct pp_instance *handle);
-extern int hwmgr_hw_init(struct pp_instance *handle);
-extern int hwmgr_hw_fini(struct pp_instance *handle);
-extern int hwmgr_hw_suspend(struct pp_instance *handle);
-extern int hwmgr_hw_resume(struct pp_instance *handle);
-extern int hwmgr_handle_task(struct pp_instance *handle,
+extern int hwmgr_early_init(struct pp_hwmgr *hwmgr);
+extern int hwmgr_hw_init(struct pp_hwmgr *hwmgr);
+extern int hwmgr_hw_fini(struct pp_hwmgr *hwmgr);
+extern int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr);
+extern int hwmgr_hw_resume(struct pp_hwmgr *hwmgr);
+extern int hwmgr_handle_task(struct pp_hwmgr *hwmgr,
enum amd_pp_task task_id,
enum amd_pm_state_type *user_state);
-extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
- uint32_t value, uint32_t mask);
-
-extern int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask);
-
-extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
- uint32_t index,
- uint32_t value, uint32_t mask);
-extern int phm_wait_for_indirect_register_unequal(
- struct pp_hwmgr *hwmgr,
- uint32_t indirect_port, uint32_t index,
- uint32_t value, uint32_t mask);
-
-
-extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
-extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
-extern bool phm_cf_want_microcode_fan_ctrl(struct pp_hwmgr *hwmgr);
-
-extern int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table);
-extern int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
-extern int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
-extern int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_voltage_lookup_table *lookup_table);
-extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table);
-extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
-extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
-extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
-extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
- uint32_t voltage);
-extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
-extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
-extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
-extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table,
- uint16_t virtual_voltage_id, int32_t *sclk);
-extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
-extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
-extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
-
-extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
-extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int rv_init_function_pointers(struct pp_hwmgr *hwmgr);
-
-extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
- uint32_t sclk, uint16_t id, uint16_t *voltage);
-
-extern uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size);
-
-#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
-
-#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
-#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
-
-#define PHM_SET_FIELD(origval, reg, field, fieldval) \
- (((origval) & ~PHM_FIELD_MASK(reg, field)) | \
- (PHM_FIELD_MASK(reg, field) & ((fieldval) << PHM_FIELD_SHIFT(reg, field))))
-
-#define PHM_GET_FIELD(value, reg, field) \
- (((value) & PHM_FIELD_MASK(reg, field)) >> \
- PHM_FIELD_SHIFT(reg, field))
-
-
-/* Operations on named fields. */
-
-#define PHM_READ_FIELD(device, reg, field) \
- PHM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
-
-#define PHM_READ_INDIRECT_FIELD(device, port, reg, field) \
- PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field)
-
-#define PHM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \
- PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field)
-
-#define PHM_WRITE_FIELD(device, reg, field, fieldval) \
- cgs_write_register(device, mm##reg, PHM_SET_FIELD( \
- cgs_read_register(device, mm##reg), reg, field, fieldval))
-
-#define PHM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
- cgs_write_ind_register(device, port, ix##reg, \
- PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field, fieldval))
-
-#define PHM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \
- cgs_write_ind_register(device, port, ix##reg, \
- PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field, fieldval))
-
-#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
- phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
-#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
- PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
- << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
- phm_wait_for_indirect_register_unequal(hwmgr, \
- mm##port##_INDEX, index, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
- PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
- (fieldval) << PHM_FIELD_SHIFT(reg, field), \
- PHM_FIELD_MASK(reg, field) )
-
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
- port, index, value, mask) \
- phm_wait_for_indirect_register_unequal(hwmgr, \
- mm##port##_INDEX_11, index, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
- (fieldval) << PHM_FIELD_SHIFT(reg, field), \
- PHM_FIELD_MASK(reg, field))
-
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \
- port, index, value, mask) \
- phm_wait_on_indirect_register(hwmgr, \
- mm##port##_INDEX_11, index, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \
- (fieldval) << PHM_FIELD_SHIFT(reg, field), \
- PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
- index, value, mask) \
- phm_wait_for_register_unequal(hwmgr, \
- index, value, mask)
-
-#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
- PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
- mm##reg, value, mask)
+#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
-#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
- PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \
- (fieldval) << PHM_FIELD_SHIFT(reg, field), \
- PHM_FIELD_MASK(reg, field))
#endif /* _HWMGR_H_ */
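
Note on the pp_instance.h removal below: its fields (pm_en, the lock, the hwmgr pointer) fold into struct pp_hwmgr per the hunks above, so the opaque handle passed around powerplay becomes the hwmgr itself. A sketch of a typical call site after the change, with assumed surrounding code:

        struct pp_hwmgr *hwmgr = handle;        /* was: struct pp_instance * */
        int ret = 0;

        mutex_lock(&hwmgr->smu_lock);           /* was: pp_instance.pp_lock */
        if (hwmgr->pm_en)                       /* was: pp_instance.pm_en */
                ret = hwmgr_handle_task(hwmgr, task_id, user_state);
        mutex_unlock(&hwmgr->smu_lock);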
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
deleted file mode 100644
index 6c2fa33bd63a..000000000000
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _PP_INSTANCE_H_
-#define _PP_INSTANCE_H_
-
-struct pp_hwmgr;
-
-struct pp_instance {
- void *parent; /* e.g. amdgpu_device */
- void *device; /* e.g. cgs_device */
- bool pm_en;
- struct pp_hwmgr *hwmgr;
- struct mutex pp_lock;
-};
-
-#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 9bba0a069ed6..fc3a2a533586 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -26,27 +26,6 @@
#include "amd_powerplay.h"
#include "hwmgr.h"
-enum AVFS_BTC_STATUS {
- AVFS_BTC_BOOT = 0,
- AVFS_BTC_BOOT_STARTEDSMU,
- AVFS_LOAD_VIRUS,
- AVFS_BTC_VIRUS_LOADED,
- AVFS_BTC_VIRUS_FAIL,
- AVFS_BTC_COMPLETED_PREVIOUSLY,
- AVFS_BTC_ENABLEAVFS,
- AVFS_BTC_STARTED,
- AVFS_BTC_FAILED,
- AVFS_BTC_RESTOREVFT_FAILED,
- AVFS_BTC_SAVEVFT_FAILED,
- AVFS_BTC_DPMTABLESETUP_FAILED,
- AVFS_BTC_COMPLETED_UNSAVED,
- AVFS_BTC_COMPLETED_SAVED,
- AVFS_BTC_COMPLETED_RESTORED,
- AVFS_BTC_DISABLED,
- AVFS_BTC_NOTSUPPORTED,
- AVFS_BTC_SMUMSG_ERROR
-};
-
enum SMU_TABLE {
SMU_UVD_TABLE = 0,
SMU_VCE_TABLE,
@@ -90,6 +69,11 @@ enum SMU_MAC_DEFINITION {
SMU_UVD_MCLK_HANDSHAKE_DISABLE,
};
+enum SMU10_TABLE_ID {
+ SMU10_WMTABLE = 0,
+ SMU10_CLOCKTABLE,
+};
+
extern int smum_get_argument(struct pp_hwmgr *hwmgr);
extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
@@ -121,4 +105,6 @@ extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr);
extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_setting);
+extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw);
+
#endif
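
The new smum_smc_table_manager() prototype pairs with the smc_table_manager callback added to pp_smumgr_func above. A plausible dispatch wrapper (a sketch, the actual smumgr.c body may differ):

int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
                           uint16_t table_id, bool rw)
{
        /* delegate to the backend if it implements table transfers */
        if (hwmgr->smumgr_funcs->smc_table_manager)
                return hwmgr->smumgr_funcs->smc_table_manager(hwmgr,
                                table, table_id, rw);

        return -EINVAL;
}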
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 98e701e4f553..735c38624ce1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -23,9 +23,9 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \
+SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
polaris10_smumgr.o iceland_smumgr.o \
- smu7_smumgr.o vega10_smumgr.o rv_smumgr.o ci_smumgr.o
+ smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o
AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
deleted file mode 100644
index 957739aa6db9..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ /dev/null
@@ -1,883 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-#include "cgs_common.h"
-#include "smu/smu_8_0_d.h"
-#include "smu/smu_8_0_sh_mask.h"
-#include "smu8.h"
-#include "smu8_fusion.h"
-#include "cz_smumgr.h"
-#include "cz_ppsmc.h"
-#include "smu_ucode_xfer_cz.h"
-#include "gca/gfx_8_0_d.h"
-#include "gca/gfx_8_0_sh_mask.h"
-#include "smumgr.h"
-
-#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)
-
-static const enum cz_scratch_entry firmware_list[] = {
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
-};
-
-static int cz_smum_get_argument(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- return cgs_read_register(hwmgr->device,
- mmSMU_MP1_SRBM2P_ARG_0);
-}
-
-static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
- int result = 0;
-
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
- SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
- if (result != 0) {
- pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg);
- return result;
- }
-
- cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
- cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
-
- return 0;
-}
-
-/* Send a message to the SMC, and wait for its response.*/
-static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
- int result = 0;
-
- result = cz_send_msg_to_smc_async(hwmgr, msg);
- if (result != 0)
- return result;
-
- return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
- SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
-}
-
-static int cz_set_smc_sram_address(struct pp_hwmgr *hwmgr,
- uint32_t smc_address, uint32_t limit)
-{
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- if (0 != (3 & smc_address)) {
- pr_err("SMC address must be 4 byte aligned\n");
- return -EINVAL;
- }
-
- if (limit <= (smc_address + 3)) {
- pr_err("SMC address beyond the SMC RAM area\n");
- return -EINVAL;
- }
-
- cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
- SMN_MP1_SRAM_START_ADDR + smc_address);
-
- return 0;
-}
-
-static int cz_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
- uint32_t smc_address, uint32_t value, uint32_t limit)
-{
- int result;
-
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- result = cz_set_smc_sram_address(hwmgr, smc_address, limit);
- if (!result)
- cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
-
- return result;
-}
-
-static int cz_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter)
-{
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
-
- return cz_send_msg_to_smc(hwmgr, msg);
-}
-
-static int cz_check_fw_load_finish(struct pp_hwmgr *hwmgr,
- uint32_t firmware)
-{
- int i;
- uint32_t index = SMN_MP1_SRAM_START_ADDR +
- SMU8_FIRMWARE_HEADER_LOCATION +
- offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
-
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
-
- for (i = 0; i < hwmgr->usec_timeout; i++) {
- if (firmware ==
- (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
- break;
- udelay(1);
- }
-
- if (i >= hwmgr->usec_timeout) {
- pr_err("SMU check loaded firmware failed.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr)
-{
- uint32_t reg_data;
- uint32_t tmp;
- int ret = 0;
- struct cgs_firmware_info info = {0};
- struct cz_smumgr *cz_smu;
-
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- ret = cgs_get_firmware_info(hwmgr->device,
- CGS_UCODE_ID_CP_MEC, &info);
-
- if (ret)
- return -EINVAL;
-
- /* Disable MEC parsing/prefetching */
- tmp = cgs_read_register(hwmgr->device,
- mmCP_MEC_CNTL);
- tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
- tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
- cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
-
- tmp = cgs_read_register(hwmgr->device,
- mmCP_CPC_IC_BASE_CNTL);
-
- tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
- tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
- tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
- tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
- cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
-
- reg_data = lower_32_bits(info.mc_addr) &
- PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
- cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
-
- reg_data = upper_32_bits(info.mc_addr) &
- PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
- cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
-
- return 0;
-}
-
-static uint8_t cz_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
- enum cz_scratch_entry firmware_enum)
-{
- uint8_t ret = 0;
-
- switch (firmware_enum) {
- case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
- ret = UCODE_ID_SDMA0;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
- if (hwmgr->chip_id == CHIP_STONEY)
- ret = UCODE_ID_SDMA0;
- else
- ret = UCODE_ID_SDMA1;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
- ret = UCODE_ID_CP_CE;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
- ret = UCODE_ID_CP_PFP;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
- ret = UCODE_ID_CP_ME;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
- ret = UCODE_ID_CP_MEC_JT1;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
- if (hwmgr->chip_id == CHIP_STONEY)
- ret = UCODE_ID_CP_MEC_JT1;
- else
- ret = UCODE_ID_CP_MEC_JT2;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
- ret = UCODE_ID_GMCON_RENG;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
- ret = UCODE_ID_RLC_G;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
- ret = UCODE_ID_RLC_SCRATCH;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
- ret = UCODE_ID_RLC_SRM_ARAM;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
- ret = UCODE_ID_RLC_SRM_DRAM;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
- ret = UCODE_ID_DMCU_ERAM;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
- ret = UCODE_ID_DMCU_IRAM;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
- ret = TASK_ARG_INIT_MM_PWR_LOG;
- break;
- case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
- case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
- case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
- case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
- case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
- case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
- ret = TASK_ARG_REG_MMIO;
- break;
- case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
- ret = TASK_ARG_INIT_CLK_TABLE;
- break;
- }
-
- return ret;
-}
-
-static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type)
-{
- enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
-
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- result = CGS_UCODE_ID_SDMA0;
- break;
- case UCODE_ID_SDMA1:
- result = CGS_UCODE_ID_SDMA1;
- break;
- case UCODE_ID_CP_CE:
- result = CGS_UCODE_ID_CP_CE;
- break;
- case UCODE_ID_CP_PFP:
- result = CGS_UCODE_ID_CP_PFP;
- break;
- case UCODE_ID_CP_ME:
- result = CGS_UCODE_ID_CP_ME;
- break;
- case UCODE_ID_CP_MEC_JT1:
- result = CGS_UCODE_ID_CP_MEC_JT1;
- break;
- case UCODE_ID_CP_MEC_JT2:
- result = CGS_UCODE_ID_CP_MEC_JT2;
- break;
- case UCODE_ID_RLC_G:
- result = CGS_UCODE_ID_RLC_G;
- break;
- default:
- break;
- }
-
- return result;
-}
-
-static int cz_smu_populate_single_scratch_task(
- struct pp_hwmgr *hwmgr,
- enum cz_scratch_entry fw_enum,
- uint8_t type, bool is_last)
-{
- uint8_t i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
- struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
-
- task->type = type;
- task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
- task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
-
- for (i = 0; i < cz_smu->scratch_buffer_length; i++)
- if (cz_smu->scratch_buffer[i].firmware_ID == fw_enum)
- break;
-
- if (i >= cz_smu->scratch_buffer_length) {
- pr_err("Invalid Firmware Type\n");
- return -EINVAL;
- }
-
- task->addr.low = lower_32_bits(cz_smu->scratch_buffer[i].mc_addr);
- task->addr.high = upper_32_bits(cz_smu->scratch_buffer[i].mc_addr);
- task->size_bytes = cz_smu->scratch_buffer[i].data_size;
-
- if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
- struct cz_ih_meta_data *pIHReg_restore =
- (struct cz_ih_meta_data *)cz_smu->scratch_buffer[i].kaddr;
- pIHReg_restore->command =
- METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
- }
-
- return 0;
-}
-
-static int cz_smu_populate_single_ucode_load_task(
- struct pp_hwmgr *hwmgr,
- enum cz_scratch_entry fw_enum,
- bool is_last)
-{
- uint8_t i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
- struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
-
- task->type = TASK_TYPE_UCODE_LOAD;
- task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
- task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
-
- for (i = 0; i < cz_smu->driver_buffer_length; i++)
- if (cz_smu->driver_buffer[i].firmware_ID == fw_enum)
- break;
-
- if (i >= cz_smu->driver_buffer_length) {
- pr_err("Invalid Firmware Type\n");
- return -EINVAL;
- }
-
- task->addr.low = lower_32_bits(cz_smu->driver_buffer[i].mc_addr);
- task->addr.high = upper_32_bits(cz_smu->driver_buffer[i].mc_addr);
- task->size_bytes = cz_smu->driver_buffer[i].data_size;
-
- return 0;
-}
-
-static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-
- cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
- TASK_TYPE_UCODE_SAVE, true);
-
- return 0;
-}
-
-static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
-
- for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
- toc->JobList[i] = (uint8_t)IGNORE_JOB;
-
- return 0;
-}
-
-static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
-
- toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
- TASK_TYPE_UCODE_SAVE, false);
-
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
- TASK_TYPE_UCODE_SAVE, true);
-
- return 0;
-}
-
-
-static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
-
- toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count;
-
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-
- if (hwmgr->chip_id == CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- else
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
-
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
-
- /* populate scratch */
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
- TASK_TYPE_UCODE_LOAD, false);
-
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
- TASK_TYPE_UCODE_LOAD, false);
-
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
- TASK_TYPE_UCODE_LOAD, true);
-
- return 0;
-}
-
-static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-
- cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count;
-
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
- TASK_TYPE_INITIALIZE, true);
- return 0;
-}
-
-static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-
- cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count;
-
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
- if (hwmgr->chip_id != CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- if (hwmgr->chip_id != CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
-
- return 0;
-}
-
-static int cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-
- cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count;
-
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
- TASK_TYPE_INITIALIZE, true);
-
- return 0;
-}
-
-static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-
- cz_smu->toc_entry_used_count = 0;
- cz_smu_initialize_toc_empty_job_list(hwmgr);
- cz_smu_construct_toc_for_rlc_aram_save(hwmgr);
- cz_smu_construct_toc_for_vddgfx_enter(hwmgr);
- cz_smu_construct_toc_for_vddgfx_exit(hwmgr);
- cz_smu_construct_toc_for_power_profiling(hwmgr);
- cz_smu_construct_toc_for_bootup(hwmgr);
- cz_smu_construct_toc_for_clock_table(hwmgr);
-
- return 0;
-}
-
-static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- uint32_t firmware_type;
- uint32_t i;
- int ret;
- enum cgs_ucode_id ucode_id;
- struct cgs_firmware_info info = {0};
-
- cz_smu->driver_buffer_length = 0;
-
- for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
-
- firmware_type = cz_translate_firmware_enum_to_arg(hwmgr,
- firmware_list[i]);
-
- ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
-
- ret = cgs_get_firmware_info(hwmgr->device,
- ucode_id, &info);
-
- if (ret == 0) {
- cz_smu->driver_buffer[i].mc_addr = info.mc_addr;
-
- cz_smu->driver_buffer[i].data_size = info.image_size;
-
- cz_smu->driver_buffer[i].firmware_ID = firmware_list[i];
- cz_smu->driver_buffer_length++;
- }
- }
-
- return 0;
-}
-
-static int cz_smu_populate_single_scratch_entry(
- struct pp_hwmgr *hwmgr,
- enum cz_scratch_entry scratch_type,
- uint32_t ulsize_byte,
- struct cz_buffer_entry *entry)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
-
- entry->data_size = ulsize_byte;
- entry->kaddr = (char *) cz_smu->smu_buffer.kaddr +
- cz_smu->smu_buffer_used_bytes;
- entry->mc_addr = cz_smu->smu_buffer.mc_addr + cz_smu->smu_buffer_used_bytes;
- entry->firmware_ID = scratch_type;
-
- cz_smu->smu_buffer_used_bytes += ulsize_aligned;
-
- return 0;
-}
-
-static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- unsigned long i;
-
- for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
- if (cz_smu->scratch_buffer[i].firmware_ID
- == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
- break;
- }
-
- *table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr;
-
- cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetClkTableAddrHi,
- upper_32_bits(cz_smu->scratch_buffer[i].mc_addr));
-
- cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetClkTableAddrLo,
- lower_32_bits(cz_smu->scratch_buffer[i].mc_addr));
-
- cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
- cz_smu->toc_entry_clock_table);
-
- cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
-
- return 0;
-}
-
-static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- unsigned long i;
-
- for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
- if (cz_smu->scratch_buffer[i].firmware_ID
- == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
- break;
- }
-
- cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetClkTableAddrHi,
- upper_32_bits(cz_smu->scratch_buffer[i].mc_addr));
-
- cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetClkTableAddrLo,
- lower_32_bits(cz_smu->scratch_buffer[i].mc_addr));
-
- cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
- cz_smu->toc_entry_clock_table);
-
- cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
-
- return 0;
-}
-
-static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)(hwmgr->smu_backend);
- uint32_t smc_address;
-
- if (!hwmgr->reload_fw) {
- pr_info("skip reloading...\n");
- return 0;
- }
-
- cz_smu_populate_firmware_entries(hwmgr);
-
- cz_smu_construct_toc(hwmgr);
-
- smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
- offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
-
- cz_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
-
- cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DriverDramAddrHi,
- upper_32_bits(cz_smu->toc_buffer.mc_addr));
-
- cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DriverDramAddrLo,
- lower_32_bits(cz_smu->toc_buffer.mc_addr));
-
- cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
-
- cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_ExecuteJob,
- cz_smu->toc_entry_aram);
- cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
- cz_smu->toc_entry_power_profiling_index);
-
- return cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_ExecuteJob,
- cz_smu->toc_entry_initialize_index);
-}
-
-static int cz_start_smu(struct pp_hwmgr *hwmgr)
-{
- int ret = 0;
- uint32_t fw_to_check = 0;
- struct cgs_firmware_info info = {0};
- uint32_t index = SMN_MP1_SRAM_START_ADDR +
- SMU8_FIRMWARE_HEADER_LOCATION +
- offsetof(struct SMU8_Firmware_Header, Version);
-
-
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
- hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
- info.version = hwmgr->smu_version >> 8;
- cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
-
- fw_to_check = UCODE_ID_RLC_G_MASK |
- UCODE_ID_SDMA0_MASK |
- UCODE_ID_SDMA1_MASK |
- UCODE_ID_CP_CE_MASK |
- UCODE_ID_CP_ME_MASK |
- UCODE_ID_CP_PFP_MASK |
- UCODE_ID_CP_MEC_JT1_MASK |
- UCODE_ID_CP_MEC_JT2_MASK;
-
- if (hwmgr->chip_id == CHIP_STONEY)
- fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
-
- ret = cz_request_smu_load_fw(hwmgr);
- if (ret)
- pr_err("SMU firmware load failed\n");
-
- cz_check_fw_load_finish(hwmgr, fw_to_check);
-
- ret = cz_load_mec_firmware(hwmgr);
- if (ret)
- pr_err("Mec Firmware load failed\n");
-
- return ret;
-}
-
-static int cz_smu_init(struct pp_hwmgr *hwmgr)
-{
- int ret = 0;
- struct cz_smumgr *cz_smu;
-
- cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL);
- if (cz_smu == NULL)
- return -ENOMEM;
-
- hwmgr->smu_backend = cz_smu;
-
- cz_smu->toc_buffer.data_size = 4096;
- cz_smu->smu_buffer.data_size =
- ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
- ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
- ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
- ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
- ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
-
- ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
- cz_smu->toc_buffer.data_size,
- PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &cz_smu->toc_buffer.handle,
- &cz_smu->toc_buffer.mc_addr,
- &cz_smu->toc_buffer.kaddr);
- if (ret)
- return -EINVAL;
-
- ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
- cz_smu->smu_buffer.data_size,
- PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &cz_smu->smu_buffer.handle,
- &cz_smu->smu_buffer.mc_addr,
- &cz_smu->smu_buffer.kaddr);
- if (ret) {
- amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle,
- &cz_smu->toc_buffer.mc_addr,
- &cz_smu->toc_buffer.kaddr);
- return -EINVAL;
- }
-
- if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
- UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
- &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
- pr_err("Error when Populate Firmware Entry.\n");
- return -1;
- }
-
- if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
- UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
- &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
- pr_err("Error when Populate Firmware Entry.\n");
- return -1;
- }
- if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
- UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
- &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
- pr_err("Error when Populate Firmware Entry.\n");
- return -1;
- }
-
- if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
- sizeof(struct SMU8_MultimediaPowerLogData),
- &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
- pr_err("Error when Populate Firmware Entry.\n");
- return -1;
- }
-
- if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
- CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
- sizeof(struct SMU8_Fusion_ClkTable),
- &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
- pr_err("Error when Populate Firmware Entry.\n");
- return -1;
- }
-
- return 0;
-}
-
-static int cz_smu_fini(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu;
-
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- if (cz_smu) {
- amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle,
- &cz_smu->toc_buffer.mc_addr,
- &cz_smu->toc_buffer.kaddr);
- amdgpu_bo_free_kernel(&cz_smu->smu_buffer.handle,
- &cz_smu->smu_buffer.mc_addr,
- &cz_smu->smu_buffer.kaddr);
- kfree(cz_smu);
- }
-
- return 0;
-}
-
-static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
- unsigned long check_feature)
-{
- int result;
- unsigned long features;
-
- result = cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
- if (result == 0) {
- features = smum_get_argument(hwmgr);
- if (features & check_feature)
- return true;
- }
-
- return false;
-}
-
-static bool cz_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- if (cz_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
- return true;
- return false;
-}
-
-const struct pp_smumgr_func cz_smu_funcs = {
- .smu_init = cz_smu_init,
- .smu_fini = cz_smu_fini,
- .start_smu = cz_start_smu,
- .check_fw_load_finish = cz_check_fw_load_finish,
- .request_smu_load_fw = NULL,
- .request_smu_load_specific_fw = NULL,
- .get_argument = cz_smum_get_argument,
- .send_msg_to_smc = cz_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = cz_send_msg_to_smc_with_parameter,
- .download_pptable_settings = cz_download_pptable_settings,
- .upload_pptable_settings = cz_upload_pptable_settings,
- .is_dpm_running = cz_is_dpm_running,
-};
-
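
The deleted cz_smumgr.c survives as smu8_smumgr.c (see the Makefile hunk above); its SMU mailbox protocol is unchanged. A condensed sketch of the SRBM2P handshake, using the names from the removed code (PHM_WAIT_FIELD_UNEQUAL is dropped from hwmgr.h in this patch and presumably now comes via smu_helper.h):

static int smu8_send_msg_with_arg(struct pp_hwmgr *hwmgr,
                                  uint16_t msg, uint32_t arg)
{
        /* wait for the previous message's response to be posted */
        if (PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMU_MP1_SRBM2P_RESP_0, CONTENT, 0))
                return -ETIME;

        cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
        cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, arg);
        cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);

        /* wait for a nonzero response to this message */
        return PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
}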
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 0b2b5d155e5e..95fcda37f890 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -205,9 +205,9 @@ static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
int result = 0;
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
- if (0 != smu_data->avfs.avfs_btc_param) {
+ if (0 != smu_data->avfs_btc_param) {
if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+ PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
result = -EINVAL;
}
@@ -261,43 +261,21 @@ static int fiji_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
return 0;
}
-static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool smu_started)
+static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
-
- switch (smu_data->avfs.avfs_btc_status) {
- case AVFS_BTC_COMPLETED_PREVIOUSLY:
- break;
+ PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr),
+ "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
+ " table over to SMU",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
+ "[AVFS][fiji_avfs_event_mgr] Could not setup "
+ "Pwr Virus for AVFS ",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr),
+ "[AVFS][fiji_avfs_event_mgr] Failure at "
+ "fiji_start_avfs_btc. AVFS Disabled",
+ return -EINVAL);
- case AVFS_BTC_BOOT: /*Cold Boot State - Post SMU Start*/
- if (!smu_started)
- break;
- smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr),
- "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
- " table over to SMU",
- return -EINVAL;);
- smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
- "[AVFS][fiji_avfs_event_mgr] Could not setup "
- "Pwr Virus for AVFS ",
- return -EINVAL;);
- smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr),
- "[AVFS][fiji_avfs_event_mgr] Failure at "
- "fiji_start_avfs_btc. AVFS Disabled",
- return -EINVAL;);
-
- smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
- break;
- case AVFS_BTC_DISABLED: /* Do nothing */
- case AVFS_BTC_NOTSUPPORTED: /* Do nothing */
- case AVFS_BTC_ENABLEAVFS:
- break;
- default:
- pr_err("AVFS failed status is %x !\n", smu_data->avfs.avfs_btc_status);
- break;
- }
return 0;
}
@@ -309,8 +287,6 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
/* Only start SMC if SMC RAM is not running */
if (!(smu7_is_smc_ram_running(hwmgr)
|| cgs_is_virtualization_enabled(hwmgr->device))) {
- fiji_avfs_event_mgr(hwmgr, false);
-
/* Check if SMU is running in protected mode */
if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC,
@@ -323,7 +299,8 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
if (result)
return result;
}
- fiji_avfs_event_mgr(hwmgr, true);
+ if (fiji_avfs_event_mgr(hwmgr))
+ hwmgr->avfs_supported = false;
}
/* To initialize all clock gating before RLC loaded and running.*/
@@ -377,8 +354,10 @@ static int fiji_smu_init(struct pp_hwmgr *hwmgr)
hwmgr->smu_backend = fiji_priv;
- if (smu7_init(hwmgr))
+ if (smu7_init(hwmgr)) {
+ kfree(fiji_priv);
return -EINVAL;
+ }
return 0;
}
@@ -2315,19 +2294,12 @@ static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
{
- int ret;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
-
- if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+ if (!hwmgr->avfs_supported)
return 0;
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
- if (!ret)
- /* If this param is not changed, this function could fire unnecessarily */
- smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
- return ret;
+ return 0;
}
static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
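
The AVFS rework in this file replaces the per-backend AVFS_BTC state machine (the enum deleted from smumgr.h above) with the single hwmgr->avfs_supported flag: BTC runs once at SMU start and clears the flag on failure. The enable path then reduces to a sketch like:

static int thermal_avfs_enable(struct pp_hwmgr *hwmgr)
{
        if (!hwmgr->avfs_supported)
                return 0;       /* BTC failed or AVFS not fused: quietly skip */

        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
        return 0;
}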
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 6255edf58721..4e2f62e659ef 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -271,8 +271,10 @@ static int iceland_smu_init(struct pp_hwmgr *hwmgr)
hwmgr->smu_backend = iceland_priv;
- if (smu7_init(hwmgr))
+ if (smu7_init(hwmgr)) {
+ kfree(iceland_priv);
return -EINVAL;
+ }
return 0;
}
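
The same two-line leak fix recurs in each *_smu_init() in this series: the backend struct is allocated and hung off hwmgr->smu_backend, but an smu7_init() failure previously returned without freeing it. The pattern, annotated:

        iceland_priv = kzalloc(sizeof(*iceland_priv), GFP_KERNEL);
        if (!iceland_priv)
                return -ENOMEM;

        hwmgr->smu_backend = iceland_priv;

        if (smu7_init(hwmgr)) {
                kfree(iceland_priv);    /* was leaked on this error path */
                return -EINVAL;
        }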
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 632d1ca2f69c..03ec1e59876b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -99,13 +99,13 @@ static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
int result = 0;
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
- if (0 != smu_data->avfs.avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+ if (0 != smu_data->avfs_btc_param) {
+ if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
result = -1;
}
}
- if (smu_data->avfs.avfs_btc_param > 1) {
+ if (smu_data->avfs_btc_param > 1) {
/* Soft-Reset to reset the engine before loading uCode */
/* halt */
cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000);
@@ -173,46 +173,25 @@ static int polaris10_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
static int
-polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool SMU_VFT_INTACT)
+polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr)
{
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
- switch (smu_data->avfs.avfs_btc_status) {
- case AVFS_BTC_COMPLETED_PREVIOUSLY:
- break;
-
- case AVFS_BTC_BOOT: /* Cold Boot State - Post SMU Start */
-
- smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED;
- PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr),
- "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
- return -EINVAL);
-
- if (smu_data->avfs.avfs_btc_param > 1) {
- pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
- smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
- "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
- return -EINVAL);
- }
+ PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr),
+ "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
+ return -EINVAL);
- smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr),
- "[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
- return -EINVAL);
- smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
- break;
-
- case AVFS_BTC_DISABLED:
- case AVFS_BTC_ENABLEAVFS:
- case AVFS_BTC_NOTSUPPORTED:
- break;
-
- default:
- pr_err("AVFS failed status is %x!\n", smu_data->avfs.avfs_btc_status);
- break;
+ if (smu_data->avfs_btc_param > 1) {
+ pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
+ PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
+ "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
+ return -EINVAL);
}
+ PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr),
+ "[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
+ return -EINVAL);
+
return 0;
}
@@ -312,11 +291,10 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
{
int result = 0;
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
- bool SMU_VFT_INTACT;
/* Only start SMC if SMC RAM is not running */
- if (!smu7_is_smc_ram_running(hwmgr)) {
- SMU_VFT_INTACT = false;
+ if (!(smu7_is_smc_ram_running(hwmgr)
+ || cgs_is_virtualization_enabled(hwmgr->device))) {
smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
@@ -337,11 +315,9 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
if (result != 0)
PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
- polaris10_avfs_event_mgr(hwmgr, true);
- } else
- SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */
+ polaris10_avfs_event_mgr(hwmgr);
+ }
- polaris10_avfs_event_mgr(hwmgr, SMU_VFT_INTACT);
/* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
&(smu_data->smu7_data.soft_regs_start), 0x40000);
@@ -373,8 +349,10 @@ static int polaris10_smu_init(struct pp_hwmgr *hwmgr)
hwmgr->smu_backend = smu_data;
- if (smu7_init(hwmgr))
+ if (smu7_init(hwmgr)) {
+ kfree(smu_data);
return -EINVAL;
+ }
return 0;
}
@@ -1732,8 +1710,8 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
table_info->vdd_dep_on_sclk;
- if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
- return result;
+ if (!hwmgr->avfs_supported)
+ return 0;
result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
@@ -2070,24 +2048,17 @@ static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
{
- int ret;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ if (!hwmgr->avfs_supported)
return 0;
- ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
- ret = (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs) == 0) ?
- 0 : -1;
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
- if (!ret)
- /* If this param is not changed, this function could fire unnecessarily */
- smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
- return ret;
+ return 0;
}
static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
deleted file mode 100644
index e2ee23ade5c5..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
+++ /dev/null
@@ -1,399 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "smumgr.h"
-#include "rv_inc.h"
-#include "pp_soc15.h"
-#include "rv_smumgr.h"
-#include "ppatomctrl.h"
-#include "rv_ppsmc.h"
-#include "smu10_driver_if.h"
-#include "smu10.h"
-#include "ppatomctrl.h"
-#include "pp_debug.h"
-#include "smu_ucode_xfer_vi.h"
-#include "smu7_smumgr.h"
-
-#define VOLTAGE_SCALE 4
-
-#define BUFFER_SIZE 80000
-#define MAX_STRING_SIZE 15
-#define BUFFER_SIZETWO 131072
-
-#define MP0_Public 0x03800000
-#define MP0_SRAM 0x03900000
-#define MP1_Public 0x03b00000
-#define MP1_SRAM 0x03c00004
-
-#define smnMP1_FIRMWARE_FLAGS 0x3010028
-
-
-bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr)
-{
- uint32_t mp1_fw_flags, reg;
-
- reg = soc15_get_register_offset(NBIF_HWID, 0,
- mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
-
- cgs_write_register(hwmgr->device, reg,
- (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
-
- reg = soc15_get_register_offset(NBIF_HWID, 0,
- mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
-
- mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
-
- if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
- return true;
-
- return false;
-}
-
-static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr)
-{
- uint32_t reg;
-
- if (!rv_is_smc_ram_running(hwmgr))
- return -EINVAL;
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-
- phm_wait_for_register_unequal(hwmgr, reg,
- 0, MP1_C2PMSG_90__CONTENT_MASK);
-
- return cgs_read_register(hwmgr->device, reg);
-}
-
-int rv_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
- uint16_t msg)
-{
- uint32_t reg;
-
- if (!rv_is_smc_ram_running(hwmgr))
- return -EINVAL;
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
- cgs_write_register(hwmgr->device, reg, msg);
-
- return 0;
-}
-
-int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
-{
- uint32_t reg;
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
-
- *arg = cgs_read_register(hwmgr->device, reg);
-
- return 0;
-}
-
-int rv_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
- uint32_t reg;
-
- rv_wait_for_response(hwmgr);
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(hwmgr->device, reg, 0);
-
- rv_send_msg_to_smc_without_waiting(hwmgr, msg);
-
- if (rv_wait_for_response(hwmgr) == 0)
- printk("Failed to send Message %x.\n", msg);
-
- return 0;
-}
-
-
-int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter)
-{
- uint32_t reg;
-
- rv_wait_for_response(hwmgr);
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(hwmgr->device, reg, 0);
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(hwmgr->device, reg, parameter);
-
- rv_send_msg_to_smc_without_waiting(hwmgr, msg);
-
-
- if (rv_wait_for_response(hwmgr) == 0)
- printk("Failed to send Message %x.\n", msg);
-
- return 0;
-}
-
-int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id)
-{
- struct rv_smumgr *priv =
- (struct rv_smumgr *)(hwmgr->smu_backend);
-
- PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
- "Invalid SMU Table ID!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
- "Invalid SMU Table version!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
- "Invalid SMU Table Length!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
- "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
- "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
- return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_TransferTableSmu2Dram,
- priv->smu_tables.entry[table_id].table_id) == 0,
- "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
- return -EINVAL;);
-
- memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
- priv->smu_tables.entry[table_id].size);
-
- return 0;
-}
-
-int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id)
-{
- struct rv_smumgr *priv =
- (struct rv_smumgr *)(hwmgr->smu_backend);
-
- PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
- "Invalid SMU Table ID!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
- "Invalid SMU Table version!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
- "Invalid SMU Table Length!", return -EINVAL;);
-
- memcpy(priv->smu_tables.entry[table_id].table, table,
- priv->smu_tables.entry[table_id].size);
-
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
- "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
- return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
- "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
- return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_TransferTableDram2Smu,
- priv->smu_tables.entry[table_id].table_id) == 0,
- "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
- return -EINVAL;);
-
- return 0;
-}
-
-static int rv_verify_smc_interface(struct pp_hwmgr *hwmgr)
-{
- uint32_t smc_driver_if_version;
-
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetDriverIfVersion),
- "Attempt to get SMC IF Version Number Failed!",
- return -EINVAL);
- PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
- &smc_driver_if_version),
- "Attempt to read SMC IF Version Number Failed!",
- return -EINVAL);
-
- if (smc_driver_if_version != SMU10_DRIVER_IF_VERSION)
- return -EINVAL;
-
- return 0;
-}
-
-/* sdma is disabled by default in vbios, need to re-enable in driver */
-static int rv_smc_enable_sdma(struct pp_hwmgr *hwmgr)
-{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PowerUpSdma),
- "Attempt to power up sdma Failed!",
- return -EINVAL);
-
- return 0;
-}
-
-static int rv_smc_disable_sdma(struct pp_hwmgr *hwmgr)
-{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PowerDownSdma),
- "Attempt to power down sdma Failed!",
- return -EINVAL);
-
- return 0;
-}
-
-/* vcn is disabled by default in vbios, need to re-enable in driver */
-static int rv_smc_enable_vcn(struct pp_hwmgr *hwmgr)
-{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PowerUpVcn, 0),
- "Attempt to power up vcn Failed!",
- return -EINVAL);
-
- return 0;
-}
-
-static int rv_smc_disable_vcn(struct pp_hwmgr *hwmgr)
-{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PowerDownVcn, 0),
- "Attempt to power down vcn Failed!",
- return -EINVAL);
-
- return 0;
-}
-
-static int rv_smu_fini(struct pp_hwmgr *hwmgr)
-{
- struct rv_smumgr *priv =
- (struct rv_smumgr *)(hwmgr->smu_backend);
-
- if (priv) {
- rv_smc_disable_sdma(hwmgr);
- rv_smc_disable_vcn(hwmgr);
- amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
- &priv->smu_tables.entry[WMTABLE].mc_addr,
- priv->smu_tables.entry[WMTABLE].table);
- amdgpu_bo_free_kernel(&priv->smu_tables.entry[CLOCKTABLE].handle,
- &priv->smu_tables.entry[CLOCKTABLE].mc_addr,
- priv->smu_tables.entry[CLOCKTABLE].table);
- kfree(hwmgr->smu_backend);
- hwmgr->smu_backend = NULL;
- }
-
- return 0;
-}
-
-static int rv_start_smu(struct pp_hwmgr *hwmgr)
-{
- struct cgs_firmware_info info = {0};
-
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
- rv_read_arg_from_smc(hwmgr, &hwmgr->smu_version);
- info.version = hwmgr->smu_version >> 8;
-
- cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
-
- if (rv_verify_smc_interface(hwmgr))
- return -EINVAL;
- if (rv_smc_enable_sdma(hwmgr))
- return -EINVAL;
- if (rv_smc_enable_vcn(hwmgr))
- return -EINVAL;
-
- return 0;
-}
-
-static int rv_smu_init(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_bo *handle = NULL;
- struct rv_smumgr *priv;
- uint64_t mc_addr;
- void *kaddr = NULL;
- int r;
-
- priv = kzalloc(sizeof(struct rv_smumgr), GFP_KERNEL);
-
- if (!priv)
- return -ENOMEM;
-
- hwmgr->smu_backend = priv;
-
- /* allocate space for watermarks table */
- r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
- sizeof(Watermarks_t),
- PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &handle,
- &mc_addr,
- &kaddr);
-
- if (r)
- return -EINVAL;
-
- priv->smu_tables.entry[WMTABLE].version = 0x01;
- priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
- priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS;
- priv->smu_tables.entry[WMTABLE].mc_addr = mc_addr;
- priv->smu_tables.entry[WMTABLE].table = kaddr;
- priv->smu_tables.entry[WMTABLE].handle = handle;
-
- /* allocate space for watermarks table */
- r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
- sizeof(DpmClocks_t),
- PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &handle,
- &mc_addr,
- &kaddr);
-
- if (r) {
- amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
- &priv->smu_tables.entry[WMTABLE].mc_addr,
- &priv->smu_tables.entry[WMTABLE].table);
- return -EINVAL;
- }
-
- priv->smu_tables.entry[CLOCKTABLE].version = 0x01;
- priv->smu_tables.entry[CLOCKTABLE].size = sizeof(DpmClocks_t);
- priv->smu_tables.entry[CLOCKTABLE].table_id = TABLE_DPMCLOCKS;
- priv->smu_tables.entry[CLOCKTABLE].mc_addr = mc_addr;
- priv->smu_tables.entry[CLOCKTABLE].table = kaddr;
- priv->smu_tables.entry[CLOCKTABLE].handle = handle;
-
- return 0;
-}
-
-const struct pp_smumgr_func rv_smu_funcs = {
- .smu_init = &rv_smu_init,
- .smu_fini = &rv_smu_fini,
- .start_smu = &rv_start_smu,
- .request_smu_load_specific_fw = NULL,
- .send_msg_to_smc = &rv_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = &rv_send_msg_to_smc_with_parameter,
- .download_pptable_settings = NULL,
- .upload_pptable_settings = NULL,
-};
-
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
new file mode 100644
index 000000000000..bc53f2beda30
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -0,0 +1,344 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "smumgr.h"
+#include "smu10_inc.h"
+#include "pp_soc15.h"
+#include "smu10_smumgr.h"
+#include "ppatomctrl.h"
+#include "rv_ppsmc.h"
+#include "smu10_driver_if.h"
+#include "smu10.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+
+
+#define VOLTAGE_SCALE 4
+
+#define BUFFER_SIZE 80000
+#define MAX_STRING_SIZE 15
+#define BUFFER_SIZETWO 131072
+
+#define MP0_Public 0x03800000
+#define MP0_SRAM 0x03900000
+#define MP1_Public 0x03b00000
+#define MP1_SRAM 0x03c00004
+
+#define smnMP1_FIRMWARE_FLAGS 0x3010028
+
+
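+/*
+ * SMU10 mailbox protocol, as used below: the driver writes a message ID
+ * to C2PMSG_66 and an optional argument to C2PMSG_82, then polls
+ * C2PMSG_90 until the SMU posts a non-zero response.  A zero response
+ * after the wait means the firmware failed to process the message.
+ */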
+static uint32_t smu10_wait_for_response(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reg;
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+
+ phm_wait_for_register_unequal(hwmgr, reg,
+ 0, MP1_C2PMSG_90__CONTENT_MASK);
+
+ return cgs_read_register(hwmgr->device, reg);
+}
+
+static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
+ uint16_t msg)
+{
+ uint32_t reg;
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
+ cgs_write_register(hwmgr->device, reg, msg);
+
+ return 0;
+}
+
+static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reg;
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
+
+ return cgs_read_register(hwmgr->device, reg);
+}
+
+static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+ uint32_t reg;
+
+ smu10_wait_for_response(hwmgr);
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+ cgs_write_register(hwmgr->device, reg, 0);
+
+ smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+ if (smu10_wait_for_response(hwmgr) == 0)
+ pr_err("Failed to send message 0x%x\n", msg);
+
+ return 0;
+}
+
+
+static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter)
+{
+ uint32_t reg;
+
+ smu10_wait_for_response(hwmgr);
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+ cgs_write_register(hwmgr->device, reg, 0);
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
+ cgs_write_register(hwmgr->device, reg, parameter);
+
+ smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+ if (smu10_wait_for_response(hwmgr) == 0)
+ pr_err("Failed to send message 0x%x\n", msg);
+
+ return 0;
+}
+
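+/*
+ * Table transfers go through driver-allocated DRAM buffers: the 64-bit
+ * MC address of the buffer is handed to the SMU in two halves, and the
+ * SMU then DMAs the table in the requested direction
+ * (TransferTableSmu2Dram or TransferTableDram2Smu).
+ */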
+static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ uint8_t *table, int16_t table_id)
+{
+ struct smu10_smumgr *priv =
+ (struct smu10_smumgr *)(hwmgr->smu_backend);
+
+ PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
+ "Invalid SMU Table ID!", return -EINVAL;);
+ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
+ "Invalid SMU Table version!", return -EINVAL;);
+ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
+ "Invalid SMU Table Length!", return -EINVAL;);
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableSmu2Dram,
+ priv->smu_tables.entry[table_id].table_id);
+
+ memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
+ priv->smu_tables.entry[table_id].size);
+
+ return 0;
+}
+
+static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+ uint8_t *table, int16_t table_id)
+{
+ struct smu10_smumgr *priv =
+ (struct smu10_smumgr *)(hwmgr->smu_backend);
+
+ PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
+ "Invalid SMU Table ID!", return -EINVAL;);
+ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
+ "Invalid SMU Table version!", return -EINVAL;);
+ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
+ "Invalid SMU Table Length!", return -EINVAL;);
+
+ memcpy(priv->smu_tables.entry[table_id].table, table,
+ priv->smu_tables.entry[table_id].size);
+
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableDram2Smu,
+ priv->smu_tables.entry[table_id].table_id);
+
+ return 0;
+}
+
+static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
+{
+ uint32_t smc_driver_if_version;
+
+ smu10_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetDriverIfVersion);
+ smc_driver_if_version = smu10_read_arg_from_smc(hwmgr);
+
+ if (smc_driver_if_version != SMU10_DRIVER_IF_VERSION) {
+ pr_err("Attempt to read SMC IF Version Number Failed!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* sdma is disabled by default in vbios, need to re-enable in driver */
+static void smu10_smc_enable_sdma(struct pp_hwmgr *hwmgr)
+{
+ smu10_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_PowerUpSdma);
+}
+
+static void smu10_smc_disable_sdma(struct pp_hwmgr *hwmgr)
+{
+ smu10_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_PowerDownSdma);
+}
+
+/* vcn is disabled by default in vbios, need to re-enable in driver */
+static void smu10_smc_enable_vcn(struct pp_hwmgr *hwmgr)
+{
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_PowerUpVcn, 0);
+}
+
+static void smu10_smc_disable_vcn(struct pp_hwmgr *hwmgr)
+{
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_PowerDownVcn, 0);
+}
+
+static int smu10_smu_fini(struct pp_hwmgr *hwmgr)
+{
+ struct smu10_smumgr *priv =
+ (struct smu10_smumgr *)(hwmgr->smu_backend);
+
+ if (priv) {
+ smu10_smc_disable_sdma(hwmgr);
+ smu10_smc_disable_vcn(hwmgr);
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
+ &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
+ &priv->smu_tables.entry[SMU10_WMTABLE].table);
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
+ &priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
+ &priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
+ }
+
+ return 0;
+}
+
+static int smu10_start_smu(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+ hwmgr->smu_version = smu10_read_arg_from_smc(hwmgr);
+ adev->pm.fw_version = hwmgr->smu_version >> 8;
+
+ if (smu10_verify_smc_interface(hwmgr))
+ return -EINVAL;
+ smu10_smc_enable_sdma(hwmgr);
+ smu10_smc_enable_vcn(hwmgr);
+ return 0;
+}
+
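+/*
+ * One VRAM buffer is pinned per SMU table (watermarks and DPM clocks)
+ * until smu10_smu_fini(); the MC address and CPU mapping cached in
+ * smu_tables are what the transfer helpers above operate on.
+ */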
+static int smu10_smu_init(struct pp_hwmgr *hwmgr)
+{
+ struct smu10_smumgr *priv;
+ int r;
+
+ priv = kzalloc(sizeof(struct smu10_smumgr), GFP_KERNEL);
+
+ if (!priv)
+ return -ENOMEM;
+
+ hwmgr->smu_backend = priv;
+
+ /* allocate space for watermarks table */
+ r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+ sizeof(Watermarks_t),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[SMU10_WMTABLE].handle,
+ &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
+ &priv->smu_tables.entry[SMU10_WMTABLE].table);
+
+ if (r)
+ goto err0;
+
+ priv->smu_tables.entry[SMU10_WMTABLE].version = 0x01;
+ priv->smu_tables.entry[SMU10_WMTABLE].size = sizeof(Watermarks_t);
+ priv->smu_tables.entry[SMU10_WMTABLE].table_id = TABLE_WATERMARKS;
+
+ /* allocate space for DPM clocks table */
+ r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+ sizeof(DpmClocks_t),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
+ &priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
+ &priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
+
+ if (r)
+ goto err1;
+
+ priv->smu_tables.entry[SMU10_CLOCKTABLE].version = 0x01;
+ priv->smu_tables.entry[SMU10_CLOCKTABLE].size = sizeof(DpmClocks_t);
+ priv->smu_tables.entry[SMU10_CLOCKTABLE].table_id = TABLE_DPMCLOCKS;
+
+ return 0;
+
+err1:
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
+ &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
+ &priv->smu_tables.entry[SMU10_WMTABLE].table);
+err0:
+ kfree(priv);
+ return -EINVAL;
+}
+
+static int smu10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
+{
+ int ret;
+
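+ /* rw == true reads the table from the SMU; rw == false writes it */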
+ if (rw)
+ ret = smu10_copy_table_from_smc(hwmgr, table, table_id);
+ else
+ ret = smu10_copy_table_to_smc(hwmgr, table, table_id);
+
+ return ret;
+}
+
+
+const struct pp_smumgr_func smu10_smu_funcs = {
+ .smu_init = &smu10_smu_init,
+ .smu_fini = &smu10_smu_fini,
+ .start_smu = &smu10_start_smu,
+ .request_smu_load_specific_fw = NULL,
+ .send_msg_to_smc = &smu10_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &smu10_send_msg_to_smc_with_parameter,
+ .download_pptable_settings = NULL,
+ .upload_pptable_settings = NULL,
+ .get_argument = smu10_read_arg_from_smc,
+ .smc_table_manager = smu10_smc_table_manager,
+};
+
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h
index 0ff4ac5838f7..9c2be74a2b2f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h
@@ -21,17 +21,13 @@
*
*/
-#ifndef PP_RAVEN_SMUMANAGER_H
-#define PP_RAVEN_SMUMANAGER_H
+#ifndef PP_SMU10_SMUMANAGER_H
+#define PP_SMU10_SMUMANAGER_H
#include "rv_ppsmc.h"
#include "smu10_driver_if.h"
-enum SMU_TABLE_ID {
- WMTABLE = 0,
- CLOCKTABLE,
- MAX_SMU_TABLE,
-};
+#define MAX_SMU_TABLE 2
struct smu_table_entry {
uint32_t version;
@@ -46,16 +42,9 @@ struct smu_table_array {
struct smu_table_entry entry[MAX_SMU_TABLE];
};
-struct rv_smumgr {
+struct smu10_smumgr {
struct smu_table_array smu_tables;
};
-int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
-bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr);
-int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id);
-int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id);
-
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 7394bb46b8b2..0399c10d2be0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -585,7 +585,6 @@ int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
int smu7_init(struct pp_hwmgr *hwmgr)
{
struct smu7_smumgr *smu_data;
- uint8_t *internal_buf;
uint64_t mc_addr = 0;
int r;
/* Allocate memory for backend private data */
@@ -627,13 +626,10 @@ int smu7_init(struct pp_hwmgr *hwmgr)
&smu_data->header_buffer.kaddr);
return -EINVAL;
}
- internal_buf = smu_data->smu_buffer.kaddr;
smu_data->smu_buffer.mc_addr = mc_addr;
if (smum_is_hw_avfs_present(hwmgr))
- smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
- else
- smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
+ hwmgr->avfs_supported = true;
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 64334a82b77b..126d300259ba 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -36,11 +36,6 @@ struct smu7_buffer_entry {
struct amdgpu_bo *handle;
};
-struct smu7_avfs {
- enum AVFS_BTC_STATUS avfs_btc_status;
- uint32_t avfs_btc_param;
-};
-
struct smu7_smumgr {
uint8_t *header;
uint8_t *mec_image;
@@ -55,7 +50,7 @@ struct smu7_smumgr {
uint32_t ulv_setting_starts;
uint8_t security_hard_key;
uint32_t acpi_optimization;
- struct smu7_avfs avfs;
+ uint32_t avfs_btc_param;
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
new file mode 100644
index 000000000000..8c49704b81af
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -0,0 +1,891 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "cgs_common.h"
+#include "smu/smu_8_0_d.h"
+#include "smu/smu_8_0_sh_mask.h"
+#include "smu8.h"
+#include "smu8_fusion.h"
+#include "smu8_smumgr.h"
+#include "cz_ppsmc.h"
+#include "smu_ucode_xfer_cz.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "smumgr.h"
+
+#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)
+
+static const enum smu8_scratch_entry firmware_list[] = {
+ SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
+};
+
+static int smu8_smum_get_argument(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ return cgs_read_register(hwmgr->device,
+ mmSMU_MP1_SRBM2P_ARG_0);
+}
+
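+/*
+ * Post a message without waiting for it to complete: wait only until
+ * the previous message's response slot is filled, clear it, then write
+ * the new message ID.
+ */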
+static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+ int result = 0;
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
+ SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+ if (result != 0) {
+ pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg);
+ return result;
+ }
+
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
+
+ return 0;
+}
+
+/* Send a message to the SMC, and wait for its response.*/
+static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+ int result = 0;
+
+ result = smu8_send_msg_to_smc_async(hwmgr, msg);
+ if (result != 0)
+ return result;
+
+ return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
+ SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+}
+
+static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr,
+ uint32_t smc_address, uint32_t limit)
+{
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ if (0 != (3 & smc_address)) {
+ pr_err("SMC address must be 4 byte aligned\n");
+ return -EINVAL;
+ }
+
+ if (limit <= (smc_address + 3)) {
+ pr_err("SMC address beyond the SMC RAM area\n");
+ return -EINVAL;
+ }
+
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
+ SMN_MP1_SRAM_START_ADDR + smc_address);
+
+ return 0;
+}
+
+static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
+ uint32_t smc_address, uint32_t value, uint32_t limit)
+{
+ int result;
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ result = smu8_set_smc_sram_address(hwmgr, smc_address, limit);
+ if (!result)
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
+
+ return result;
+}
+
+static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter)
+{
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
+
+ return smu8_send_msg_to_smc(hwmgr, msg);
+}
+
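+/*
+ * Poll the UcodeLoadStatus word in the SMU8 firmware header until every
+ * bit in @firmware is set, or until the usec timeout expires.
+ */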
+static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr,
+ uint32_t firmware)
+{
+ int i;
+ uint32_t index = SMN_MP1_SRAM_START_ADDR +
+ SMU8_FIRMWARE_HEADER_LOCATION +
+ offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
+
+ for (i = 0; i < hwmgr->usec_timeout; i++) {
+ if (firmware ==
+ (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
+ break;
+ udelay(1);
+ }
+
+ if (i >= hwmgr->usec_timeout) {
+ pr_err("SMU check loaded firmware failed.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
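+/*
+ * Hand the MEC ucode to the compute microcontroller: halt both MEC
+ * engines, program the instruction-cache base control bits, then write
+ * the ucode's 64-bit MC address split across CP_CPC_IC_BASE_LO/HI.
+ */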
+static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reg_data;
+ uint32_t tmp;
+ int ret = 0;
+ struct cgs_firmware_info info = {0};
+ struct smu8_smumgr *smu8_smu;
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ smu8_smu = hwmgr->smu_backend;
+ ret = cgs_get_firmware_info(hwmgr->device,
+ CGS_UCODE_ID_CP_MEC, &info);
+
+ if (ret)
+ return -EINVAL;
+
+ /* Disable MEC parsing/prefetching */
+ tmp = cgs_read_register(hwmgr->device,
+ mmCP_MEC_CNTL);
+ tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
+ tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
+ cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
+
+ tmp = cgs_read_register(hwmgr->device,
+ mmCP_CPC_IC_BASE_CNTL);
+
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
+
+ reg_data = lower_32_bits(info.mc_addr) &
+ PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
+
+ reg_data = upper_32_bits(info.mc_addr) &
+ PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
+
+ return 0;
+}
+
+static uint8_t smu8_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
+ enum smu8_scratch_entry firmware_enum)
+{
+ uint8_t ret = 0;
+
+ switch (firmware_enum) {
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0:
+ ret = UCODE_ID_SDMA0;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1:
+ if (hwmgr->chip_id == CHIP_STONEY)
+ ret = UCODE_ID_SDMA0;
+ else
+ ret = UCODE_ID_SDMA1;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE:
+ ret = UCODE_ID_CP_CE;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
+ ret = UCODE_ID_CP_PFP;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME:
+ ret = UCODE_ID_CP_ME;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
+ ret = UCODE_ID_CP_MEC_JT1;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
+ if (hwmgr->chip_id == CHIP_STONEY)
+ ret = UCODE_ID_CP_MEC_JT1;
+ else
+ ret = UCODE_ID_CP_MEC_JT2;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
+ ret = UCODE_ID_GMCON_RENG;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G:
+ ret = UCODE_ID_RLC_G;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
+ ret = UCODE_ID_RLC_SCRATCH;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
+ ret = UCODE_ID_RLC_SRM_ARAM;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
+ ret = UCODE_ID_RLC_SRM_DRAM;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
+ ret = UCODE_ID_DMCU_ERAM;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
+ ret = UCODE_ID_DMCU_IRAM;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
+ ret = TASK_ARG_INIT_MM_PWR_LOG;
+ break;
+ case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
+ case SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
+ case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
+ case SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
+ case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START:
+ case SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
+ ret = TASK_ARG_REG_MMIO;
+ break;
+ case SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
+ ret = TASK_ARG_INIT_CLK_TABLE;
+ break;
+ }
+
+ return ret;
+}
+
+static enum cgs_ucode_id smu8_convert_fw_type_to_cgs(uint32_t fw_type)
+{
+ enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
+
+ switch (fw_type) {
+ case UCODE_ID_SDMA0:
+ result = CGS_UCODE_ID_SDMA0;
+ break;
+ case UCODE_ID_SDMA1:
+ result = CGS_UCODE_ID_SDMA1;
+ break;
+ case UCODE_ID_CP_CE:
+ result = CGS_UCODE_ID_CP_CE;
+ break;
+ case UCODE_ID_CP_PFP:
+ result = CGS_UCODE_ID_CP_PFP;
+ break;
+ case UCODE_ID_CP_ME:
+ result = CGS_UCODE_ID_CP_ME;
+ break;
+ case UCODE_ID_CP_MEC_JT1:
+ result = CGS_UCODE_ID_CP_MEC_JT1;
+ break;
+ case UCODE_ID_CP_MEC_JT2:
+ result = CGS_UCODE_ID_CP_MEC_JT2;
+ break;
+ case UCODE_ID_RLC_G:
+ result = CGS_UCODE_ID_RLC_G;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
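+/*
+ * The TOC buffer describes chains of SMU tasks.  Each task names a
+ * ucode image or scratch buffer by MC address and size, and tasks are
+ * linked through ->next until END_OF_TASK_LIST.  The SMU walks a chain
+ * when the driver issues PPSMC_MSG_ExecuteJob with its entry index.
+ */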
+static int smu8_smu_populate_single_scratch_task(
+ struct pp_hwmgr *hwmgr,
+ enum smu8_scratch_entry fw_enum,
+ uint8_t type, bool is_last)
+{
+ uint8_t i;
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
+ struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];
+
+ task->type = type;
+ task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
+ task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;
+
+ for (i = 0; i < smu8_smu->scratch_buffer_length; i++)
+ if (smu8_smu->scratch_buffer[i].firmware_ID == fw_enum)
+ break;
+
+ if (i >= smu8_smu->scratch_buffer_length) {
+ pr_err("Invalid Firmware Type\n");
+ return -EINVAL;
+ }
+
+ task->addr.low = lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
+ task->addr.high = upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
+ task->size_bytes = smu8_smu->scratch_buffer[i].data_size;
+
+ if (SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
+ struct smu8_ih_meta_data *pIHReg_restore =
+ (struct smu8_ih_meta_data *)smu8_smu->scratch_buffer[i].kaddr;
+ pIHReg_restore->command =
+ METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
+ }
+
+ return 0;
+}
+
+static int smu8_smu_populate_single_ucode_load_task(
+ struct pp_hwmgr *hwmgr,
+ enum smu8_scratch_entry fw_enum,
+ bool is_last)
+{
+ uint8_t i;
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
+ struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];
+
+ task->type = TASK_TYPE_UCODE_LOAD;
+ task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
+ task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;
+
+ for (i = 0; i < smu8_smu->driver_buffer_length; i++)
+ if (smu8_smu->driver_buffer[i].firmware_ID == fw_enum)
+ break;
+
+ if (i >= smu8_smu->driver_buffer_length) {
+ pr_err("Invalid Firmware Type\n");
+ return -EINVAL;
+ }
+
+ task->addr.low = lower_32_bits(smu8_smu->driver_buffer[i].mc_addr);
+ task->addr.high = upper_32_bits(smu8_smu->driver_buffer[i].mc_addr);
+ task->size_bytes = smu8_smu->driver_buffer[i].data_size;
+
+ return 0;
+}
+
+static int smu8_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+
+ smu8_smu->toc_entry_aram = smu8_smu->toc_entry_used_count;
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+ TASK_TYPE_UCODE_SAVE, true);
+
+ return 0;
+}
+
+static int smu8_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
+
+ for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
+ toc->JobList[i] = (uint8_t)IGNORE_JOB;
+
+ return 0;
+}
+
+static int smu8_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
+
+ toc->JobList[JOB_GFX_SAVE] = (uint8_t)smu8_smu->toc_entry_used_count;
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+ TASK_TYPE_UCODE_SAVE, false);
+
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+ TASK_TYPE_UCODE_SAVE, true);
+
+ return 0;
+}
+
+
+static int smu8_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
+
+ toc->JobList[JOB_GFX_RESTORE] = (uint8_t)smu8_smu->toc_entry_used_count;
+
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+
+ if (hwmgr->chip_id == CHIP_STONEY)
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+ else
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
+
+ /* populate scratch */
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+ TASK_TYPE_UCODE_LOAD, false);
+
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+ TASK_TYPE_UCODE_LOAD, false);
+
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+ TASK_TYPE_UCODE_LOAD, true);
+
+ return 0;
+}
+
+static int smu8_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+
+ smu8_smu->toc_entry_power_profiling_index = smu8_smu->toc_entry_used_count;
+
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
+ TASK_TYPE_INITIALIZE, true);
+ return 0;
+}
+
+static int smu8_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+
+ smu8_smu->toc_entry_initialize_index = smu8_smu->toc_entry_used_count;
+
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
+ if (hwmgr->chip_id != CHIP_STONEY)
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+ if (hwmgr->chip_id != CHIP_STONEY)
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
+
+ return 0;
+}
+
+static int smu8_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+
+ smu8_smu->toc_entry_clock_table = smu8_smu->toc_entry_used_count;
+
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
+ TASK_TYPE_INITIALIZE, true);
+
+ return 0;
+}
+
+static int smu8_smu_construct_toc(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+
+ smu8_smu->toc_entry_used_count = 0;
+ smu8_smu_initialize_toc_empty_job_list(hwmgr);
+ smu8_smu_construct_toc_for_rlc_aram_save(hwmgr);
+ smu8_smu_construct_toc_for_vddgfx_enter(hwmgr);
+ smu8_smu_construct_toc_for_vddgfx_exit(hwmgr);
+ smu8_smu_construct_toc_for_power_profiling(hwmgr);
+ smu8_smu_construct_toc_for_bootup(hwmgr);
+ smu8_smu_construct_toc_for_clock_table(hwmgr);
+
+ return 0;
+}
+
+static int smu8_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ uint32_t firmware_type;
+ uint32_t i;
+ int ret;
+ enum cgs_ucode_id ucode_id;
+ struct cgs_firmware_info info = {0};
+
+ smu8_smu->driver_buffer_length = 0;
+
+ for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
+
+ firmware_type = smu8_translate_firmware_enum_to_arg(hwmgr,
+ firmware_list[i]);
+
+ ucode_id = smu8_convert_fw_type_to_cgs(firmware_type);
+
+ ret = cgs_get_firmware_info(hwmgr->device,
+ ucode_id, &info);
+
+ if (ret == 0) {
+ smu8_smu->driver_buffer[i].mc_addr = info.mc_addr;
+
+ smu8_smu->driver_buffer[i].data_size = info.image_size;
+
+ smu8_smu->driver_buffer[i].firmware_ID = firmware_list[i];
+ smu8_smu->driver_buffer_length++;
+ }
+ }
+
+ return 0;
+}
+
+static int smu8_smu_populate_single_scratch_entry(
+ struct pp_hwmgr *hwmgr,
+ enum smu8_scratch_entry scratch_type,
+ uint32_t ulsize_byte,
+ struct smu8_buffer_entry *entry)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
+
+ entry->data_size = ulsize_byte;
+ entry->kaddr = (char *) smu8_smu->smu_buffer.kaddr +
+ smu8_smu->smu_buffer_used_bytes;
+ entry->mc_addr = smu8_smu->smu_buffer.mc_addr + smu8_smu->smu_buffer_used_bytes;
+ entry->firmware_ID = scratch_type;
+
+ smu8_smu->smu_buffer_used_bytes += ulsize_aligned;
+
+ return 0;
+}
+
+static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ unsigned long i;
+
+ for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
+ if (smu8_smu->scratch_buffer[i].firmware_ID
+ == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
+ break;
+ }
+
+ *table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr;
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetClkTableAddrHi,
+ upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetClkTableAddrLo,
+ lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_clock_table);
+
+ smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
+
+ return 0;
+}
+
+static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ unsigned long i;
+
+ for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
+ if (smu8_smu->scratch_buffer[i].firmware_ID
+ == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
+ break;
+ }
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetClkTableAddrHi,
+ upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetClkTableAddrLo,
+ lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_clock_table);
+
+ smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
+
+ return 0;
+}
+
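+/*
+ * Firmware load sequence: clear UcodeLoadStatus in the firmware header,
+ * hand the TOC's DRAM address to the SMU, initialize the job list, then
+ * execute the ARAM-save, power-profiling and initialize jobs in turn.
+ */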
+static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ uint32_t smc_address;
+
+ if (!hwmgr->reload_fw) {
+ pr_info("skip reloading...\n");
+ return 0;
+ }
+
+ smu8_smu_populate_firmware_entries(hwmgr);
+
+ smu8_smu_construct_toc(hwmgr);
+
+ smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
+ offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
+
+ smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DriverDramAddrHi,
+ upper_32_bits(smu8_smu->toc_buffer.mc_addr));
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DriverDramAddrLo,
+ lower_32_bits(smu8_smu->toc_buffer.mc_addr));
+
+ smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_aram);
+ smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_power_profiling_index);
+
+ return smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_initialize_index);
+}
+
+static int smu8_start_smu(struct pp_hwmgr *hwmgr)
+{
+ int ret = 0;
+ uint32_t fw_to_check = 0;
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ uint32_t index = SMN_MP1_SRAM_START_ADDR +
+ SMU8_FIRMWARE_HEADER_LOCATION +
+ offsetof(struct SMU8_Firmware_Header, Version);
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
+ hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
+ adev->pm.fw_version = hwmgr->smu_version >> 8;
+
+ fw_to_check = UCODE_ID_RLC_G_MASK |
+ UCODE_ID_SDMA0_MASK |
+ UCODE_ID_SDMA1_MASK |
+ UCODE_ID_CP_CE_MASK |
+ UCODE_ID_CP_ME_MASK |
+ UCODE_ID_CP_PFP_MASK |
+ UCODE_ID_CP_MEC_JT1_MASK |
+ UCODE_ID_CP_MEC_JT2_MASK;
+
+ if (hwmgr->chip_id == CHIP_STONEY)
+ fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
+
+ ret = smu8_request_smu_load_fw(hwmgr);
+ if (ret)
+ pr_err("SMU firmware load failed\n");
+
+ smu8_check_fw_load_finish(hwmgr, fw_to_check);
+
+ ret = smu8_load_mec_firmware(hwmgr);
+ if (ret)
+ pr_err("Mec Firmware load failed\n");
+
+ return ret;
+}
+
+static int smu8_smu_init(struct pp_hwmgr *hwmgr)
+{
+ int ret = 0;
+ struct smu8_smumgr *smu8_smu;
+
+ smu8_smu = kzalloc(sizeof(struct smu8_smumgr), GFP_KERNEL);
+ if (smu8_smu == NULL)
+ return -ENOMEM;
+
+ hwmgr->smu_backend = smu8_smu;
+
+ smu8_smu->toc_buffer.data_size = 4096;
+ smu8_smu->smu_buffer.data_size =
+ ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
+ ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
+ ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
+ ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
+ ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
+
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+ smu8_smu->toc_buffer.data_size,
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &smu8_smu->toc_buffer.handle,
+ &smu8_smu->toc_buffer.mc_addr,
+ &smu8_smu->toc_buffer.kaddr);
+ if (ret)
+ goto err2;
+
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+ smu8_smu->smu_buffer.data_size,
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &smu8_smu->smu_buffer.handle,
+ &smu8_smu->smu_buffer.mc_addr,
+ &smu8_smu->smu_buffer.kaddr);
+ if (ret)
+ goto err1;
+
+ if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+ UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
+ &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
+ pr_err("Error when Populate Firmware Entry.\n");
+ goto err0;
+ }
+
+ if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+ UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
+ &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
+ pr_err("Error when Populate Firmware Entry.\n");
+ goto err0;
+ }
+ if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+ UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
+ &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
+ pr_err("Error when Populate Firmware Entry.\n");
+ goto err0;
+ }
+
+ if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
+ sizeof(struct SMU8_MultimediaPowerLogData),
+ &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
+ pr_err("Error when Populate Firmware Entry.\n");
+ goto err0;
+ }
+
+ if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
+ SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
+ sizeof(struct SMU8_Fusion_ClkTable),
+ &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
+ pr_err("Error when Populate Firmware Entry.\n");
+ goto err0;
+ }
+
+ return 0;
+
+err0:
+ amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
+ &smu8_smu->smu_buffer.mc_addr,
+ &smu8_smu->smu_buffer.kaddr);
+err1:
+ amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
+ &smu8_smu->toc_buffer.mc_addr,
+ &smu8_smu->toc_buffer.kaddr);
+err2:
+ kfree(smu8_smu);
+ return -EINVAL;
+}
+
+static int smu8_smu_fini(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu;
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ smu8_smu = hwmgr->smu_backend;
+ if (smu8_smu) {
+ amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
+ &smu8_smu->toc_buffer.mc_addr,
+ &smu8_smu->toc_buffer.kaddr);
+ amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
+ &smu8_smu->smu_buffer.mc_addr,
+ &smu8_smu->smu_buffer.kaddr);
+ kfree(smu8_smu);
+ }
+
+ return 0;
+}
+
+static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
+ unsigned long check_feature)
+{
+ int result;
+ unsigned long features;
+
+ result = smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
+ if (result == 0) {
+ features = smum_get_argument(hwmgr);
+ if (features & check_feature)
+ return true;
+ }
+
+ return false;
+}
+
+static bool smu8_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ if (smu8_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
+ return true;
+ return false;
+}
+
+const struct pp_smumgr_func smu8_smu_funcs = {
+ .smu_init = smu8_smu_init,
+ .smu_fini = smu8_smu_fini,
+ .start_smu = smu8_start_smu,
+ .check_fw_load_finish = smu8_check_fw_load_finish,
+ .request_smu_load_fw = NULL,
+ .request_smu_load_specific_fw = NULL,
+ .get_argument = smu8_smum_get_argument,
+ .send_msg_to_smc = smu8_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = smu8_send_msg_to_smc_with_parameter,
+ .download_pptable_settings = smu8_download_pptable_settings,
+ .upload_pptable_settings = smu8_upload_pptable_settings,
+ .is_dpm_running = smu8_is_dpm_running,
+};
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h
index c13ab8377e26..c7b61222d258 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h
@@ -20,63 +20,63 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#ifndef _CZ_SMUMGR_H_
-#define _CZ_SMUMGR_H_
+#ifndef _SMU8_SMUMGR_H_
+#define _SMU8_SMUMGR_H_
#define MAX_NUM_FIRMWARE 8
#define MAX_NUM_SCRATCH 11
-#define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024
-#define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048
-#define CZ_SCRATCH_SIZE_SDMA_METADATA 1024
-#define CZ_SCRATCH_SIZE_IH ((2*256+1)*4)
+#define SMU8_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024
+#define SMU8_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048
+#define SMU8_SCRATCH_SIZE_SDMA_METADATA 1024
+#define SMU8_SCRATCH_SIZE_IH ((2*256+1)*4)
#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000
-enum cz_scratch_entry {
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
- CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
- CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM,
- CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM,
- CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
- CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT,
- CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING,
- CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS,
- CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT,
- CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START,
- CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS,
- CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
+enum smu8_scratch_entry {
+ SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
+ SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT,
+ SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING,
+ SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS,
+ SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT,
+ SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START,
+ SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS,
+ SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
};
-struct cz_buffer_entry {
+struct smu8_buffer_entry {
uint32_t data_size;
uint64_t mc_addr;
void *kaddr;
- enum cz_scratch_entry firmware_ID;
+ enum smu8_scratch_entry firmware_ID;
struct amdgpu_bo *handle; /* bo handle, used when releasing the bo */
};
-struct cz_register_index_data_pair {
+struct smu8_register_index_data_pair {
uint32_t offset;
uint32_t value;
};
-struct cz_ih_meta_data {
+struct smu8_ih_meta_data {
uint32_t command;
- struct cz_register_index_data_pair register_index_value_pair[1];
+ struct smu8_register_index_data_pair register_index_value_pair[1];
};
-struct cz_smumgr {
+struct smu8_smumgr {
uint8_t driver_buffer_length;
uint8_t scratch_buffer_length;
uint16_t toc_entry_used_count;
@@ -88,12 +88,12 @@ struct cz_smumgr {
uint16_t ih_register_restore_task_size;
uint16_t smu_buffer_used_bytes;
- struct cz_buffer_entry toc_buffer;
- struct cz_buffer_entry smu_buffer;
- struct cz_buffer_entry firmware_buffer;
- struct cz_buffer_entry driver_buffer[MAX_NUM_FIRMWARE];
- struct cz_buffer_entry meta_data_buffer[MAX_NUM_FIRMWARE];
- struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH];
+ struct smu8_buffer_entry toc_buffer;
+ struct smu8_buffer_entry smu_buffer;
+ struct smu8_buffer_entry firmware_buffer;
+ struct smu8_buffer_entry driver_buffer[MAX_NUM_FIRMWARE];
+ struct smu8_buffer_entry meta_data_buffer[MAX_NUM_FIRMWARE];
+ struct smu8_buffer_entry scratch_buffer[MAX_NUM_SCRATCH];
};
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 3645127c8ee2..04c45c236a73 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -28,7 +28,6 @@
#include <linux/types.h>
#include <drm/amdgpu_drm.h>
#include "smumgr.h"
-#include "cgs_common.h"
MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
@@ -200,3 +199,11 @@ int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_setting)
return -EINVAL;
}
+
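+/*
+ * Dispatch a table read (rw == true) or write (rw == false) to the
+ * backend, if it implements one.  A hypothetical smu10 caller fetching
+ * the watermarks table would look like:
+ *
+ *	Watermarks_t wm;
+ *
+ *	ret = smum_smc_table_manager(hwmgr, (uint8_t *)&wm,
+ *				     SMU10_WMTABLE, true);
+ */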
+int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
+{
+ if (hwmgr->smumgr_funcs->smc_table_manager)
+ return hwmgr->smumgr_funcs->smc_table_manager(hwmgr, table, table_id, rw);
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 39d6f4ef96ce..26cca8cce8f1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -229,8 +229,10 @@ static int tonga_smu_init(struct pp_hwmgr *hwmgr)
hwmgr->smu_backend = tonga_priv;
- if (smu7_init(hwmgr))
+ if (smu7_init(hwmgr)) {
+ kfree(tonga_priv);
return -EINVAL;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 15e1afa28018..e08a6116ac05 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -27,11 +27,9 @@
#include "vega10_smumgr.h"
#include "vega10_ppsmc.h"
#include "smu9_driver_if.h"
-
#include "ppatomctrl.h"
#include "pp_debug.h"
-#include "smu_ucode_xfer_vi.h"
-#include "smu7_smumgr.h"
+
#define AVFS_EN_MSB 1568
#define AVFS_EN_LSB 1568
@@ -377,16 +375,13 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
static int vega10_smu_init(struct pp_hwmgr *hwmgr)
{
- struct amdgpu_bo *handle = NULL;
struct vega10_smumgr *priv;
- uint64_t mc_addr;
- void *kaddr = NULL;
unsigned long tools_size;
int ret;
struct cgs_firmware_info info = {0};
ret = cgs_get_firmware_info(hwmgr->device,
- smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
+ CGS_UCODE_ID_SMU,
&info);
if (ret || !info.kptr)
return -EINVAL;
@@ -403,28 +398,24 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
sizeof(PPTable_t),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
- &handle,
- &mc_addr,
- &kaddr);
-
+ &priv->smu_tables.entry[PPTABLE].handle,
+ &priv->smu_tables.entry[PPTABLE].mc_addr,
+ &priv->smu_tables.entry[PPTABLE].table);
if (ret)
- return -EINVAL;
+ goto free_backend;
priv->smu_tables.entry[PPTABLE].version = 0x01;
priv->smu_tables.entry[PPTABLE].size = sizeof(PPTable_t);
priv->smu_tables.entry[PPTABLE].table_id = TABLE_PPTABLE;
- priv->smu_tables.entry[PPTABLE].mc_addr = mc_addr;
- priv->smu_tables.entry[PPTABLE].table = kaddr;
- priv->smu_tables.entry[PPTABLE].handle = handle;
/* allocate space for watermarks table */
ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
sizeof(Watermarks_t),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
- &handle,
- &mc_addr,
- &kaddr);
+ &priv->smu_tables.entry[WMTABLE].handle,
+ &priv->smu_tables.entry[WMTABLE].mc_addr,
+ &priv->smu_tables.entry[WMTABLE].table);
if (ret)
goto err0;
@@ -432,18 +423,15 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
priv->smu_tables.entry[WMTABLE].version = 0x01;
priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS;
- priv->smu_tables.entry[WMTABLE].mc_addr = mc_addr;
- priv->smu_tables.entry[WMTABLE].table = kaddr;
- priv->smu_tables.entry[WMTABLE].handle = handle;
/* allocate space for AVFS table */
ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
sizeof(AvfsTable_t),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
- &handle,
- &mc_addr,
- &kaddr);
+ &priv->smu_tables.entry[AVFSTABLE].handle,
+ &priv->smu_tables.entry[AVFSTABLE].mc_addr,
+ &priv->smu_tables.entry[AVFSTABLE].table);
if (ret)
goto err1;
@@ -451,9 +439,6 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
priv->smu_tables.entry[AVFSTABLE].version = 0x01;
priv->smu_tables.entry[AVFSTABLE].size = sizeof(AvfsTable_t);
priv->smu_tables.entry[AVFSTABLE].table_id = TABLE_AVFS;
- priv->smu_tables.entry[AVFSTABLE].mc_addr = mc_addr;
- priv->smu_tables.entry[AVFSTABLE].table = kaddr;
- priv->smu_tables.entry[AVFSTABLE].handle = handle;
tools_size = 0x19000;
if (tools_size) {
@@ -461,17 +446,14 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
tools_size,
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
- &handle,
- &mc_addr,
- &kaddr);
+ &priv->smu_tables.entry[TOOLSTABLE].handle,
+ &priv->smu_tables.entry[TOOLSTABLE].mc_addr,
+ &priv->smu_tables.entry[TOOLSTABLE].table);
if (ret)
goto err2;
priv->smu_tables.entry[TOOLSTABLE].version = 0x01;
priv->smu_tables.entry[TOOLSTABLE].size = tools_size;
priv->smu_tables.entry[TOOLSTABLE].table_id = TABLE_PMSTATUSLOG;
- priv->smu_tables.entry[TOOLSTABLE].mc_addr = mc_addr;
- priv->smu_tables.entry[TOOLSTABLE].table = kaddr;
- priv->smu_tables.entry[TOOLSTABLE].handle = handle;
}
/* allocate space for AVFS Fuse table */
@@ -479,18 +461,16 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
sizeof(AvfsFuseOverride_t),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
- &handle,
- &mc_addr,
- &kaddr);
+ &priv->smu_tables.entry[AVFSFUSETABLE].handle,
+ &priv->smu_tables.entry[AVFSFUSETABLE].mc_addr,
+ &priv->smu_tables.entry[AVFSFUSETABLE].table);
if (ret)
goto err3;
priv->smu_tables.entry[AVFSFUSETABLE].version = 0x01;
priv->smu_tables.entry[AVFSFUSETABLE].size = sizeof(AvfsFuseOverride_t);
priv->smu_tables.entry[AVFSFUSETABLE].table_id = TABLE_AVFS_FUSE_OVERRIDE;
- priv->smu_tables.entry[AVFSFUSETABLE].mc_addr = mc_addr;
- priv->smu_tables.entry[AVFSFUSETABLE].table = kaddr;
- priv->smu_tables.entry[AVFSFUSETABLE].handle = handle;
+
return 0;
@@ -511,6 +491,9 @@ err0:
amdgpu_bo_free_kernel(&priv->smu_tables.entry[PPTABLE].handle,
&priv->smu_tables.entry[PPTABLE].mc_addr,
&priv->smu_tables.entry[PPTABLE].table);
+free_backend:
+ kfree(hwmgr->smu_backend);
+
return -EINVAL;
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 211224f6bdd3..fe354ebf374d 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -199,8 +199,8 @@ static struct ttm_backend_func ast_tt_backend_func = {
};
-static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags)
+static struct ttm_tt *ast_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct ttm_tt *tt;
@@ -208,7 +208,7 @@ static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
if (tt == NULL)
return NULL;
tt->func = &ast_tt_backend_func;
- if (ttm_tt_init(tt, bdev, size, page_flags)) {
+ if (ttm_tt_init(tt, bo, page_flags)) {
kfree(tt);
return NULL;
}
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 18b95329f631..39cd08416773 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -176,8 +176,7 @@ static struct ttm_backend_func bochs_tt_backend_func = {
.destroy = &bochs_ttm_backend_destroy,
};
-static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size,
+static struct ttm_tt *bochs_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
struct ttm_tt *tt;
@@ -186,7 +185,7 @@ static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev,
if (tt == NULL)
return NULL;
tt->func = &bochs_tt_backend_func;
- if (ttm_tt_init(tt, bdev, size, page_flags)) {
+ if (ttm_tt_init(tt, bo, page_flags)) {
kfree(tt);
return NULL;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 6cd0233b3bf8..f21953243790 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -199,8 +199,8 @@ static struct ttm_backend_func cirrus_tt_backend_func = {
};
-static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags)
+static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct ttm_tt *tt;
@@ -208,7 +208,7 @@ static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
if (tt == NULL)
return NULL;
tt->func = &cirrus_tt_backend_func;
- if (ttm_tt_init(tt, bdev, size, page_flags)) {
+ if (ttm_tt_init(tt, bo, page_flags)) {
kfree(tt);
return NULL;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 8dfffdbb6b07..4871025f7573 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -200,8 +200,7 @@ static struct ttm_backend_func hibmc_tt_backend_func = {
.destroy = &hibmc_ttm_backend_destroy,
};
-static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size,
+static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_buffer_object *bo,
u32 page_flags)
{
struct ttm_tt *tt;
@@ -213,7 +212,7 @@ static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
return NULL;
}
tt->func = &hibmc_tt_backend_func;
- ret = ttm_tt_init(tt, bdev, size, page_flags);
+ ret = ttm_tt_init(tt, bo, page_flags);
if (ret) {
DRM_ERROR("failed to initialize ttm_tt: %d\n", ret);
kfree(tt);
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 69beb2046008..05570f0de4d7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -199,8 +199,8 @@ static struct ttm_backend_func mgag200_tt_backend_func = {
};
-static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags)
+static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct ttm_tt *tt;
@@ -208,7 +208,7 @@ static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
if (tt == NULL)
return NULL;
tt->func = &mgag200_tt_backend_func;
- if (ttm_tt_init(tt, bdev, size, page_flags)) {
+ if (ttm_tt_init(tt, bo, page_flags)) {
kfree(tt);
return NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 49cc8dfcb141..6f402c4f2bdd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -604,19 +604,17 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
}
static struct ttm_tt *
-nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
- uint32_t page_flags)
+nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
#if IS_ENABLED(CONFIG_AGP)
- struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
if (drm->agp.bridge) {
- return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
- page_flags);
+ return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
}
#endif
- return nouveau_sgdma_create_ttm(bdev, size, page_flags);
+ return nouveau_sgdma_create_ttm(bo, page_flags);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 87b030437f4d..8ebdc74cc0ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -82,10 +82,9 @@ static struct ttm_backend_func nv50_sgdma_backend = {
};
struct ttm_tt *
-nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags)
+nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
{
- struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_sgdma_be *nvbe;
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -97,7 +96,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
else
nvbe->ttm.ttm.func = &nv50_sgdma_backend;
- if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags))
+ if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags))
/*
* A failing ttm_dma_tt_init() will call ttm_tt_destroy()
* and thus our nouveau_sgdma_destroy() hook, so we don't need
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
index 64e484ee5ef1..89929ad8c7cd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -12,8 +12,8 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
extern const struct ttm_mem_type_manager_func nv04_gart_manager;
-struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *,
- unsigned long size, u32 page_flags);
+struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo,
+ u32 page_flags);
int nouveau_ttm_init(struct nouveau_drm *drm);
void nouveau_ttm_fini(struct nouveau_drm *drm);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 2ad70eb96207..ee2340e31f06 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -291,19 +291,19 @@ static struct ttm_backend_func qxl_backend_func = {
.destroy = &qxl_ttm_backend_destroy,
};
-static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags)
+static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct qxl_device *qdev;
struct qxl_ttm_tt *gtt;
- qdev = qxl_get_qdev(bdev);
+ qdev = qxl_get_qdev(bo->bdev);
gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
if (gtt == NULL)
return NULL;
gtt->ttm.ttm.func = &qxl_backend_func;
gtt->qdev = qdev;
- if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
+ if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
kfree(gtt);
return NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5012f5e47a1e..b108eaabb6df 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -90,25 +90,18 @@ void radeon_connector_hotplug(struct drm_connector *connector)
/* don't do anything if sink is not display port, i.e.,
* passive dp->(dvi|hdmi) adaptor
*/
- if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- int saved_dpms = connector->dpms;
- /* Only turn off the display if it's physically disconnected */
- if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- } else if (radeon_dp_needs_link_train(radeon_connector)) {
- /* Don't try to start link training before we
- * have the dpcd */
- if (!radeon_dp_getdpcd(radeon_connector))
- return;
-
- /* set it to OFF so that drm_helper_connector_dpms()
- * won't return immediately since the current state
- * is ON at this point.
- */
- connector->dpms = DRM_MODE_DPMS_OFF;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
- connector->dpms = saved_dpms;
+ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+ radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+ radeon_dp_needs_link_train(radeon_connector)) {
+ /* Don't start link training before we have the DPCD */
+ if (!radeon_dp_getdpcd(radeon_connector))
+ return;
+
+ /* Turn the connector off and back on immediately, which
+ * will trigger link training
+ */
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index a9962ffba720..27d8e7dd2d06 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
struct radeon_bo *robj = gem_to_radeon_bo(gobj);
if (robj) {
- if (robj->gem_base.import_attach)
- drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
radeon_mn_unregister(robj);
radeon_bo_unref(&robj);
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 38431f682ed0..edbb4cd519fd 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -82,6 +82,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
WARN_ON_ONCE(!list_empty(&bo->va));
+ if (bo->gem_base.import_attach)
+ drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 009f55a2bbf9..8689fcca051c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -686,17 +686,17 @@ static struct ttm_backend_func radeon_backend_func = {
.destroy = &radeon_ttm_backend_destroy,
};
-static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags)
+static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt;
- rdev = radeon_get_rdev(bdev);
+ rdev = radeon_get_rdev(bo->bdev);
#if IS_ENABLED(CONFIG_AGP)
if (rdev->flags & RADEON_IS_AGP) {
- return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
- size, page_flags);
+ return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge,
+ page_flags);
}
#endif
@@ -706,7 +706,7 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
}
gtt->ttm.ttm.func = &radeon_backend_func;
gtt->rdev = rdev;
- if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
+ if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
kfree(gtt);
return NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index f7c2aefbec7c..7c2485fe88d8 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -110,9 +110,9 @@ static struct ttm_backend_func ttm_agp_func = {
.destroy = ttm_agp_destroy,
};
-struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
struct agp_bridge_data *bridge,
- unsigned long size, uint32_t page_flags)
+ uint32_t page_flags)
{
struct ttm_agp_backend *agp_be;
@@ -124,7 +124,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
agp_be->bridge = bridge;
agp_be->ttm.func = &ttm_agp_func;
- if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags)) {
+ if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) {
kfree(agp_be);
return NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ad142a92eb80..98e06f8bf23b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -622,14 +622,23 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
reservation_object_assert_held(bo->resv);
+ placement.num_placement = 0;
+ placement.num_busy_placement = 0;
+ bdev->driver->evict_flags(bo, &placement);
+
+ if (!placement.num_placement && !placement.num_busy_placement) {
+ ret = ttm_bo_pipeline_gutting(bo);
+ if (ret)
+ return ret;
+
+ return ttm_tt_create(bo, false);
+ }
+
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
evict_mem.bus.io_reserved_vm = false;
evict_mem.bus.io_reserved_count = 0;
- placement.num_placement = 0;
- placement.num_busy_placement = 0;
- bdev->driver->evict_flags(bo, &placement);
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
if (ret) {
if (ret != -ERESTARTSYS) {
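With the evict_flags() query hoisted ahead of the move setup, a driver can now ask for a buffer to be gutted rather than relocated by reporting no placements at all. A minimal sketch of such a callback follows; foo_evict_flags() is a hypothetical driver name, not part of this series:

static void foo_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	/*
	 * Leaving both counts at zero makes ttm_bo_evict() call
	 * ttm_bo_pipeline_gutting() and recreate an empty ttm_tt,
	 * instead of searching for a new placement.
	 */
	placement->num_placement = 0;
	placement->num_busy_placement = 0;
}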
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 6d6a3f46143b..1f730b3f18e5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -801,3 +801,27 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);
+
+int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
+{
+ struct ttm_buffer_object *ghost;
+ int ret;
+
+ ret = ttm_buffer_object_transfer(bo, &ghost);
+ if (ret)
+ return ret;
+
+ ret = reservation_object_copy_fences(ghost->resv, bo->resv);
+ /* Last resort, wait for the BO to be idle when we are OOM */
+ if (ret)
+ ttm_bo_wait(bo, false, false);
+
+ memset(&bo->mem, 0, sizeof(bo->mem));
+ bo->mem.mem_type = TTM_PL_SYSTEM;
+ bo->ttm = NULL;
+
+ ttm_bo_unreserve(ghost);
+ ttm_bo_unref(&ghost);
+
+ return 0;
+}
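After ttm_bo_pipeline_gutting() returns, the BO still exists but owns no memory: the old backing store and its fences travel with the ghost object and are reaped asynchronously. Expressed as hypothetical post-condition checks (illustration only, not part of the patch):

static void foo_assert_gutted(struct ttm_buffer_object *bo)
{
	/* The tt was detached and bo->mem reset to an empty system
	 * placement by ttm_bo_pipeline_gutting().
	 */
	WARN_ON(bo->ttm != NULL);
	WARN_ON(bo->mem.mm_node != NULL);
	WARN_ON(bo->mem.mem_type != TTM_PL_SYSTEM);
}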
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 0ee3b8f11605..7e672be987b5 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -31,17 +31,11 @@
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/sched.h>
-#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
-#include <linux/swap.h>
-#include <linux/slab.h>
-#include <linux/export.h>
#include <drm/drm_cache.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@@ -79,14 +73,10 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
return -EINVAL;
}
- bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
- page_flags);
+ bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
if (unlikely(bo->ttm == NULL))
return -ENOMEM;
- if (bo->type == ttm_bo_type_sg)
- bo->ttm->sg = bo->sg;
-
return 0;
}
@@ -114,6 +104,16 @@ static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
return 0;
}
+static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+{
+ ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
+ sizeof(*ttm->dma_address),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!ttm->dma_address)
+ return -ENOMEM;
+ return 0;
+}
+
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
enum ttm_caching_state c_old,
@@ -233,15 +233,22 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
ttm->func->destroy(ttm);
}
-int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags)
+void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
- ttm->bdev = bdev;
- ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ttm->bdev = bo->bdev;
+ ttm->num_pages = bo->num_pages;
ttm->caching_state = tt_cached;
ttm->page_flags = page_flags;
ttm->state = tt_unpopulated;
ttm->swap_storage = NULL;
+ ttm->sg = bo->sg;
+}
+
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ uint32_t page_flags)
+{
+ ttm_tt_init_fields(ttm, bo, page_flags);
if (ttm_tt_alloc_page_directory(ttm)) {
ttm_tt_destroy(ttm);
@@ -259,17 +266,12 @@ void ttm_tt_fini(struct ttm_tt *ttm)
}
EXPORT_SYMBOL(ttm_tt_fini);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags)
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
- ttm->bdev = bdev;
- ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ttm->caching_state = tt_cached;
- ttm->page_flags = page_flags;
- ttm->state = tt_unpopulated;
- ttm->swap_storage = NULL;
+ ttm_tt_init_fields(ttm, bo, page_flags);
INIT_LIST_HEAD(&ttm_dma->pages_list);
if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
@@ -281,11 +283,36 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
}
EXPORT_SYMBOL(ttm_dma_tt_init);
+int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ int ret;
+
+ ttm_tt_init_fields(ttm, bo, page_flags);
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ if (page_flags & TTM_PAGE_FLAG_SG)
+ ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
+ else
+ ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
+ if (ret) {
+ ttm_tt_destroy(ttm);
+ pr_err("Failed allocating page table\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_sg_tt_init);
+
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
- kvfree(ttm->pages);
+ if (ttm->pages)
+ kvfree(ttm->pages);
+ else
+ kvfree(ttm_dma->dma_address);
ttm->pages = NULL;
ttm_dma->dma_address = NULL;
}
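The new ttm_sg_tt_init() gives drivers that mix dma-buf imports with ordinary BOs a single entry point: for TTM_PAGE_FLAG_SG tts only the dma_address array is allocated, since the pages come from the imported sg_table. A sketch of a driver hook using it; the foo_* names, the backend func and the struct wrapping a ttm_dma_tt are assumptions for illustration:

struct foo_ttm_tt {
	struct ttm_dma_tt ttm;
};

static struct ttm_tt *foo_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct foo_ttm_tt *gtt;

	gtt = kzalloc(sizeof(*gtt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;
	gtt->ttm.ttm.func = &foo_backend_func;
	/* Picks the SG or the DMA page directory based on page_flags. */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}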
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index fd5d9450878e..11f8ae5b5332 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -322,20 +322,19 @@ static struct ttm_backend_func virtio_gpu_backend_func = {
.destroy = &virtio_gpu_ttm_backend_destroy,
};
-static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size,
+static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
struct virtio_gpu_device *vgdev;
struct virtio_gpu_ttm_tt *gtt;
- vgdev = virtio_gpu_get_vgdev(bdev);
+ vgdev = virtio_gpu_get_vgdev(bo->bdev);
gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL)
return NULL;
gtt->ttm.ttm.func = &virtio_gpu_backend_func;
gtt->vgdev = vgdev;
- if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
+ if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
kfree(gtt);
return NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index fead3f2dbb46..7177eecb8c9f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -693,8 +693,8 @@ static struct ttm_backend_func vmw_ttm_func = {
.destroy = vmw_ttm_destroy,
};
-static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags)
+static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct vmw_ttm_tt *vmw_be;
int ret;
@@ -704,13 +704,13 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
return NULL;
vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
- vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
+ vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
- ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags);
+ ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
else
- ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags);
+ ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
if (unlikely(ret != 0))
goto out_no_init;
diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/staging/vboxvideo/vbox_ttm.c
index 2c7daa3d0f24..548edb7c494b 100644
--- a/drivers/staging/vboxvideo/vbox_ttm.c
+++ b/drivers/staging/vboxvideo/vbox_ttm.c
@@ -193,8 +193,7 @@ static struct ttm_backend_func vbox_tt_backend_func = {
.destroy = &vbox_ttm_backend_destroy,
};
-static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size,
+static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
u32 page_flags)
{
struct ttm_tt *tt;
@@ -204,7 +203,7 @@ static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
return NULL;
tt->func = &vbox_tt_backend_func;
- if (ttm_tt_init(tt, bdev, size, page_flags)) {
+ if (ttm_tt_init(tt, bo, page_flags)) {
kfree(tt);
return NULL;
}
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 4312b5326f0b..3234cc322e70 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -42,111 +42,10 @@
#include "ttm_memory.h"
#include "ttm_module.h"
#include "ttm_placement.h"
+#include "ttm_tt.h"
#define TTM_MAX_BO_PRIORITY 4U
-struct ttm_backend_func {
- /**
- * struct ttm_backend_func member bind
- *
- * @ttm: Pointer to a struct ttm_tt.
- * @bo_mem: Pointer to a struct ttm_mem_reg describing the
- * memory type and location for binding.
- *
- * Bind the backend pages into the aperture in the location
- * indicated by @bo_mem. This function should be able to handle
- * differences between aperture and system page sizes.
- */
- int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
-
- /**
- * struct ttm_backend_func member unbind
- *
- * @ttm: Pointer to a struct ttm_tt.
- *
- * Unbind previously bound backend pages. This function should be
- * able to handle differences between aperture and system page sizes.
- */
- int (*unbind) (struct ttm_tt *ttm);
-
- /**
- * struct ttm_backend_func member destroy
- *
- * @ttm: Pointer to a struct ttm_tt.
- *
- * Destroy the backend. This will be call back from ttm_tt_destroy so
- * don't call ttm_tt_destroy from the callback or infinite loop.
- */
- void (*destroy) (struct ttm_tt *ttm);
-};
-
-#define TTM_PAGE_FLAG_WRITE (1 << 3)
-#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
-#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
-#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
-#define TTM_PAGE_FLAG_DMA32 (1 << 7)
-#define TTM_PAGE_FLAG_SG (1 << 8)
-#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
-
-enum ttm_caching_state {
- tt_uncached,
- tt_wc,
- tt_cached
-};
-
-/**
- * struct ttm_tt
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @func: Pointer to a struct ttm_backend_func that describes
- * the backend methods.
- * pointer.
- * @pages: Array of pages backing the data.
- * @num_pages: Number of pages in the page array.
- * @bdev: Pointer to the current struct ttm_bo_device.
- * @be: Pointer to the ttm backend.
- * @swap_storage: Pointer to shmem struct file for swap storage.
- * @caching_state: The current caching state of the pages.
- * @state: The current binding state of the pages.
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-
-struct ttm_tt {
- struct ttm_bo_device *bdev;
- struct ttm_backend_func *func;
- struct page **pages;
- uint32_t page_flags;
- unsigned long num_pages;
- struct sg_table *sg; /* for SG objects via dma-buf */
- struct file *swap_storage;
- enum ttm_caching_state caching_state;
- enum {
- tt_bound,
- tt_unbound,
- tt_unpopulated,
- } state;
-};
-
-/**
- * struct ttm_dma_tt
- *
- * @ttm: Base ttm_tt struct.
- * @dma_address: The DMA (bus) addresses of the pages
- * @pages_list: used by some page allocation backend
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-struct ttm_dma_tt {
- struct ttm_tt ttm;
- dma_addr_t *dma_address;
- struct list_head pages_list;
-};
-
#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
@@ -326,8 +225,7 @@ struct ttm_bo_driver {
/**
* ttm_tt_create
*
- * @bdev: pointer to a struct ttm_bo_device:
- * @size: Size of the data needed backing.
+ * @bo: The buffer object to create the ttm for.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
*
* Create a struct ttm_tt to back data with system memory pages.
@@ -335,8 +233,7 @@ struct ttm_bo_driver {
* Returns:
* NULL: Out of memory.
*/
- struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
- unsigned long size,
+ struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
uint32_t page_flags);
/**
@@ -610,117 +507,6 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
return *old;
}
-/**
- * ttm_tt_create
- *
- * @bo: pointer to a struct ttm_buffer_object
- * @zero_alloc: true if allocated pages needs to be zeroed
- *
- * Make sure we have a TTM structure allocated for the given BO.
- * No pages are actually allocated.
- */
-int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
-
-/**
- * ttm_tt_init
- *
- * @ttm: The struct ttm_tt.
- * @bdev: pointer to a struct ttm_bo_device:
- * @size: Size of the data needed backing.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- *
- * Create a struct ttm_tt to back data with system memory pages.
- * No pages are actually allocated.
- * Returns:
- * NULL: Out of memory.
- */
-int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags);
-
-/**
- * ttm_tt_fini
- *
- * @ttm: the ttm_tt structure.
- *
- * Free memory of ttm_tt structure
- */
-void ttm_tt_fini(struct ttm_tt *ttm);
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
-
-/**
- * ttm_ttm_bind:
- *
- * @ttm: The struct ttm_tt containing backing pages.
- * @bo_mem: The struct ttm_mem_reg identifying the binding location.
- *
- * Bind the pages of @ttm to an aperture location identified by @bo_mem
- */
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
- struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_ttm_destroy:
- *
- * @ttm: The struct ttm_tt.
- *
- * Unbind, unpopulate and destroy common struct ttm_tt.
- */
-void ttm_tt_destroy(struct ttm_tt *ttm);
-
-/**
- * ttm_ttm_unbind:
- *
- * @ttm: The struct ttm_tt.
- *
- * Unbind a struct ttm_tt.
- */
-void ttm_tt_unbind(struct ttm_tt *ttm);
-
-/**
- * ttm_tt_swapin:
- *
- * @ttm: The struct ttm_tt.
- *
- * Swap in a previously swap out ttm_tt.
- */
-int ttm_tt_swapin(struct ttm_tt *ttm);
-
-/**
- * ttm_tt_set_placement_caching:
- *
- * @ttm A struct ttm_tt the backing pages of which will change caching policy.
- * @placement: Flag indicating the desired caching policy.
- *
- * This function will change caching policy of any default kernel mappings of
- * the pages backing @ttm. If changing from cached to uncached or
- * write-combined,
- * all CPU caches will first be flushed to make sure the data of the pages
- * hit RAM. This function may be very costly as it involves global TLB
- * and cache flushes and potential page splitting / combining.
- */
-int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
-int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
-
-/**
- * ttm_tt_populate - allocate pages for a ttm
- *
- * @ttm: Pointer to the ttm_tt structure
- *
- * Calls the driver method to allocate pages for a ttm
- */
-int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_tt_unpopulate - free pages from a ttm
- *
- * @ttm: Pointer to the ttm_tt structure
- *
- * Calls the driver method to free all pages from a ttm
- */
-void ttm_tt_unpopulate(struct ttm_tt *ttm);
-
/*
* ttm_bo.c
*/
@@ -1062,6 +848,15 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
/**
+ * ttm_bo_pipeline_gutting
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Pipelined gutting of a BO, i.e. removal of its backing store.
+ */
+int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
+
+/**
* ttm_io_prot
*
* @c_state: Caching state.
@@ -1074,27 +869,4 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
-#if IS_ENABLED(CONFIG_AGP)
-#include <linux/agp_backend.h>
-
-/**
- * ttm_agp_tt_create
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @bridge: The agp bridge this device is sitting on.
- * @size: Size of the data needed backing.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- *
- *
- * Create a TTM backend that uses the indicated AGP bridge as an aperture
- * for TT memory. This function uses the linux agpgart interface to
- * bind and unbind memory backing a ttm_tt.
- */
-struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
- struct agp_bridge_data *bridge,
- unsigned long size, uint32_t page_flags);
-int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
-void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
-#endif
-
#endif
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
new file mode 100644
index 000000000000..c0e928abf592
--- /dev/null
+++ b/include/drm/ttm/ttm_tt.h
@@ -0,0 +1,272 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _TTM_TT_H_
+#define _TTM_TT_H_
+
+#include <linux/types.h>
+
+struct ttm_tt;
+struct ttm_mem_reg;
+struct ttm_buffer_object;
+struct ttm_operation_ctx;
+
+#define TTM_PAGE_FLAG_WRITE (1 << 3)
+#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
+#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
+#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
+#define TTM_PAGE_FLAG_DMA32 (1 << 7)
+#define TTM_PAGE_FLAG_SG (1 << 8)
+#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
+
+enum ttm_caching_state {
+ tt_uncached,
+ tt_wc,
+ tt_cached
+};
+
+struct ttm_backend_func {
+ /**
+ * struct ttm_backend_func member bind
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ * @bo_mem: Pointer to a struct ttm_mem_reg describing the
+ * memory type and location for binding.
+ *
+ * Bind the backend pages into the aperture in the location
+ * indicated by @bo_mem. This function should be able to handle
+ * differences between aperture and system page sizes.
+ */
+ int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+ /**
+ * struct ttm_backend_func member unbind
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+ * Unbind previously bound backend pages. This function should be
+ * able to handle differences between aperture and system page sizes.
+ */
+ int (*unbind) (struct ttm_tt *ttm);
+
+ /**
+ * struct ttm_backend_func member destroy
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+ * Destroy the backend. This is called back from ttm_tt_destroy, so
+ * don't call ttm_tt_destroy from the callback or it will recurse forever.
+ */
+ void (*destroy) (struct ttm_tt *ttm);
+};
+
+/**
+ * struct ttm_tt
+ *
+ * @bdev: Pointer to the current struct ttm_bo_device.
+ * @func: Pointer to a struct ttm_backend_func that describes
+ * the backend methods.
+ * @pages: Array of pages backing the data.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @num_pages: Number of pages in the page array.
+ * @sg: Scatter-gather table for SG objects imported via dma-buf.
+ * @swap_storage: Pointer to shmem struct file for swap storage.
+ * @caching_state: The current caching state of the pages.
+ * @state: The current binding state of the pages.
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_tt {
+ struct ttm_bo_device *bdev;
+ struct ttm_backend_func *func;
+ struct page **pages;
+ uint32_t page_flags;
+ unsigned long num_pages;
+ struct sg_table *sg; /* for SG objects via dma-buf */
+ struct file *swap_storage;
+ enum ttm_caching_state caching_state;
+ enum {
+ tt_bound,
+ tt_unbound,
+ tt_unpopulated,
+ } state;
+};
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @dma_address: The DMA (bus) addresses of the pages
+ * @pages_list: used by some page allocation backend
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+ struct ttm_tt ttm;
+ dma_addr_t *dma_address;
+ struct list_head pages_list;
+};
+
+/**
+ * ttm_tt_create
+ *
+ * @bo: pointer to a struct ttm_buffer_object
+ * @zero_alloc: true if allocated pages need to be zeroed
+ *
+ * Make sure we have a TTM structure allocated for the given BO.
+ * No pages are actually allocated.
+ */
+int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
+
+/**
+ * ttm_tt_init
+ *
+ * @ttm: The struct ttm_tt.
+ * @bo: The buffer object we create the ttm for.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+
+/**
+ * ttm_tt_fini
+ *
+ * @ttm: the ttm_tt structure.
+ *
+ * Free memory of ttm_tt structure
+ */
+void ttm_tt_fini(struct ttm_tt *ttm);
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
+
+/**
+ * ttm_tt_bind:
+ *
+ * @ttm: The struct ttm_tt containing backing pages.
+ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
+ *
+ * Bind the pages of @ttm to an aperture location identified by @bo_mem
+ */
+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
+ struct ttm_operation_ctx *ctx);
+
+/**
+ * ttm_tt_destroy:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind, unpopulate and destroy common struct ttm_tt.
+ */
+void ttm_tt_destroy(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_unbind:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind a struct ttm_tt.
+ */
+void ttm_tt_unbind(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_swapin:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Swap in a previously swapped-out ttm_tt.
+ */
+int ttm_tt_swapin(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_set_placement_caching:
+ *
+ * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
+ * @placement: Flag indicating the desired caching policy.
+ *
+ * This function will change caching policy of any default kernel mappings of
+ * the pages backing @ttm. If changing from cached to uncached or
+ * write-combined, all CPU caches will first be flushed to make sure the
+ * data of the pages hit RAM. This function may be very costly as it
+ * involves global TLB
+ * and cache flushes and potential page splitting / combining.
+ */
+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
+int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
+
+/**
+ * ttm_tt_populate - allocate pages for a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to allocate pages for a ttm
+ */
+int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+
+/**
+ * ttm_tt_unpopulate - free pages from a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to free all pages from a ttm
+ */
+void ttm_tt_unpopulate(struct ttm_tt *ttm);
+
+#if IS_ENABLED(CONFIG_AGP)
+#include <linux/agp_backend.h>
+
+/**
+ * ttm_agp_tt_create
+ *
+ * @bo: Buffer object we allocate the ttm for.
+ * @bridge: The agp bridge this device is sitting on.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ *
+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
+ * for TT memory. This function uses the linux agpgart interface to
+ * bind and unbind memory backing a ttm_tt.
+ */
+struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
+ struct agp_bridge_data *bridge,
+ uint32_t page_flags);
+int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
+#endif
+
+#endif
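Read together, the exported functions above describe the tt life cycle: create, populate, bind, and eventually unbind and destroy. A simplified, hypothetical helper showing the order of the calls (error paths trimmed; foo_bind_bo() is not a real TTM function):

static int foo_bind_bo(struct ttm_buffer_object *bo,
		       struct ttm_mem_reg *mem,
		       struct ttm_operation_ctx *ctx)
{
	int ret;

	/* Allocate bo->ttm through the driver's ttm_tt_create hook. */
	ret = ttm_tt_create(bo, true);
	if (ret)
		return ret;

	/*
	 * ttm_tt_bind() populates the tt as needed and then calls the
	 * backend's bind() to map the pages at @mem; ttm_tt_destroy()
	 * later unbinds, unpopulates and frees it.
	 */
	return ttm_tt_bind(bo->ttm, mem, ctx);
}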
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 1816bd8200d1..528f6d041e90 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -806,6 +806,7 @@ struct drm_amdgpu_info_firmware {
#define AMDGPU_VRAM_TYPE_GDDR5 5
#define AMDGPU_VRAM_TYPE_HBM 6
#define AMDGPU_VRAM_TYPE_DDR3 7
+#define AMDGPU_VRAM_TYPE_DDR4 8
struct drm_amdgpu_info_device {
/** PCI Device ID */