diff options
75 files changed, 4589 insertions, 344 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 65c17c59c152..e0d7f4ee7e16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1409,6 +1409,7 @@ bool amdgpu_device_supports_px(struct drm_device *dev); bool amdgpu_device_supports_boco(struct drm_device *dev); bool amdgpu_device_supports_smart_shift(struct drm_device *dev); int amdgpu_device_supports_baco(struct drm_device *dev); +void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev); bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); int amdgpu_device_baco_enter(struct drm_device *dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index c02634a124dd..c50202215f6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -753,23 +753,13 @@ int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info) static int aca_bank_get_error_code(struct amdgpu_device *adev, struct aca_bank *bank) { - int error_code; - - switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { - case IP_VERSION(13, 0, 6): - if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) { - error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]); - return error_code & 0xff; - } - break; - default: - break; - } + struct amdgpu_aca *aca = &adev->aca; + const struct aca_smu_funcs *smu_funcs = aca->smu_funcs; - /* NOTE: the true error code is encoded in status.errorcode[0:7] */ - error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]); + if (!smu_funcs || !smu_funcs->parse_error_code) + return -EOPNOTSUPP; - return error_code & 0xff; + return smu_funcs->parse_error_code(adev, bank); } int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size) @@ -780,6 +770,9 @@ int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank return -EINVAL; error_code = aca_bank_get_error_code(adev, bank); + if (error_code < 0) + return error_code; + for (i = 0; i < size; i++) { if (err_codes[i] == error_code) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h index 3765843ea648..5ef6b745f222 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h @@ -173,6 +173,7 @@ struct aca_smu_funcs { int (*set_debug_mode)(struct amdgpu_device *adev, bool enable); int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_smu_type type, u32 *count); int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_smu_type type, int idx, struct aca_bank *bank); + int (*parse_error_code)(struct amdgpu_device *adev, struct aca_bank *bank); }; struct amdgpu_aca { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index df58a6a1a67e..2131de36e3da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1854,6 +1854,7 @@ err_node_allow: err_bo_create: amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id); err_reserve_limit: + amdgpu_sync_free(&(*mem)->sync); mutex_destroy(&(*mem)->lock); if (gobj) drm_gem_object_put(gobj); @@ -2900,13 +2901,12 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu * amdgpu_sync_create(&sync_obj); - /* Validate BOs and map them to GPUVM (update VM page tables). 
*/ + /* Validate BOs managed by KFD */ list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) { struct amdgpu_bo *bo = mem->bo; uint32_t domain = mem->domain; - struct kfd_mem_attachment *attachment; struct dma_resv_iter cursor; struct dma_fence *fence; @@ -2931,6 +2931,25 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu * goto validate_map_fail; } } + } + + if (failed_size) + pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size); + + /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO + * validations above would invalidate DMABuf imports again. + */ + ret = process_validate_vms(process_info, &exec.ticket); + if (ret) { + pr_debug("Validating VMs failed, ret: %d\n", ret); + goto validate_map_fail; + } + + /* Update mappings managed by KFD. */ + list_for_each_entry(mem, &process_info->kfd_bo_list, + validate_list) { + struct kfd_mem_attachment *attachment; + list_for_each_entry(attachment, &mem->attachments, list) { if (!attachment->is_mapped) continue; @@ -2947,18 +2966,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu * } } - if (failed_size) - pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size); - - /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO - * validations above would invalidate DMABuf imports again. - */ - ret = process_validate_vms(process_info, &exec.ticket); - if (ret) { - pr_debug("Validating VMs failed, ret: %d\n", ret); - goto validate_map_fail; - } - /* Update mappings not managed by KFD */ list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 0a4b09709cfb..ec888fc6ead8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -819,7 +819,7 @@ retry: p->bytes_moved += ctx.bytes_moved; if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && - amdgpu_bo_in_cpu_visible_vram(bo)) + amdgpu_res_cpu_visible(adev, bo->tbo.resource)) p->bytes_moved_vis += ctx.bytes_moved; if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c index 1129e5e5fb42..64fe564b8036 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c @@ -188,10 +188,11 @@ static void amdgpu_devcoredump_fw_info(struct amdgpu_device *adev, adev->vpe.feature_version, adev->vpe.fw_version); drm_printf(p, "\nVBIOS Information\n"); - drm_printf(p, "name: %s\n", ctx->name); - drm_printf(p, "pn %s\n", ctx->vbios_pn); - drm_printf(p, "version: %s\n", ctx->vbios_ver_str); - drm_printf(p, "date: %s\n", ctx->date); + drm_printf(p, "vbios name : %s\n", ctx->name); + drm_printf(p, "vbios pn : %s\n", ctx->vbios_pn); + drm_printf(p, "vbios version : %d\n", ctx->version); + drm_printf(p, "vbios ver_str : %s\n", ctx->vbios_ver_str); + drm_printf(p, "vbios date : %s\n", ctx->date); } static ssize_t diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 170da4551ed5..f3b7cb18fd46 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -350,6 +350,81 @@ int amdgpu_device_supports_baco(struct drm_device *dev) return amdgpu_asic_supports_baco(adev); } +void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev) +{ + struct drm_device *dev; + int bamaco_support; + + dev = 
adev_to_drm(adev); + + adev->pm.rpm_mode = AMDGPU_RUNPM_NONE; + bamaco_support = amdgpu_device_supports_baco(dev); + + switch (amdgpu_runtime_pm) { + case 2: + if (bamaco_support & MACO_SUPPORT) { + adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; + dev_info(adev->dev, "Forcing BAMACO for runtime pm\n"); + } else if (bamaco_support == BACO_SUPPORT) { + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; + dev_info(adev->dev, "Requested mode BAMACO not available,fallback to use BACO\n"); + } + break; + case 1: + if (bamaco_support & BACO_SUPPORT) { + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; + dev_info(adev->dev, "Forcing BACO for runtime pm\n"); + } + break; + case -1: + case -2: + if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */ + adev->pm.rpm_mode = AMDGPU_RUNPM_PX; + dev_info(adev->dev, "Using ATPX for runtime pm\n"); + } else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */ + adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO; + dev_info(adev->dev, "Using BOCO for runtime pm\n"); + } else { + if (!bamaco_support) + goto no_runtime_pm; + + switch (adev->asic_type) { + case CHIP_VEGA20: + case CHIP_ARCTURUS: + /* BACO are not supported on vega20 and arctrus */ + break; + case CHIP_VEGA10: + /* enable BACO as runpm mode if noretry=0 */ + if (!adev->gmc.noretry) + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; + break; + default: + /* enable BACO as runpm mode on CI+ */ + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; + break; + } + + if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) { + if (bamaco_support & MACO_SUPPORT) { + adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; + dev_info(adev->dev, "Using BAMACO for runtime pm\n"); + } else { + dev_info(adev->dev, "Using BACO for runtime pm\n"); + } + } + } + break; + case 0: + dev_info(adev->dev, "runtime pm is manually disabled\n"); + break; + default: + break; + } + +no_runtime_pm: + if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) + dev_info(adev->dev, "Runtime PM not available\n"); +} /** * amdgpu_device_supports_smart_shift - Is the device dGPU with * smart shift support @@ -1460,7 +1535,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) /* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */ if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR)) - DRM_WARN("System can't access extended configuration space,please check!!\n"); + DRM_WARN("System can't access extended configuration space, please check!!\n"); /* skip if the bios has already enabled large BAR */ if (adev->gmc.real_vram_size && @@ -5282,7 +5357,9 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, /* Try reset handler method first */ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, reset_list); - amdgpu_reset_reg_dumps(tmp_adev); + + if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) + amdgpu_reset_reg_dumps(tmp_adev); reset_context->reset_device_list = device_list_handle; r = amdgpu_reset_perform_reset(tmp_adev, reset_context); @@ -5355,7 +5432,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, vram_lost = amdgpu_device_check_vram_lost(tmp_adev); - amdgpu_coredump(tmp_adev, vram_lost, reset_context); + if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) + amdgpu_coredump(tmp_adev, vram_lost, reset_context); if (vram_lost) { DRM_INFO("VRAM is lost due to GPU reset!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 07c5fca06178..0e31bdb4b7cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -255,7 +255,6 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, uint64_t vram_size; u32 msg; int i, ret = 0; - int ip_discovery_ver = 0; /* It can take up to a second for IFWI init to complete on some dGPUs, * but generally it should be in the 60-100ms range. Normally this starts @@ -265,17 +264,13 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, * continue. */ - ip_discovery_ver = RREG32(mmIP_DISCOVERY_VERSION); - if ((dev_is_removable(&adev->pdev->dev)) || - (ip_discovery_ver == IP_DISCOVERY_V2) || - (ip_discovery_ver == IP_DISCOVERY_V4)) { - for (i = 0; i < 1000; i++) { - msg = RREG32(mmMP0_SMN_C2PMSG_33); - if (msg & 0x80000000) - break; - msleep(1); - } + for (i = 0; i < 1000; i++) { + msg = RREG32(mmMP0_SMN_C2PMSG_33); + if (msg & 0x80000000) + break; + usleep_range(1000, 1100); } + vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; if (vram_size) { @@ -1906,6 +1901,8 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) break; case IP_VERSION(14, 0, 0): case IP_VERSION(14, 0, 1): + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 6ea893ad9a36..c512f70b8272 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2481,6 +2481,7 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) /* Use a common context, just need to make sure full reset is done */ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); + set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); r = amdgpu_do_asic_reset(&device_list, &reset_context); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 55eb4e4eb8f2..a0ea6fe8d060 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -133,7 +133,6 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev) int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) { struct drm_device *dev; - int bamaco_support = 0; int r, acpi_status; dev = adev_to_drm(adev); @@ -150,52 +149,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) goto out; } - adev->pm.rpm_mode = AMDGPU_RUNPM_NONE; - if (amdgpu_device_supports_px(dev) && - (amdgpu_runtime_pm != 0)) { /* enable PX as runtime mode */ - adev->pm.rpm_mode = AMDGPU_RUNPM_PX; - dev_info(adev->dev, "Using ATPX for runtime pm\n"); - } else if (amdgpu_device_supports_boco(dev) && - (amdgpu_runtime_pm != 0)) { /* enable boco as runtime mode */ - adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO; - dev_info(adev->dev, "Using BOCO for runtime pm\n"); - } else if (amdgpu_runtime_pm != 0) { - bamaco_support = amdgpu_device_supports_baco(dev); - - if (!bamaco_support) - goto no_runtime_pm; - - switch (adev->asic_type) { - case CHIP_VEGA20: - case CHIP_ARCTURUS: - /* enable BACO as runpm mode if runpm=1 */ - if (amdgpu_runtime_pm > 0) - adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; - break; - case CHIP_VEGA10: - /* enable BACO as runpm mode if noretry=0 */ - if (!adev->gmc.noretry) - adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; - break; - default: - /* enable BACO as runpm mode on CI+ */ - adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; - break; - } - - if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) { - if (bamaco_support & MACO_SUPPORT) { - 
adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; - dev_info(adev->dev, "Using BAMACO for runtime pm\n"); - } else { - dev_info(adev->dev, "Using BACO for runtime pm\n"); - } - } - } - -no_runtime_pm: - if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) - dev_info(adev->dev, "NO pm mode for runtime pm\n"); + amdgpu_device_detect_runtime_pm_mode(adev); /* Call ACPI methods: require modeset init * but failure is not fatal diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 8bc79924d171..92af057dbf6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -623,8 +623,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, return r; if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && - bo->tbo.resource->mem_type == TTM_PL_VRAM && - amdgpu_bo_in_cpu_visible_vram(bo)) + amdgpu_res_cpu_visible(adev, bo->tbo.resource)) amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, ctx.bytes_moved); else @@ -1278,23 +1277,25 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict) void amdgpu_bo_get_memory(struct amdgpu_bo *bo, struct amdgpu_mem_stats *stats) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct ttm_resource *res = bo->tbo.resource; uint64_t size = amdgpu_bo_size(bo); struct drm_gem_object *obj; unsigned int domain; bool shared; /* Abort if the BO doesn't currently have a backing store */ - if (!bo->tbo.resource) + if (!res) return; obj = &bo->tbo.base; shared = drm_gem_object_is_shared_for_memory_stats(obj); - domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); + domain = amdgpu_mem_type_to_domain(res->mem_type); switch (domain) { case AMDGPU_GEM_DOMAIN_VRAM: stats->vram += size; - if (amdgpu_bo_in_cpu_visible_vram(bo)) + if (amdgpu_res_cpu_visible(adev, bo->tbo.resource)) stats->visible_vram += size; if (shared) stats->vram_shared += size; @@ -1395,10 +1396,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* Remember that this BO was accessed by the CPU */ abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - if (bo->resource->mem_type != TTM_PL_VRAM) - return 0; - - if (amdgpu_bo_in_cpu_visible_vram(abo)) + if (amdgpu_res_cpu_visible(adev, bo->resource)) return 0; /* Can't move a pinned BO to visible VRAM */ @@ -1421,7 +1419,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* this should never happen */ if (bo->resource->mem_type == TTM_PL_VRAM && - !amdgpu_bo_in_cpu_visible_vram(abo)) + !amdgpu_res_cpu_visible(adev, bo->resource)) return VM_FAULT_SIGBUS; ttm_bo_move_to_lru_tail_unlocked(bo); @@ -1585,6 +1583,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, */ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct dma_buf_attachment *attachment; struct dma_buf *dma_buf; const char *placement; @@ -1593,10 +1592,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) if (dma_resv_trylock(bo->tbo.base.resv)) { unsigned int domain; + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); switch (domain) { case AMDGPU_GEM_DOMAIN_VRAM: - if (amdgpu_bo_in_cpu_visible_vram(bo)) + if (amdgpu_res_cpu_visible(adev, bo->tbo.resource)) placement = "VRAM VISIBLE"; else placement = "VRAM"; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index be679c42b0b8..fa03d9e4874c 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -251,28 +251,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo) } /** - * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM - */ -static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - struct amdgpu_res_cursor cursor; - - if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM) - return false; - - amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor); - while (cursor.remaining) { - if (cursor.start < adev->gmc.visible_vram_size) - return true; - - amdgpu_res_next(&cursor, cursor.size); - } - - return false; -} - -/** * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced */ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index edae581059af..4bd4602d11b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -2265,6 +2265,15 @@ static int psp_hw_start(struct psp_context *psp) } } + if ((is_psp_fw_valid(psp->ipkeymgr_drv)) && + (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) { + ret = psp_bootloader_load_ipkeymgr_drv(psp); + if (ret) { + dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n"); + return ret; + } + } + if ((is_psp_fw_valid(psp->sos)) && (psp->funcs->bootloader_load_sos != NULL)) { ret = psp_bootloader_load_sos(psp); @@ -3280,6 +3289,12 @@ static int parse_sos_bin_descriptor(struct psp_context *psp, psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); psp->ras_drv.start_addr = ucode_start_addr; break; + case PSP_FW_TYPE_PSP_IPKEYMGR_DRV: + psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version); + psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version); + psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes); + psp->ipkeymgr_drv.start_addr = ucode_start_addr; + break; default: dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index ee16f134ae92..3635303e6548 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -73,8 +73,10 @@ enum psp_bootloader_cmd { PSP_BL__LOAD_KEY_DATABASE = 0x80000, PSP_BL__LOAD_SOCDRV = 0xB0000, PSP_BL__LOAD_DBGDRV = 0xC0000, + PSP_BL__LOAD_HADDRV = PSP_BL__LOAD_DBGDRV, PSP_BL__LOAD_INTFDRV = 0xD0000, - PSP_BL__LOAD_RASDRV = 0xE0000, + PSP_BL__LOAD_RASDRV = 0xE0000, + PSP_BL__LOAD_IPKEYMGRDRV = 0xF0000, PSP_BL__DRAM_LONG_TRAIN = 0x100000, PSP_BL__DRAM_SHORT_TRAIN = 0x200000, PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000, @@ -117,6 +119,7 @@ struct psp_funcs { int (*bootloader_load_intf_drv)(struct psp_context *psp); int (*bootloader_load_dbg_drv)(struct psp_context *psp); int (*bootloader_load_ras_drv)(struct psp_context *psp); + int (*bootloader_load_ipkeymgr_drv)(struct psp_context *psp); int (*bootloader_load_sos)(struct psp_context *psp); int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type); @@ -336,6 +339,7 @@ struct psp_context { struct psp_bin_desc intf_drv; struct psp_bin_desc dbg_drv; struct psp_bin_desc ras_drv; + struct psp_bin_desc ipkeymgr_drv; /* tmr buffer */ struct amdgpu_bo *tmr_bo; @@ -424,6 +428,9 @@ struct amdgpu_psp_funcs { #define psp_bootloader_load_ras_drv(psp) \ ((psp)->funcs->bootloader_load_ras_drv ? 
\ (psp)->funcs->bootloader_load_ras_drv((psp)) : 0) +#define psp_bootloader_load_ipkeymgr_drv(psp) \ + ((psp)->funcs->bootloader_load_ipkeymgr_drv ? \ + (psp)->funcs->bootloader_load_ipkeymgr_drv((psp)) : 0) #define psp_bootloader_load_sos(psp) \ ((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0) #define psp_smu_reload_quirk(psp) \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h index 66125d43cf21..b11d190ece53 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -32,6 +32,7 @@ enum AMDGPU_RESET_FLAGS { AMDGPU_NEED_FULL_RESET = 0, AMDGPU_SKIP_HW_RESET = 1, + AMDGPU_SKIP_COREDUMP = 2, }; struct amdgpu_reset_context { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 6417cb76ccd4..1d71729e3f6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -133,7 +133,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && - amdgpu_bo_in_cpu_visible_vram(abo)) { + amdgpu_res_cpu_visible(adev, bo->resource)) { /* Try evicting to the CPU inaccessible part of VRAM * first, but only set GTT as busy placement, so this @@ -403,40 +403,55 @@ error: return r; } -/* - * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy +/** + * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU + * @adev: amdgpu device + * @res: the resource to check * - * Called by amdgpu_bo_move() + * Returns: true if the full resource is CPU visible, false otherwise. */ -static bool amdgpu_mem_visible(struct amdgpu_device *adev, - struct ttm_resource *mem) +bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, + struct ttm_resource *res) { - u64 mem_size = (u64)mem->size; struct amdgpu_res_cursor cursor; - u64 end; - if (mem->mem_type == TTM_PL_SYSTEM || - mem->mem_type == TTM_PL_TT) + if (!res) + return false; + + if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT || + res->mem_type == AMDGPU_PL_PREEMPT) return true; - if (mem->mem_type != TTM_PL_VRAM) + + if (res->mem_type != TTM_PL_VRAM) return false; - amdgpu_res_first(mem, 0, mem_size, &cursor); - end = cursor.start + cursor.size; + amdgpu_res_first(res, 0, res->size, &cursor); while (cursor.remaining) { + if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size) + return false; amdgpu_res_next(&cursor, cursor.size); + } - if (!cursor.remaining) - break; + return true; +} - /* ttm_resource_ioremap only supports contiguous memory */ - if (end != cursor.start) - return false; +/* + * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy + * + * Called by amdgpu_bo_move() + */ +static bool amdgpu_res_copyable(struct amdgpu_device *adev, + struct ttm_resource *mem) +{ + if (!amdgpu_res_cpu_visible(adev, mem)) + return false; - end = cursor.start + cursor.size; - } + /* ttm_resource_ioremap only supports contiguous memory */ + if (mem->mem_type == TTM_PL_VRAM && + !(mem->placement & TTM_PL_FLAG_CONTIGUOUS)) + return false; - return end <= adev->gmc.visible_vram_size; + return true; } /* @@ -529,8 +544,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, if (r) { /* Check that all memory is CPU accessible */ - if (!amdgpu_mem_visible(adev, old_mem) || - !amdgpu_mem_visible(adev, new_mem)) { + if (!amdgpu_res_copyable(adev, old_mem) 
|| + !amdgpu_res_copyable(adev, new_mem)) { pr_err("Move buffer fallback to memcpy unavailable\n"); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 65ec82141a8e..32cf6b6f6efd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, uint64_t start); +bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, + struct ttm_resource *res); + int amdgpu_ttm_init(struct amdgpu_device *adev); void amdgpu_ttm_fini(struct amdgpu_device *adev); void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 619445760037..105d4de0613a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -125,6 +125,7 @@ enum psp_fw_type { PSP_FW_TYPE_PSP_INTF_DRV, PSP_FW_TYPE_PSP_DBG_DRV, PSP_FW_TYPE_PSP_RAS_DRV, + PSP_FW_TYPE_PSP_IPKEYMGR_DRV, PSP_FW_TYPE_MAX_INDEX, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8af3f0fd3073..4e2391c83d7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1647,6 +1647,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, trace_amdgpu_vm_bo_map(bo_va, mapping); } +/* Validate operation parameters to prevent potential abuse */ +static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev, + struct amdgpu_bo *bo, + uint64_t saddr, + uint64_t offset, + uint64_t size) +{ + uint64_t tmp, lpfn; + + if (saddr & AMDGPU_GPU_PAGE_MASK + || offset & AMDGPU_GPU_PAGE_MASK + || size & AMDGPU_GPU_PAGE_MASK) + return -EINVAL; + + if (check_add_overflow(saddr, size, &tmp) + || check_add_overflow(offset, size, &tmp) + || size == 0 /* which also leads to end < begin */) + return -EINVAL; + + /* make sure object fit at this offset */ + if (bo && offset + size > amdgpu_bo_size(bo)) + return -EINVAL; + + /* Ensure last pfn not exceed max_pfn */ + lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT; + if (lpfn >= adev->vm_manager.max_pfn) + return -EINVAL; + + return 0; +} + /** * amdgpu_vm_bo_map - map bo inside a vm * @@ -1673,21 +1704,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; uint64_t eaddr; + int r; - /* validate the parameters */ - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) - return -EINVAL; - if (saddr + size <= saddr || offset + size <= offset) - return -EINVAL; - - /* make sure object fit at this offset */ - eaddr = saddr + size - 1; - if ((bo && offset + size > amdgpu_bo_size(bo)) || - (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) - return -EINVAL; + r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size); + if (r) + return r; saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); if (tmp) { @@ -1740,17 +1764,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, uint64_t eaddr; int r; - /* validate the parameters */ - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) - return -EINVAL; - if (saddr + size <= saddr || offset + size <= offset) - return -EINVAL; - - /* make sure object fit at 
this offset */ - eaddr = saddr + size - 1; - if ((bo && offset + size > amdgpu_bo_size(bo)) || - (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) - return -EINVAL; + r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size); + if (r) + return r; /* Allocate all the needed memory */ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); @@ -1764,7 +1780,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, } saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; mapping->start = saddr; mapping->last = eaddr; @@ -1851,10 +1867,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; LIST_HEAD(removed); uint64_t eaddr; + int r; + + r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size); + if (r) + return r; - eaddr = saddr + size - 1; saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; /* Allocate all the needed memory */ before = kzalloc(sizeof(*before), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index fbb43ae7624f..414ea3f560a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -630,7 +630,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) { - u32 mask, inst_mask = adev->sdma.sdma_mask; + u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask; int ret, i; /* generally 1 AID supports 4 instances */ @@ -642,7 +642,9 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask; inst_mask >>= adev->sdma.num_inst_per_aid, ++i) { - if ((inst_mask & mask) == mask) + avail_inst = inst_mask & mask; + if (avail_inst == mask || avail_inst == 0x3 || + avail_inst == 0xc) adev->aid_mask |= (1 << i); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index ae6a0d19e247..5dbfef49dd5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -4506,14 +4506,11 @@ static int gfx_v11_0_soft_reset(void *handle) gfx_v11_0_set_safe_mode(adev, 0); + mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k); - WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); + soc21_grbm_select(adev, i, k, j, 0); WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2); WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1); @@ -4523,16 +4520,14 @@ static int gfx_v11_0_soft_reset(void *handle) for (i = 0; i < adev->gfx.me.num_me; ++i) { for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { - tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k); - WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); + soc21_grbm_select(adev, i, k, j, 0); WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1); } } } + soc21_grbm_select(adev, 0, 0, 0, 0); + 
mutex_unlock(&adev->srbm_mutex); /* Try to acquire the gfx mutex before access to CP_VMID_RESET */ r = gfx_v11_0_request_gfx_index_mutex(adev, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c index ad4ad39f128f..c757ef99e3c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c @@ -346,6 +346,21 @@ static int ih_v6_0_irq_init(struct amdgpu_device *adev) DELAY, 3); WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp); + /* Redirect the interrupts to IH RB1 for dGPU */ + if (adev->irq.ih1.ring_size) { + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0); + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp); + + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, + SOURCE_ID_MATCH_ENABLE, 0x1); + + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp); + } + pci_set_master(adev->pdev); /* enable interrupts */ @@ -549,8 +564,15 @@ static int ih_v6_0_sw_init(void *handle) adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; - adev->irq.ih1.ring_size = 0; - adev->irq.ih2.ring_size = 0; + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE, + use_bus_addr); + if (r) + return r; + + adev->irq.ih1.use_doorbell = true; + adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; + } /* initialize ih control register offset */ ih_v6_0_init_register_offset(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c index b8da0fc29378..29ed78798070 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c @@ -346,6 +346,21 @@ static int ih_v6_1_irq_init(struct amdgpu_device *adev) DELAY, 3); WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp); + /* Redirect the interrupts to IH RB1 for dGPU */ + if (adev->irq.ih1.ring_size) { + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0); + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp); + + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, + SOURCE_ID_MATCH_ENABLE, 0x1); + + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp); + } + pci_set_master(adev->pdev); /* enable interrupts */ @@ -550,8 +565,15 @@ static int ih_v6_1_sw_init(void *handle) adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; - adev->irq.ih1.ring_size = 0; - adev->irq.ih2.ring_size = 0; + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE, + use_bus_addr); + if (r) + return r; + + adev->irq.ih1.use_doorbell = true; + adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; + } /* initialize ih control register offset */ ih_v6_1_init_register_offset(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index e5230078a4cd..81833395324a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -111,7 
+111,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, struct amdgpu_device *adev = mes->adev; struct amdgpu_ring *ring = &mes->ring; unsigned long flags; - signed long timeout = adev->usec_timeout; + signed long timeout = 3000000; /* 3000 ms */ if (amdgpu_emu_mode) { timeout *= 100; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index 89992c1c9a62..aba00d961627 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -446,8 +446,6 @@ static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev, amdgpu_virt_fini_data_exchange(adev); xgpu_nv_send_access_requests_with_param(adev, IDH_RAS_POISON, block, 0, 0); - if (block != AMDGPU_RAS_BLOCK__SDMA) - amdgpu_virt_init_data_exchange(adev); } } diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c index 78a95f8f370b..f08a32c18694 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c @@ -169,7 +169,8 @@ static int psp_v14_0_bootloader_load_intf_drv(struct psp_context *psp) static int psp_v14_0_bootloader_load_dbg_drv(struct psp_context *psp) { - return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV); + /* dbg_drv was renamed to had_drv in psp v14 */ + return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_HADDRV); } static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp) @@ -177,6 +178,10 @@ static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp) return psp_v14_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV); } +static int psp_v14_0_bootloader_load_ipkeymgr_drv(struct psp_context *psp) +{ + return psp_v14_0_bootloader_load_component(psp, &psp->ipkeymgr_drv, PSP_BL__LOAD_IPKEYMGRDRV); +} static int psp_v14_0_bootloader_load_sos(struct psp_context *psp) { @@ -653,6 +658,7 @@ static const struct psp_funcs psp_v14_0_funcs = { .bootloader_load_intf_drv = psp_v14_0_bootloader_load_intf_drv, .bootloader_load_dbg_drv = psp_v14_0_bootloader_load_dbg_drv, .bootloader_load_ras_drv = psp_v14_0_bootloader_load_ras_drv, + .bootloader_load_ipkeymgr_drv = psp_v14_0_bootloader_load_ipkeymgr_drv, .bootloader_load_sos = psp_v14_0_bootloader_load_sos, .ring_create = psp_v14_0_ring_create, .ring_stop = psp_v14_0_ring_stop, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index c368c70df3f4..c3beb872adf8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -144,7 +144,7 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, uint16_t pasid, uint16_t client_id) { enum amdgpu_ras_block block = 0; - int old_poison, ret = -EINVAL; + int old_poison; uint32_t reset = 0; struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); @@ -163,17 +163,13 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, case SOC15_IH_CLIENTID_SE2SH: case SOC15_IH_CLIENTID_SE3SH: case SOC15_IH_CLIENTID_UTCL2: - ret = kfd_dqm_evict_pasid(dev->dqm, pasid); block = AMDGPU_RAS_BLOCK__GFX; - if (ret) - reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; break; case SOC15_IH_CLIENTID_VMC: case SOC15_IH_CLIENTID_VMC1: - ret = kfd_dqm_evict_pasid(dev->dqm, pasid); block = AMDGPU_RAS_BLOCK__MMHUB; - if (ret) - reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; + reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; break; case 
SOC15_IH_CLIENTID_SDMA0: case SOC15_IH_CLIENTID_SDMA1: @@ -184,22 +180,15 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; break; default: - break; + dev_warn(dev->adev->dev, + "client %d does not support poison consumption\n", client_id); + return; } kfd_signal_poison_consumed_event(dev, pasid); - /* resetting queue passes, do page retirement without gpu reset - * resetting queue fails, fallback to gpu reset solution - */ - if (!ret) - dev_warn(dev->adev->dev, - "RAS poison consumption, unmap queue flow succeeded: client id %d\n", - client_id); - else - dev_warn(dev->adev->dev, - "RAS poison consumption, fall back to gpu reset flow: client id %d\n", - client_id); + dev_warn(dev->adev->dev, + "poison is consumed by client %d, kick off gpu reset flow\n", client_id); amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index b79986412cd8..aafdf064651f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1922,6 +1922,8 @@ static int signal_eviction_fence(struct kfd_process *p) rcu_read_lock(); ef = dma_fence_get_rcu_safe(&p->ef); rcu_read_unlock(); + if (!ef) + return -EINVAL; ret = dma_fence_signal(ef); dma_fence_put(ef); @@ -1949,10 +1951,9 @@ static void evict_process_worker(struct work_struct *work) * they are responsible stopping the queues and scheduling * the restore work. */ - if (!signal_eviction_fence(p)) - queue_delayed_work(kfd_restore_wq, &p->restore_work, - msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)); - else + if (signal_eviction_fence(p) || + mod_delayed_work(kfd_restore_wq, &p->restore_work, + msecs_to_jiffies(PROCESS_RESTORE_TIME_MS))) kfd_process_restore_queues(p); pr_debug("Finished evicting pasid 0x%x\n", p->pasid); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 4d9a76446df8..0d67fc56249b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1230,6 +1230,15 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) break; } + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): + hw_params.ips_sequential_ono = adev->external_rev_id > 0x10; + break; + default: + break; + } + status = dmub_srv_hw_init(dmub_srv, &hw_params); if (status != DMUB_STATUS_OK) { DRM_ERROR("Error initializing DMUB HW: %d\n", status); @@ -3037,6 +3046,7 @@ static int dm_resume(void *handle) dc_stream_release(dm_new_crtc_state->stream); dm_new_crtc_state->stream = NULL; } + dm_new_crtc_state->base.color_mgmt_changed = true; } for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index c1a5908b97c8..a2b4ff2cff16 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -272,7 +272,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base; } - if (asic_id.chip_id == DEVICE_ID_NV_13FE) { + if (ctx->dce_version == DCN_VERSION_2_01) { dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index bec252e1dd27..7eecb3403f74 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -29,6 +29,7 @@ #include "dcn20/dcn20_clk_mgr.h" #include "dce100/dce_clk_mgr.h" #include "dcn31/dcn31_clk_mgr.h" +#include "dcn32/dcn32_clk_mgr.h" #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" @@ -829,7 +830,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, dmcu->funcs->set_psr_wait_loop(dmcu, clk_mgr_base->clks.dispclk_khz / 1000 / 7); - if (dc->config.enable_auto_dpm_test_logs && safe_to_lower) { + if (dc->config.enable_auto_dpm_test_logs) { dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context); } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 145cdab92ca0..8eefba757da4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3446,6 +3446,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc, if (srf_updates[i].surface->flip_immediate) continue; + update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, sizeof(flip_addr->dirty_rects)); @@ -5042,8 +5043,13 @@ void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) void dc_power_down_on_boot(struct dc *dc) { if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && - dc->hwss.power_down_on_boot) + dc->hwss.power_down_on_boot) { + + if (dc->caps.ips_support) + dc_exit_ips_for_hw_access(dc); + dc->hwss.power_down_on_boot(dc); + } } void dc_set_power_state( diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 2633e481234f..876b0e5eda95 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1500,9 +1500,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) return false; } - pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( - pipe_ctx->plane_state->format); - /* Timing borders are part of vactive that we are also supposed to skip in addition * to any stream dst offset. Since dm logic assumes dst is in addressable * space we need to add the left and top borders to dst offsets temporarily. 
@@ -1514,6 +1511,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) /* Calculate H and V active size */ pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width; pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height; + pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( + pipe_ctx->plane_state->format); /* depends on h_active */ calculate_recout(pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index bf889bdd3925..76bb05f4d6bf 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -191,7 +191,7 @@ static void init_state(struct dc *dc, struct dc_state *state) struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params) { #ifdef CONFIG_DRM_AMD_DC_FP - struct dml2_configuration_options dml2_opt = dc->dml2_options; + struct dml2_configuration_options *dml2_opt = &dc->dml2_options; #endif struct dc_state *state = kvzalloc(sizeof(struct dc_state), GFP_KERNEL); @@ -205,11 +205,11 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p #ifdef CONFIG_DRM_AMD_DC_FP if (dc->debug.using_dml2) { - dml2_opt.use_clock_dc_limits = false; - dml2_create(dc, &dml2_opt, &state->bw_ctx.dml2); + dml2_opt->use_clock_dc_limits = false; + dml2_create(dc, dml2_opt, &state->bw_ctx.dml2); - dml2_opt.use_clock_dc_limits = true; - dml2_create(dc, &dml2_opt, &state->bw_ctx.dml2_dc_power_source); + dml2_opt->use_clock_dc_limits = true; + dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source); } #endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index ee6493a9a79c..5c7e4884cac2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -495,7 +495,7 @@ bool dc_stream_remove_writeback(struct dc *dc, struct dc_stream_state *stream, uint32_t dwb_pipe_inst) { - int i = 0, j = 0; + unsigned int i, j; if (stream == NULL) { dm_error("DC: dc_stream is NULL!\n"); return false; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index db64b0061d70..3c33c3bcbe2c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -53,7 +53,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.280" +#define DC_VER "3.2.281" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -309,12 +309,12 @@ struct dc_dcc_setting { unsigned int max_compressed_blk_size; unsigned int max_uncompressed_blk_size; bool independent_64b_blks; - //These bitfields to be used starting with DCN + //These bitfields to be used starting with DCN 3.0 struct { - uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case) - uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN - uint32_t dcc_256_128_128 : 1; //available starting with DCN - uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case) + uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case) + uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0 + uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0 + uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case) } dcc_controls; }; @@ -1003,9 +1003,9 @@ struct dc_debug_options { unsigned int 
static_screen_wait_frames; bool force_chroma_subsampling_1tap; bool disable_422_left_edge_pixel; + unsigned int force_cositing; }; -struct gpu_info_soc_bounding_box_v1_0; /* Generic structure that can be used to query properties of DC. More fields * can be added as required. @@ -1285,6 +1285,7 @@ struct dc_plane_state { struct tg_color visual_confirm_color; bool is_statically_allocated; + enum chroma_cositing cositing; }; struct dc_plane_info { @@ -1303,6 +1304,7 @@ struct dc_plane_info { int global_alpha_value; bool input_csc_enabled; int layer_index; + enum chroma_cositing cositing; }; #include "dc_stream.h" diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 465e15f57f93..2ad7f60805f5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -738,6 +738,13 @@ enum scanning_type { SCANNING_TYPE_UNDEFINED }; +enum chroma_cositing { + CHROMA_COSITING_NONE, + CHROMA_COSITING_LEFT, + CHROMA_COSITING_TOPLEFT, + CHROMA_COSITING_COUNT +}; + struct dc_crtc_timing_flags { uint32_t INTERLACE :1; uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1, diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 614d7c27c759..0f66d00ef80f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -1050,6 +1050,8 @@ union replay_error_status { struct replay_config { /* Replay feature is supported */ bool replay_supported; + /* Replay caps support DPCD & EDID caps*/ + bool replay_cap_support; /* Power opt flags that are supported */ unsigned int replay_power_opt_supported; /* SMU optimization is supported */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index a2f48d46d199..ee601a6897a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -22,9 +22,6 @@ * Authors: AMD * */ - -#include <linux/delay.h> - #include "resource.h" #include "dce_i2c.h" #include "dce_i2c_hw.h" @@ -315,9 +312,6 @@ static bool setup_engine( /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); - /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ - REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); - /*set SW requested I2c speed to default, if API calls in it will be override later*/ set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index d980e6bd6c66..b7a89c39f445 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -167,7 +167,6 @@ struct dcn10_link_enc_registers { uint32_t DIO_LINKD_CNTL; uint32_t DIO_LINKE_CNTL; uint32_t DIO_LINKF_CNTL; - uint32_t DIG_FIFO_CTRL0; uint32_t DIO_CLK_CNTL; uint32_t DIG_BE_CLK_CNTL; }; @@ -475,9 +474,6 @@ struct dcn10_link_enc_registers { type HPO_DP_ENC_SEL;\ type HPO_HDMI_ENC_SEL -#define DCN32_LINK_ENCODER_REG_FIELD_LIST(type) \ - type DIG_FIFO_OUTPUT_PIXEL_MODE - #define DCN35_LINK_ENCODER_REG_FIELD_LIST(type) \ type DIG_BE_ENABLE;\ type DIG_RB_SWITCH_EN;\ @@ -512,7 +508,6 @@ struct dcn10_link_enc_shift { DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t); DCN30_LINK_ENCODER_REG_FIELD_LIST(uint8_t); 
DCN31_LINK_ENCODER_REG_FIELD_LIST(uint8_t); - DCN32_LINK_ENCODER_REG_FIELD_LIST(uint8_t); DCN35_LINK_ENCODER_REG_FIELD_LIST(uint8_t); }; @@ -521,7 +516,6 @@ struct dcn10_link_enc_mask { DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t); DCN30_LINK_ENCODER_REG_FIELD_LIST(uint32_t); DCN31_LINK_ENCODER_REG_FIELD_LIST(uint32_t); - DCN32_LINK_ENCODER_REG_FIELD_LIST(uint32_t); DCN35_LINK_ENCODER_REG_FIELD_LIST(uint32_t); }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index 5838a11efd00..71e9288d60ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -168,6 +168,10 @@ static void opp1_set_pixel_encoding( case PIXEL_ENCODING_RGB: case PIXEL_ENCODING_YCBCR444: + REG_UPDATE_3(FMT_CONTROL, + FMT_PIXEL_ENCODING, 0, + FMT_SUBSAMPLING_MODE, 0, + FMT_CBCR_BIT_REDUCTION_BYPASS, 0); REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0); break; case PIXEL_ENCODING_YCBCR422: @@ -177,7 +181,10 @@ static void opp1_set_pixel_encoding( FMT_CBCR_BIT_REDUCTION_BYPASS, 0); break; case PIXEL_ENCODING_YCBCR420: - REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2); + REG_UPDATE_3(FMT_CONTROL, + FMT_PIXEL_ENCODING, 2, + FMT_SUBSAMPLING_MODE, 2, + FMT_CBCR_BIT_REDUCTION_BYPASS, 1); break; default: break; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h index 2c0ecfa5a643..c87de68a509e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h @@ -79,6 +79,8 @@ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh), \ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh), \ OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh), \ + OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh), \ + OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh), \ OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh), \ OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh), \ OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh), \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index c429590f1298..1b96972b9d0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -127,7 +127,6 @@ struct dcn10_stream_enc_registers { uint32_t AFMT_60958_1; uint32_t AFMT_60958_2; uint32_t DIG_FE_CNTL; - uint32_t DIG_FE_CNTL2; uint32_t DIG_FIFO_STATUS; uint32_t DP_MSE_RATE_CNTL; uint32_t DP_MSE_RATE_UPDATE; @@ -570,7 +569,7 @@ struct dcn10_stream_enc_registers { type DP_SEC_GSP11_ENABLE;\ type DP_SEC_GSP11_LINE_NUM -#define SE_REG_FIELD_LIST_DCN3_2(type) \ +#define SE_REG_FIELD_LIST_DCN3_1_COMMON(type) \ type DIG_FIFO_OUTPUT_PIXEL_MODE;\ type DP_PIXEL_PER_CYCLE_PROCESSING_MODE;\ type DIG_SYMCLK_FE_ON;\ @@ -599,7 +598,7 @@ struct dcn10_stream_encoder_shift { uint8_t HDMI_ACP_SEND; SE_REG_FIELD_LIST_DCN2_0(uint8_t); SE_REG_FIELD_LIST_DCN3_0(uint8_t); - SE_REG_FIELD_LIST_DCN3_2(uint8_t); + SE_REG_FIELD_LIST_DCN3_1_COMMON(uint8_t); SE_REG_FIELD_LIST_DCN3_5_COMMON(uint8_t); }; @@ -608,7 +607,7 @@ struct dcn10_stream_encoder_mask { uint32_t HDMI_ACP_SEND; SE_REG_FIELD_LIST_DCN2_0(uint32_t); SE_REG_FIELD_LIST_DCN3_0(uint32_t); - SE_REG_FIELD_LIST_DCN3_2(uint32_t); + SE_REG_FIELD_LIST_DCN3_1_COMMON(uint32_t); SE_REG_FIELD_LIST_DCN3_5_COMMON(uint32_t); }; @@ -667,9 +666,6 @@ void 
enc1_stream_encoder_send_immediate_sdp_message( void enc1_stream_encoder_stop_dp_info_packets( struct stream_encoder *enc); -void enc1_stream_encoder_reset_fifo( - struct stream_encoder *enc); - void enc1_stream_encoder_dp_blank( struct dc_link *link, struct stream_encoder *enc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h index efa2adf4f83d..8da3084d933f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h @@ -147,7 +147,7 @@ uint32_t DCN_CUR1_TTU_CNTL1;\ uint32_t VMID_SETTINGS_0 - +/*shared with dcn3.x*/ #define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \ DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \ uint32_t FLIP_PARAMETERS_3;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 16b5ff208d14..ea73473b970a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -395,9 +395,12 @@ static void mpc20_program_ogam_pwl( MPCC_OGAM_LUT_DATA, rgb[i].delta_green_reg); REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].delta_blue_reg); - } + REG_SEQ_SUBMIT(); + PERF_TRACE(); + REG_SEQ_WAIT_DONE(); + PERF_TRACE(); } static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id, @@ -501,11 +504,6 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id) ASSERT(!mpc_disabled); ASSERT(!mpc_idle); } - - REG_SEQ_SUBMIT(); - PERF_TRACE(); - REG_SEQ_WAIT_DONE(); - PERF_TRACE(); } static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c index 35dd4bac242a..cd2bfcc51276 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c @@ -77,6 +77,7 @@ static void hubp201_program_requestor(struct hubp *hubp, MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode, CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode); + /* no need to program PTE */ REG_SET_5(DCHUBP_REQ_SIZE_CONFIG, 0, CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size, MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size, @@ -99,6 +100,10 @@ static void hubp201_setup( struct _vcs_dpi_display_rq_regs_st *rq_regs, struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest) { + /* + * otg is locked when this func is called. Register are double buffered. 
+ * disable the requestors is not needed + */ hubp2_vready_at_or_After_vsync(hubp, pipe_dest); hubp201_program_requestor(hubp, rq_regs); hubp201_program_deadline(hubp, dlg_attr, ttu_attr); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h index 8b95ef251332..be25e8dc0636 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h @@ -30,6 +30,10 @@ #define DPCS_DCN201_MASK_SH_LIST(mask_sh)\ DPCS_MASK_SH_LIST(mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD, mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD_EN, mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD, mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD_EN, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DP4, mask_sh),\ @@ -44,7 +48,15 @@ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_EN, mask_sh) #define DPCS_DCN201_REG_LIST(id) \ - DPCS_DCN2_CMN_REG_LIST(id) + DPCS_DCN2_CMN_REG_LIST(id), \ + SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id) void dcn201_link_encoder_construct( struct dcn20_link_encoder *enc20, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h index 35a613bb08bf..08a57ea4591c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h @@ -29,13 +29,6 @@ #include "dcn20/dcn20_dccg.h" -#define DCCG_REG_LIST_DCN3AG() \ - DCCG_COMMON_REG_LIST_DCN_BASE(),\ - SR(PHYASYMCLK_CLOCK_CNTL),\ - SR(PHYBSYMCLK_CLOCK_CNTL),\ - SR(PHYCSYMCLK_CLOCK_CNTL) - - #define DCCG_REG_LIST_DCN30() \ DCCG_REG_LIST_DCN2(),\ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\ @@ -46,17 +39,6 @@ SR(PHYBSYMCLK_CLOCK_CNTL),\ SR(PHYCSYMCLK_CLOCK_CNTL) -#define DCCG_MASK_SH_LIST_DCN3AG(mask_sh) \ - DCCG_MASK_SH_LIST_DCN2_1(mask_sh),\ - DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\ - DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\ - DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\ - DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\ - DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\ - DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\ - DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\ - DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh) - #define DCCG_MASK_SH_LIST_DCN3(mask_sh) \ DCCG_MASK_SH_LIST_DCN2(mask_sh),\ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index 8ed7125d230d..425b830b88d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -29,7 +29,6 @@ #include "reg_helper.h" #include "hw_shared.h" #include "dc.h" -#include "core_types.h" #define DC_LOGGER \ enc1->base.ctx->logger diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c index 1b9d9495f76d..fae98cf52020 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c @@ -251,9 +251,7 @@ static const struct dwbc_funcs dcn30_dwbc_funcs = { .set_fc_enable = dwb3_set_fc_enable, .set_stereo = dwb3_set_stereo, .set_new_content = dwb3_set_new_content, - .dwb_program_output_csc = NULL, .dwb_ogam_set_input_transfer_func = dwb3_ogam_set_input_transfer_func, //TODO: rename - .dwb_set_scaler = NULL, }; void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c index a046664e2031..c1959672df50 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c @@ -63,6 +63,7 @@ static const struct hubbub_funcs hubbub301_funcs = { .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, .force_pstate_change_control = hubbub3_force_pstate_change_control, + .init_watermarks = hubbub3_init_watermarks, .hubbub_read_state = hubbub2_read_state, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c index e224a028d68a..8a0460e86309 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c @@ -248,14 +248,12 @@ void dcn32_link_encoder_construct( enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; - enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; - - enc10->base.features = *enc_features; if (enc10->base.connector.id == CONNECTOR_ID_USBC) enc10->base.features.flags.bits.DP_IS_USB_C = 1; - if (enc10->base.connector.id == CONNECTOR_ID_USBC) - enc10->base.features.flags.bits.DP_IS_USB_C = 1; + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + + enc10->base.features = *enc_features; enc10->base.transmitter = init_data->transmitter; diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c index 87eab924ecaf..20f810a6646c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c @@ -183,6 +183,8 @@ void dcn35_link_encoder_construct( enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; + if (enc10->base.connector.id == CONNECTOR_ID_USBC) + enc10->base.features.flags.bits.DP_IS_USB_C = 1; enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; @@ -237,8 +239,6 @@ void dcn35_link_encoder_construct( } enc10->base.features.flags.bits.HDMI_6GB_EN = 1; - if (enc10->base.connector.id == CONNECTOR_ID_USBC) - enc10->base.features.flags.bits.DP_IS_USB_C = 1; if (bp_funcs->get_connector_speed_cap_info) result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index ed1e2f65f5b5..f8c0cee34080 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -395,7 +395,9 @@ void dpp3_set_cursor_attributes( if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { - cur_rom_en = 1; + if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { + cur_rom_en = 1; + } } REG_UPDATE_3(CURSOR0_CONTROL, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c index c4944478ed91..a53092cd619b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -116,10 +116,10 @@ static const struct hw_sequencer_funcs dcn351_funcs = { .update_visual_confirm_color = dcn10_update_visual_confirm_color, .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations, .update_dsc_pg = dcn32_update_dsc_pg, - .calc_blocks_to_gate = dcn351_calc_blocks_to_gate, - .calc_blocks_to_ungate = dcn351_calc_blocks_to_ungate, - .hw_block_power_up = dcn351_hw_block_power_up, - .hw_block_power_down = dcn351_hw_block_power_down, + .calc_blocks_to_gate = dcn35_calc_blocks_to_gate, + .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate, + .hw_block_power_up = dcn35_hw_block_power_up, + .hw_block_power_down = dcn35_hw_block_power_down, .root_clock_control = dcn35_root_clock_control, }; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 9aa39bd25be9..c16e915686fc 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -2547,7 +2547,7 @@ struct resource_pool *dcn32_create_resource_pool( * full update which delays the flip for 1 frame. If we use the original pipe * we don't have to toggle its power. So we can flip faster. 
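With the dpp3_set_cursor_attributes() change above, the cursor degamma ROM is only engaged when the caller opts in explicitly; a minimal caller-side sketch (the flag and color-format names come from the hunk, the surrounding setup is illustrative):

/* Sketch only: pre-multiplied alpha cursors no longer enable the
 * degamma ROM unless this attribute flag is set.
 */
struct dc_cursor_attributes attributes = {0};

attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; /* keeps cur_rom_en = 1 */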
*/ -static int find_optimal_free_pipe_as_secondary_dpp_pipe( +int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( const struct resource_context *cur_res_ctx, struct resource_context *new_res_ctx, const struct resource_pool *pool, @@ -2730,7 +2730,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( return dcn32_acquire_idle_pipe_for_head_pipe_in_layer( new_ctx, pool, opp_head_pipe->stream, opp_head_pipe); - free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe( + free_pipe_idx = dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( &cur_ctx->res_ctx, &new_ctx->res_ctx, pool, opp_head_pipe); if (free_pipe_idx >= 0) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index 286e20ad46ed..fee67fbab8e2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -137,6 +137,12 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context); bool dcn32_is_center_timing(struct pipe_ctx *pipe); bool dcn32_is_psr_capable(struct pipe_ctx *pipe); +int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *new_opp_head); + struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( const struct dc_state *cur_ctx, struct dc_state *new_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 8a57adb27264..3acfbbac8538 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -758,7 +758,7 @@ static const struct dc_debug_options debug_defaults_drv = { //must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions .enable_double_buffered_dsc_pg_support = true, .enable_dp_dig_pixel_rate_div_policy = 1, - .disable_z10 = true, + .disable_z10 = false, .ignore_pg = true, .psp_disabled_wa = true, .ips2_eval_delay_us = 2000, @@ -1722,13 +1722,12 @@ static bool dcn351_validate_bandwidth(struct dc *dc, return out; DC_FP_START(); - dcn351_decide_zstate_support(dc, context); + dcn35_decide_zstate_support(dc, context); DC_FP_END(); return out; } - static struct resource_funcs dcn351_res_pool_funcs = { .destroy = dcn351_destroy_resource_pool, .link_enc_create = dcn35_link_encoder_create, diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 662bdb0e5d3d..2fde1f043d50 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -297,6 +297,7 @@ struct dmub_srv_hw_params { bool dpia_hpd_int_enable_supported; bool disable_clock_gate; bool disallow_dispclk_dppclk_ds; + bool ips_sequential_ono; enum dmub_memory_access_type mem_access_type; enum dmub_ips_disable_type disable_ips; }; diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 944f14307517..e85fd3ac52c7 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -1614,7 +1614,7 @@ struct dmub_rb_cmd_idle_opt_dcn_restore { */ struct dmub_dcn_notify_idle_cntl_data { uint8_t driver_idle; - uint8_t pad[1]; + uint8_t reserved[59]; }; /** @@ -2335,6 +2335,11 @@ enum phy_link_rate { 
* UHBR10 - 20.0 Gbps/Lane */ PHY_RATE_2000 = 11, + + PHY_RATE_675 = 12, + /** + * Rate 12 - 6.75 Gbps/Lane + */ }; /** @@ -3062,6 +3067,11 @@ enum dmub_cmd_replay_type { * Set pseudo vtotal */ DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL = 7, + /** + * Set adaptive sync sdp enabled + */ + DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP = 8, + }; /** @@ -3263,6 +3273,20 @@ struct dmub_cmd_replay_set_pseudo_vtotal { */ uint8_t pad; }; +struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data { + /** + * Panel Instance. + * Panel isntance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * enabled: set adaptive sync sdp enabled + */ + uint8_t force_disabled; + + uint8_t pad[2]; +}; /** * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command. @@ -3367,6 +3391,20 @@ struct dmub_rb_cmd_replay_set_pseudo_vtotal { }; /** + * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command. + */ +struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command. + */ + struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data data; +}; + +/** * Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command. */ struct dmub_cmd_replay_frameupdate_timer_data { @@ -3421,6 +3459,11 @@ union dmub_replay_cmd_set { * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data. */ struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data; + /** + * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command data. + */ + struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data; + }; /** @@ -4096,6 +4139,10 @@ enum dmub_cmd_panel_cntl_type { * Queries backlight info for the embedded panel. */ DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO = 1, + /** + * Sets the PWM Freq as per user's requirement. + */ + DMUB_CMD__PANEL_DEBUG_PWM_FREQ = 2, }; /** @@ -4668,6 +4715,10 @@ union dmub_rb_cmd { */ struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal; /** + * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command. + */ + struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp replay_disabled_adaptive_sync_sdp; + /** * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command. 
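The new DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command added above follows the packing pattern of the other replay sub-commands; a minimal driver-side sketch, assuming the usual DMUB_CMD__REPLAY type/sub_type convention and the dc_wake_and_execute_dmub_cmd() send path (the helper name below is illustrative):

/* Sketch only: pack and send the new replay sub-command. */
static void replay_force_disable_adaptive_sync_sdp(struct dc_context *ctx,
						   uint8_t panel_inst,
						   bool force_disabled)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.replay_disabled_adaptive_sync_sdp.header.type = DMUB_CMD__REPLAY;
	cmd.replay_disabled_adaptive_sync_sdp.header.sub_type =
		DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP;
	cmd.replay_disabled_adaptive_sync_sdp.header.payload_bytes =
		sizeof(struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data);
	cmd.replay_disabled_adaptive_sync_sdp.data.panel_inst = panel_inst;
	cmd.replay_disabled_adaptive_sync_sdp.data.force_disabled = force_disabled ? 1 : 0;

	dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}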
*/ struct dmub_rb_cmd_assr_enable assr_enable; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 98afaecd3984..70e63aeb8f89 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -420,6 +420,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.disable_clk_ds = params->disallow_dispclk_dppclk_ds; boot_options.bits.disable_clk_gate = params->disable_clock_gate; boot_options.bits.ips_disable = params->disable_ips; + boot_options.bits.ips_sequential_ono = params->ips_sequential_ono; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h index 8b931bbabe70..969e006b859b 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h @@ -237,6 +237,10 @@ #define regSEM_REGISTER_LAST_PART2_BASE_IDX 0 #define regIH_CLIENT_CFG 0x0184 #define regIH_CLIENT_CFG_BASE_IDX 0 +#define regIH_RING1_CLIENT_CFG_INDEX 0x0185 +#define regIH_RING1_CLIENT_CFG_INDEX_BASE_IDX 0 +#define regIH_RING1_CLIENT_CFG_DATA 0x0186 +#define regIH_RING1_CLIENT_CFG_DATA_BASE_IDX 0 #define regIH_CLIENT_CFG_INDEX 0x0188 #define regIH_CLIENT_CFG_INDEX_BASE_IDX 0 #define regIH_CLIENT_CFG_DATA 0x0189 diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h index f262f44fa68c..a672a91e58f0 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h @@ -888,6 +888,16 @@ //IH_CLIENT_CFG #define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0 #define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000003FL +//IH_RING1_CLIENT_CFG_INDEX +#define IH_RING1_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0 +#define IH_RING1_CLIENT_CFG_INDEX__INDEX_MASK 0x00000007L +//IH_RING1_CLIENT_CFG_DATA +#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID__SHIFT 0x0 +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID__SHIFT 0x8 +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10 +#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID_MASK 0x000000FFL +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MASK 0x0000FF00L +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L //IH_CLIENT_CFG_INDEX #define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0 #define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index dc2a864b0f51..7789b313285c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -45,6 +45,7 @@ #include "smu_v13_0_6_ppt.h" #include "smu_v13_0_7_ppt.h" #include "smu_v14_0_0_ppt.h" +#include "smu_v14_0_2_ppt.h" #include "amd_pcie.h" /* @@ -715,6 +716,10 @@ static int smu_set_funcs(struct amdgpu_device *adev) case IP_VERSION(14, 0, 1): smu_v14_0_0_set_ppt_funcs(smu); break; + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): + smu_v14_0_2_set_ppt_funcs(smu); + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h new file mode 100644 index 000000000000..97a29b80fb13 --- /dev/null +++ 
b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h @@ -0,0 +1,1836 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU14_DRIVER_IF_V14_0_H +#define SMU14_DRIVER_IF_V14_0_H + +//Increment this version if SkuTable_t or BoardTable_t change +#define PPTABLE_VERSION 0x18 + +#define NUM_GFXCLK_DPM_LEVELS 16 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_MP0CLK_DPM_LEVELS 2 +#define NUM_DCLK_DPM_LEVELS 8 +#define NUM_VCLK_DPM_LEVELS 8 +#define NUM_DISPCLK_DPM_LEVELS 8 +#define NUM_DPPCLK_DPM_LEVELS 8 +#define NUM_DPREFCLK_DPM_LEVELS 8 +#define NUM_DCFCLK_DPM_LEVELS 8 +#define NUM_DTBCLK_DPM_LEVELS 8 +#define NUM_UCLK_DPM_LEVELS 6 +#define NUM_LINK_LEVELS 3 +#define NUM_FCLK_DPM_LEVELS 8 +#define NUM_OD_FAN_MAX_POINTS 6 + +// Feature Control Defines +#define FEATURE_FW_DATA_READ_BIT 0 +#define FEATURE_DPM_GFXCLK_BIT 1 +#define FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT 2 +#define FEATURE_DPM_UCLK_BIT 3 +#define FEATURE_DPM_FCLK_BIT 4 +#define FEATURE_DPM_SOCCLK_BIT 5 +#define FEATURE_DPM_LINK_BIT 6 +#define FEATURE_DPM_DCN_BIT 7 +#define FEATURE_VMEMP_SCALING_BIT 8 +#define FEATURE_VDDIO_MEM_SCALING_BIT 9 +#define FEATURE_DS_GFXCLK_BIT 10 +#define FEATURE_DS_SOCCLK_BIT 11 +#define FEATURE_DS_FCLK_BIT 12 +#define FEATURE_DS_LCLK_BIT 13 +#define FEATURE_DS_DCFCLK_BIT 14 +#define FEATURE_DS_UCLK_BIT 15 +#define FEATURE_GFX_ULV_BIT 16 +#define FEATURE_FW_DSTATE_BIT 17 +#define FEATURE_GFXOFF_BIT 18 +#define FEATURE_BACO_BIT 19 +#define FEATURE_MM_DPM_BIT 20 +#define FEATURE_SOC_MPCLK_DS_BIT 21 +#define FEATURE_BACO_MPCLK_DS_BIT 22 +#define FEATURE_THROTTLERS_BIT 23 +#define FEATURE_SMARTSHIFT_BIT 24 +#define FEATURE_GTHR_BIT 25 +#define FEATURE_ACDC_BIT 26 +#define FEATURE_VR0HOT_BIT 27 +#define FEATURE_FW_CTF_BIT 28 +#define FEATURE_FAN_CONTROL_BIT 29 +#define FEATURE_GFX_DCS_BIT 30 +#define FEATURE_GFX_READ_MARGIN_BIT 31 +#define FEATURE_LED_DISPLAY_BIT 32 +#define FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT 33 +#define FEATURE_OUT_OF_BAND_MONITOR_BIT 34 +#define FEATURE_OPTIMIZED_VMIN_BIT 35 +#define FEATURE_GFX_IMU_BIT 36 +#define FEATURE_BOOT_TIME_CAL_BIT 37 +#define FEATURE_GFX_PCC_DFLL_BIT 38 +#define FEATURE_SOC_CG_BIT 39 +#define FEATURE_DF_CSTATE_BIT 40 +#define FEATURE_GFX_EDC_BIT 41 +#define FEATURE_BOOT_POWER_OPT_BIT 42 +#define FEATURE_CLOCK_POWER_DOWN_BYPASS_BIT 43 +#define FEATURE_DS_VCN_BIT 44 +#define FEATURE_BACO_CG_BIT 45 +#define FEATURE_MEM_TEMP_READ_BIT 46 +#define 
FEATURE_ATHUB_MMHUB_PG_BIT 47 +#define FEATURE_SOC_PCC_BIT 48 +#define FEATURE_EDC_PWRBRK_BIT 49 +#define FEATURE_SOC_EDC_XVMIN_BIT 50 +#define FEATURE_GFX_PSM_DIDT_BIT 51 +#define FEATURE_APT_ALL_ENABLE_BIT 52 +#define FEATURE_APT_SQ_THROTTLE_BIT 53 +#define FEATURE_APT_PF_DCS_BIT 54 +#define FEATURE_GFX_EDC_XVMIN_BIT 55 +#define FEATURE_GFX_DIDT_XVMIN_BIT 56 +#define FEATURE_FAN_ABNORMAL_BIT 57 +#define FEATURE_CLOCK_STRETCH_COMPENSATOR 58 +#define FEATURE_SPARE_59_BIT 59 +#define FEATURE_SPARE_60_BIT 60 +#define FEATURE_SPARE_61_BIT 61 +#define FEATURE_SPARE_62_BIT 62 +#define FEATURE_SPARE_63_BIT 63 +#define NUM_FEATURES 64 + +#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL +#define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \ + (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \ + (1 << FEATURE_DPM_UCLK_BIT) | \ + (1 << FEATURE_DPM_FCLK_BIT) | \ + (1 << FEATURE_DPM_SOCCLK_BIT) | \ + (1 << FEATURE_DPM_LINK_BIT) | \ + (1 << FEATURE_DPM_DCN_BIT) | \ + (1 << FEATURE_DS_GFXCLK_BIT) | \ + (1 << FEATURE_DS_SOCCLK_BIT) | \ + (1 << FEATURE_DS_FCLK_BIT) | \ + (1 << FEATURE_DS_LCLK_BIT) | \ + (1 << FEATURE_DS_DCFCLK_BIT) | \ + (1 << FEATURE_DS_UCLK_BIT) | \ + (1ULL << FEATURE_DS_VCN_BIT) + + +//For use with feature control messages +typedef enum { + FEATURE_PWR_ALL, + FEATURE_PWR_S5, + FEATURE_PWR_BACO, + FEATURE_PWR_SOC, + FEATURE_PWR_GFX, + FEATURE_PWR_DOMAIN_COUNT, +} FEATURE_PWR_DOMAIN_e; + +//For use with feature control + BTC save restore +typedef enum { + FEATURE_BTC_NOP, + FEATURE_BTC_SAVE, + FEATURE_BTC_RESTORE, + FEATURE_BTC_COUNT, +} FEATURE_BTC_e; + +// Debug Overrides Bitmask +#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001 +#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002 +#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004 +#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008 +#define DEBUG_OVERRIDE_DISABLE_FAST_FCLK_TIMER 0x00000010 +#define DEBUG_OVERRIDE_DISABLE_VCN_PG 0x00000020 +#define DEBUG_OVERRIDE_DISABLE_FMAX_VMAX 0x00000040 +#define DEBUG_OVERRIDE_DISABLE_IMU_FW_CHECKS 0x00000080 +#define DEBUG_OVERRIDE_DISABLE_D0i2_REENTRY_HSR_TIMER_CHECK 0x00000100 +#define DEBUG_OVERRIDE_DISABLE_DFLL 0x00000200 +#define DEBUG_OVERRIDE_ENABLE_RLC_VF_BRINGUP_MODE 0x00000400 +#define DEBUG_OVERRIDE_DFLL_MASTER_MODE 0x00000800 +#define DEBUG_OVERRIDE_ENABLE_PROFILING_MODE 0x00001000 +#define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000 +#define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000 +#define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000 + +// VR Mapping Bit Defines +#define VR_MAPPING_VR_SELECT_MASK 0x01 +#define VR_MAPPING_VR_SELECT_SHIFT 0x00 + +#define VR_MAPPING_PLANE_SELECT_MASK 0x02 +#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 + +// PSI Bit Defines +#define PSI_SEL_VR0_PLANE0_PSI0 0x01 +#define PSI_SEL_VR0_PLANE0_PSI1 0x02 +#define PSI_SEL_VR0_PLANE1_PSI0 0x04 +#define PSI_SEL_VR0_PLANE1_PSI1 0x08 +#define PSI_SEL_VR1_PLANE0_PSI0 0x10 +#define PSI_SEL_VR1_PLANE0_PSI1 0x20 +#define PSI_SEL_VR1_PLANE1_PSI0 0x40 +#define PSI_SEL_VR1_PLANE1_PSI1 0x80 + +typedef enum { + SVI_PSI_0, // Full phase count (default) + SVI_PSI_1, // Phase count 1st level + SVI_PSI_2, // Phase count 2nd level + SVI_PSI_3, // Single phase operation + active diode emulation + SVI_PSI_4, // Single phase operation + passive diode emulation *optional* + SVI_PSI_5, // Reserved + SVI_PSI_6, // Power down to 0V (voltage regulation disabled) + SVI_PSI_7, // Automated phase shedding and diode emulation +} SVI_PSI_e; + +// 
Throttler Control/Status Bits +#define THROTTLER_TEMP_EDGE_BIT 0 +#define THROTTLER_TEMP_HOTSPOT_BIT 1 +#define THROTTLER_TEMP_HOTSPOT_GFX_BIT 2 +#define THROTTLER_TEMP_HOTSPOT_SOC_BIT 3 +#define THROTTLER_TEMP_MEM_BIT 4 +#define THROTTLER_TEMP_VR_GFX_BIT 5 +#define THROTTLER_TEMP_VR_SOC_BIT 6 +#define THROTTLER_TEMP_VR_MEM0_BIT 7 +#define THROTTLER_TEMP_VR_MEM1_BIT 8 +#define THROTTLER_TEMP_LIQUID0_BIT 9 +#define THROTTLER_TEMP_LIQUID1_BIT 10 +#define THROTTLER_TEMP_PLX_BIT 11 +#define THROTTLER_TDC_GFX_BIT 12 +#define THROTTLER_TDC_SOC_BIT 13 +#define THROTTLER_PPT0_BIT 14 +#define THROTTLER_PPT1_BIT 15 +#define THROTTLER_PPT2_BIT 16 +#define THROTTLER_PPT3_BIT 17 +#define THROTTLER_FIT_BIT 18 +#define THROTTLER_GFX_APCC_PLUS_BIT 19 +#define THROTTLER_GFX_DVO_BIT 20 +#define THROTTLER_COUNT 21 + +// FW DState Features Control Bits +#define FW_DSTATE_SOC_ULV_BIT 0 +#define FW_DSTATE_G6_HSR_BIT 1 +#define FW_DSTATE_G6_PHY_VMEMP_OFF_BIT 2 +#define FW_DSTATE_SMN_DS_BIT 3 +#define FW_DSTATE_MP1_WHISPER_MODE_BIT 4 +#define FW_DSTATE_SOC_LIV_MIN_BIT 5 +#define FW_DSTATE_SOC_PLL_PWRDN_BIT 6 +#define FW_DSTATE_MEM_PLL_PWRDN_BIT 7 +#define FW_DSTATE_MALL_ALLOC_BIT 8 +#define FW_DSTATE_MEM_PSI_BIT 9 +#define FW_DSTATE_HSR_NON_STROBE_BIT 10 +#define FW_DSTATE_MP0_ENTER_WFI_BIT 11 +#define FW_DSTATE_MALL_FLUSH_BIT 12 +#define FW_DSTATE_SOC_PSI_BIT 13 +#define FW_DSTATE_MMHUB_INTERLOCK_BIT 14 +#define FW_DSTATE_D0i3_2_QUIET_FW_BIT 15 +#define FW_DSTATE_CLDO_PRG_BIT 16 +#define FW_DSTATE_DF_PLL_PWRDN_BIT 17 + +//LED Display Mask & Control Bits +#define LED_DISPLAY_GFX_DPM_BIT 0 +#define LED_DISPLAY_PCIE_BIT 1 +#define LED_DISPLAY_ERROR_BIT 2 + + +#define MEM_TEMP_READ_OUT_OF_BAND_BIT 0 +#define MEM_TEMP_READ_IN_BAND_REFRESH_BIT 1 +#define MEM_TEMP_READ_IN_BAND_DUMMY_PSTATE_BIT 2 + +typedef enum { + SMARTSHIFT_VERSION_1, + SMARTSHIFT_VERSION_2, + SMARTSHIFT_VERSION_3, +} SMARTSHIFT_VERSION_e; + +typedef enum { + FOPT_CALC_AC_CALC_DC, + FOPT_PPTABLE_AC_CALC_DC, + FOPT_CALC_AC_PPTABLE_DC, + FOPT_PPTABLE_AC_PPTABLE_DC, +} FOPT_CALC_e; + +typedef enum { + DRAM_BIT_WIDTH_DISABLED = 0, + DRAM_BIT_WIDTH_X_8 = 8, + DRAM_BIT_WIDTH_X_16 = 16, + DRAM_BIT_WIDTH_X_32 = 32, + DRAM_BIT_WIDTH_X_64 = 64, + DRAM_BIT_WIDTH_X_128 = 128, + DRAM_BIT_WIDTH_COUNT, +} DRAM_BIT_WIDTH_TYPE_e; + +//I2C Interface +#define NUM_I2C_CONTROLLERS 8 + +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +#define MAX_SW_I2C_COMMANDS 24 + +typedef enum { + I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0 + I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1 + I2C_CONTROLLER_PORT_COUNT, +} I2cControllerPort_e; + +typedef enum { + I2C_CONTROLLER_NAME_VR_GFX = 0, + I2C_CONTROLLER_NAME_VR_SOC, + I2C_CONTROLLER_NAME_VR_VMEMP, + I2C_CONTROLLER_NAME_VR_VDDIO, + I2C_CONTROLLER_NAME_LIQUID0, + I2C_CONTROLLER_NAME_LIQUID1, + I2C_CONTROLLER_NAME_PLX, + I2C_CONTROLLER_NAME_FAN_INTAKE, + I2C_CONTROLLER_NAME_COUNT, +} I2cControllerName_e; + +typedef enum { + I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, + I2C_CONTROLLER_THROTTLER_VR_GFX, + I2C_CONTROLLER_THROTTLER_VR_SOC, + I2C_CONTROLLER_THROTTLER_VR_VMEMP, + I2C_CONTROLLER_THROTTLER_VR_VDDIO, + I2C_CONTROLLER_THROTTLER_LIQUID0, + I2C_CONTROLLER_THROTTLER_LIQUID1, + I2C_CONTROLLER_THROTTLER_PLX, + I2C_CONTROLLER_THROTTLER_FAN_INTAKE, + I2C_CONTROLLER_THROTTLER_INA3221, + I2C_CONTROLLER_THROTTLER_COUNT, +} I2cControllerThrottler_e; + +typedef enum { + I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5, + I2C_CONTROLLER_PROTOCOL_VR_IR35217, + I2C_CONTROLLER_PROTOCOL_TMP_MAX31875, + I2C_CONTROLLER_PROTOCOL_INA3221, + 
I2C_CONTROLLER_PROTOCOL_TMP_MAX6604, + I2C_CONTROLLER_PROTOCOL_COUNT, +} I2cControllerProtocol_e; + +typedef struct { + uint8_t Enabled; + uint8_t Speed; + uint8_t SlaveAddress; + uint8_t ControllerPort; + uint8_t ControllerName; + uint8_t ThermalThrotter; + uint8_t I2cProtocol; + uint8_t PaddingConfig; +} I2cControllerConfig_t; + +typedef enum { + I2C_PORT_SVD_SCL = 0, + I2C_PORT_GPIO, +} I2cPort_e; + +typedef enum { + I2C_SPEED_FAST_50K = 0, //50 Kbits/s + I2C_SPEED_FAST_100K, //100 Kbits/s + I2C_SPEED_FAST_400K, //400 Kbits/s + I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) + I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) + I2C_SPEED_HIGH_2M, //2.3 Mbits/s + I2C_SPEED_COUNT, +} I2cSpeed_e; + +typedef enum { + I2C_CMD_READ = 0, + I2C_CMD_WRITE, + I2C_CMD_COUNT, +} I2cCmdType_e; + +#define CMDCONFIG_STOP_BIT 0 +#define CMDCONFIG_RESTART_BIT 1 +#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write + +#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) +#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) +#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT) + +typedef struct { + uint8_t ReadWriteData; //Return data for read. Data to send for write + uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command, and is a read or write +} SwI2cCmd_t; //SW I2C Command Table + +typedef struct { + uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) + uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select + uint8_t SlaveAddress; //Slave address of device + uint8_t NumCmds; //Number of commands + + SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; +} SwI2cRequest_t; // SW I2C Request Table + +typedef struct { + SwI2cRequest_t SwI2cRequest; + + uint32_t Spare[8]; + uint32_t MmHubPadding[8]; // SMU internal use +} SwI2cRequestExternal_t; + +typedef struct { + uint64_t mca_umc_status; + uint64_t mca_umc_addr; + + uint16_t ce_count_lo_chip; + uint16_t ce_count_hi_chip; + + uint32_t eccPadding; +} EccInfo_t; + +typedef struct { + EccInfo_t EccInfo[24]; +} EccInfoTable_t; + +//D3HOT sequences +typedef enum { + BACO_SEQUENCE, + MSR_SEQUENCE, + BAMACO_SEQUENCE, + ULPS_SEQUENCE, + D3HOT_SEQUENCE_COUNT, +} D3HOTSequence_e; + +//This is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_DYNAMIC_MODE = 0, + PG_STATIC_MODE, +} PowerGatingMode_e; + +//This is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_POWER_DOWN = 0, + PG_POWER_UP, +} PowerGatingSettings_e; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} QuadraticInt_t; + +typedef struct { + uint32_t m; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable +} LinearInt_t; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} DroopInt_t; + +typedef enum { + DCS_ARCH_DISABLED, + DCS_ARCH_FADCS, + DCS_ARCH_ASYNC, +} DCS_ARCH_e; + +//Only Clks that have DPM descriptors are listed here +typedef enum { + PPCLK_GFXCLK = 0, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_FCLK, + PPCLK_DCLK_0, + PPCLK_VCLK_0, + PPCLK_DISPCLK, + PPCLK_DPPCLK, + PPCLK_DPREFCLK, + PPCLK_DCFCLK, + PPCLK_DTBCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef enum { + VOLTAGE_MODE_PPTABLE = 0, + VOLTAGE_MODE_FUSES, + 
VOLTAGE_MODE_COUNT, +} VOLTAGE_MODE_e; + +typedef enum { + AVFS_VOLTAGE_GFX = 0, + AVFS_VOLTAGE_SOC, + AVFS_VOLTAGE_COUNT, +} AVFS_VOLTAGE_TYPE_e; + +typedef enum { + AVFS_TEMP_COLD = 0, + AVFS_TEMP_HOT, + AVFS_TEMP_COUNT, +} AVFS_TEMP_e; + +typedef enum { + AVFS_D_G, + AVFS_D_COUNT, +} AVFS_D_e; + + +typedef enum { + UCLK_DIV_BY_1 = 0, + UCLK_DIV_BY_2, + UCLK_DIV_BY_4, + UCLK_DIV_BY_8, +} UCLK_DIV_e; + +typedef enum { + GPIO_INT_POLARITY_ACTIVE_LOW = 0, + GPIO_INT_POLARITY_ACTIVE_HIGH, +} GpioIntPolarity_e; + +typedef enum { + PWR_CONFIG_TDP = 0, + PWR_CONFIG_TGP, + PWR_CONFIG_TCP_ESTIMATED, + PWR_CONFIG_TCP_MEASURED, + PWR_CONFIG_TBP_DESKTOP, + PWR_CONFIG_TBP_MOBILE, +} PwrConfig_e; + +typedef struct { + uint8_t Padding; + uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM + uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used + uint8_t CalculateFopt; // Indication whether FW should calculate Fopt or use values below. Reference FOPT_CALC_e + LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) + uint32_t Padding3[3]; + uint16_t Padding4; + uint16_t FoptimalDc; //Foptimal frequency in DC power mode. + uint16_t FoptimalAc; //Foptimal frequency in AC power mode. + uint16_t Padding2; +} DpmDescriptor_t; + +typedef enum { + PPT_THROTTLER_PPT0, + PPT_THROTTLER_PPT1, + PPT_THROTTLER_PPT2, + PPT_THROTTLER_PPT3, + PPT_THROTTLER_COUNT +} PPT_THROTTLER_e; + +typedef enum { + TEMP_EDGE, + TEMP_HOTSPOT, + TEMP_HOTSPOT_GFX, + TEMP_HOTSPOT_SOC, + TEMP_MEM, + TEMP_VR_GFX, + TEMP_VR_SOC, + TEMP_VR_MEM0, + TEMP_VR_MEM1, + TEMP_LIQUID0, + TEMP_LIQUID1, + TEMP_PLX, + TEMP_COUNT, +} TEMP_e; + +typedef enum { + TDC_THROTTLER_GFX, + TDC_THROTTLER_SOC, + TDC_THROTTLER_COUNT +} TDC_THROTTLER_e; + +typedef enum { + SVI_PLANE_VDD_GFX, + SVI_PLANE_VDD_SOC, + SVI_PLANE_VDDCI_MEM, + SVI_PLANE_VDDIO_MEM, + SVI_PLANE_COUNT, +} SVI_PLANE_e; + +typedef enum { + PMFW_VOLT_PLANE_GFX, + PMFW_VOLT_PLANE_SOC, + PMFW_VOLT_PLANE_COUNT +} PMFW_VOLT_PLANE_e; + +typedef enum { + CUSTOMER_VARIANT_ROW, + CUSTOMER_VARIANT_FALCON, + CUSTOMER_VARIANT_COUNT, +} CUSTOMER_VARIANT_e; + +typedef enum { + POWER_SOURCE_AC, + POWER_SOURCE_DC, + POWER_SOURCE_COUNT, +} POWER_SOURCE_e; + +typedef enum { + MEM_VENDOR_PLACEHOLDER0, // 0 + MEM_VENDOR_SAMSUNG, // 1 + MEM_VENDOR_INFINEON, // 2 + MEM_VENDOR_ELPIDA, // 3 + MEM_VENDOR_ETRON, // 4 + MEM_VENDOR_NANYA, // 5 + MEM_VENDOR_HYNIX, // 6 + MEM_VENDOR_MOSEL, // 7 + MEM_VENDOR_WINBOND, // 8 + MEM_VENDOR_ESMT, // 9 + MEM_VENDOR_PLACEHOLDER1, // 10 + MEM_VENDOR_PLACEHOLDER2, // 11 + MEM_VENDOR_PLACEHOLDER3, // 12 + MEM_VENDOR_PLACEHOLDER4, // 13 + MEM_VENDOR_PLACEHOLDER5, // 14 + MEM_VENDOR_MICRON, // 15 + MEM_VENDOR_COUNT, +} MEM_VENDOR_e; + +typedef enum { + PP_GRTAVFS_HW_CPO_CTL_ZONE0, + PP_GRTAVFS_HW_CPO_CTL_ZONE1, + PP_GRTAVFS_HW_CPO_CTL_ZONE2, + PP_GRTAVFS_HW_CPO_CTL_ZONE3, + PP_GRTAVFS_HW_CPO_CTL_ZONE4, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE0, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE0, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE1, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE1, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE2, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE2, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE3, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE3, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE4, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE4, + PP_GRTAVFS_HW_ZONE0_VF, + PP_GRTAVFS_HW_ZONE1_VF1, + PP_GRTAVFS_HW_ZONE2_VF2, + PP_GRTAVFS_HW_ZONE3_VF3, + PP_GRTAVFS_HW_VOLTAGE_GB, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE0, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE1, + 
PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE2, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE3, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE4, + PP_GRTAVFS_HW_RESERVED_0, + PP_GRTAVFS_HW_RESERVED_1, + PP_GRTAVFS_HW_RESERVED_2, + PP_GRTAVFS_HW_RESERVED_3, + PP_GRTAVFS_HW_RESERVED_4, + PP_GRTAVFS_HW_RESERVED_5, + PP_GRTAVFS_HW_RESERVED_6, + PP_GRTAVFS_HW_FUSE_COUNT, +} PP_GRTAVFS_HW_FUSE_e; + +typedef enum { + PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_COLD_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_COLD_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_COLD_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_COLD_T0, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z0, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z1, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z2, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z3, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z4, + PP_GRTAVFS_FW_COMMON_FUSE_COUNT, +} PP_GRTAVFS_FW_COMMON_FUSE_e; + +typedef enum { + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_NEG_1, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_0, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_1, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_2, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_3, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_4, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_NEG_1, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_0, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_1, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_2, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_3, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_4, + PP_GRTAVFS_FW_SEP_FUSE_VF_NEG_1_FREQUENCY, + PP_GRTAVFS_FW_SEP_FUSE_VF4_FREQUENCY, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_0, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_1, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_2, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_3, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_4, + PP_GRTAVFS_FW_SEP_FUSE_COUNT, +} PP_GRTAVFS_FW_SEP_FUSE_e; + +#define PP_NUM_RTAVFS_PWL_ZONES 5 + + +// VBIOS or PPLIB configures telemetry slope and offset. 
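A minimal sketch of how the SwI2cRequest_t / SwI2cCmd_t tables and the CMDCONFIG_* bits defined earlier in this header are typically filled for a two-byte register read; delivering the request to the PMFW (table write plus message) is outside the scope of the sketch:

/* Sketch only: write the register index, restart, then read two bytes. */
static void build_sw_i2c_reg_read(SwI2cRequest_t *req, uint8_t slave_addr, uint8_t reg)
{
	memset(req, 0, sizeof(*req));

	req->I2CcontrollerPort = I2C_CONTROLLER_PORT_0;
	req->I2CSpeed = I2C_SPEED_FAST_400K;
	req->SlaveAddress = slave_addr;
	req->NumCmds = 3;

	req->SwI2cCmds[0].ReadWriteData = reg;
	req->SwI2cCmds[0].CmdConfig = CMDCONFIG_READWRITE_MASK;  /* write phase */
	req->SwI2cCmds[1].CmdConfig = CMDCONFIG_RESTART_MASK;    /* read, restart first */
	req->SwI2cCmds[2].CmdConfig = CMDCONFIG_STOP_MASK;       /* read, then stop */
}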
Only slope expected to be set for SVI3 +// Slope Q1.7, Offset Q1.2 +typedef struct { + int8_t Offset; // in Amps + uint8_t Padding; + uint16_t MaxCurrent; // in Amps +} SviTelemetryScale_t; + +#define PP_NUM_OD_VF_CURVE_POINTS PP_NUM_RTAVFS_PWL_ZONES + 1 + +#define PP_OD_FEATURE_GFX_VF_CURVE_BIT 0 +#define PP_OD_FEATURE_GFX_VMAX_BIT 1 +#define PP_OD_FEATURE_SOC_VMAX_BIT 2 +#define PP_OD_FEATURE_PPT_BIT 3 +#define PP_OD_FEATURE_FAN_CURVE_BIT 4 +#define PP_OD_FEATURE_FAN_LEGACY_BIT 5 +#define PP_OD_FEATURE_FULL_CTRL_BIT 6 +#define PP_OD_FEATURE_TDC_BIT 7 +#define PP_OD_FEATURE_GFXCLK_BIT 8 +#define PP_OD_FEATURE_UCLK_BIT 9 +#define PP_OD_FEATURE_FCLK_BIT 10 +#define PP_OD_FEATURE_ZERO_FAN_BIT 11 +#define PP_OD_FEATURE_TEMPERATURE_BIT 12 +#define PP_OD_FEATURE_EDC_BIT 13 +#define PP_OD_FEATURE_COUNT 14 + +typedef enum { + PP_OD_POWER_FEATURE_ALWAYS_ENABLED, + PP_OD_POWER_FEATURE_DISABLED_WHILE_GAMING, + PP_OD_POWER_FEATURE_ALWAYS_DISABLED, +} PP_OD_POWER_FEATURE_e; + +typedef enum { + FAN_MODE_AUTO = 0, + FAN_MODE_MANUAL_LINEAR, +} FanMode_e; + +typedef enum { + OD_NO_ERROR, + OD_REQUEST_ADVANCED_NOT_SUPPORTED, + OD_UNSUPPORTED_FEATURE, + OD_INVALID_FEATURE_COMBO_ERROR, + OD_GFXCLK_VF_CURVE_OFFSET_ERROR, + OD_VDD_GFX_VMAX_ERROR, + OD_VDD_SOC_VMAX_ERROR, + OD_PPT_ERROR, + OD_FAN_MIN_PWM_ERROR, + OD_FAN_ACOUSTIC_TARGET_ERROR, + OD_FAN_ACOUSTIC_LIMIT_ERROR, + OD_FAN_TARGET_TEMP_ERROR, + OD_FAN_ZERO_RPM_STOP_TEMP_ERROR, + OD_FAN_CURVE_PWM_ERROR, + OD_FAN_CURVE_TEMP_ERROR, + OD_FULL_CTRL_GFXCLK_ERROR, + OD_FULL_CTRL_UCLK_ERROR, + OD_FULL_CTRL_FCLK_ERROR, + OD_FULL_CTRL_VDD_GFX_ERROR, + OD_FULL_CTRL_VDD_SOC_ERROR, + OD_TDC_ERROR, + OD_GFXCLK_ERROR, + OD_UCLK_ERROR, + OD_FCLK_ERROR, + OD_OP_TEMP_ERROR, + OD_OP_GFX_EDC_ERROR, + OD_OP_GFX_PCC_ERROR, + OD_POWER_FEATURE_CTRL_ERROR, +} OD_FAIL_e; + +typedef struct { + uint32_t FeatureCtrlMask; + + //Voltage control + int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS]; + + uint16_t VddGfxVmax; // in mV + uint16_t VddSocVmax; + + uint8_t IdlePwrSavingFeaturesCtrl; + uint8_t RuntimePwrSavingFeaturesCtrl; + uint16_t Padding; + + //Frequency changes + int16_t GfxclkFmin; // MHz + int16_t GfxclkFmax; // MHz + uint16_t UclkFmin; // MHz + uint16_t UclkFmax; // MHz + uint16_t FclkFmin; + uint16_t FclkFmax; + + //PPT + int16_t Ppt; // % + int16_t Tdc; + + //Fan control + uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS]; + uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS]; + uint16_t FanMinimumPwm; + uint16_t AcousticTargetRpmThreshold; + uint16_t AcousticLimitRpmThreshold; + uint16_t FanTargetTemperature; // Degree Celcius + uint8_t FanZeroRpmEnable; + uint8_t FanZeroRpmStopTemp; + uint8_t FanMode; + uint8_t MaxOpTemp; + + uint8_t AdvancedOdModeEnabled; + uint8_t Padding1[3]; + + uint16_t GfxVoltageFullCtrlMode; + uint16_t SocVoltageFullCtrlMode; + uint16_t GfxclkFullCtrlMode; + uint16_t UclkFullCtrlMode; + uint16_t FclkFullCtrlMode; + uint16_t Padding2; + + int16_t GfxEdc; + int16_t GfxPccLimitControl; + + uint32_t Spare[10]; + uint32_t MmHubPadding[8]; // SMU internal use. 
Adding here instead of external as a workaround +} OverDriveTable_t; + +typedef struct { + OverDriveTable_t OverDriveTable; + +} OverDriveTableExternal_t; + +typedef struct { + uint32_t FeatureCtrlMask; + + //Gfx Vf Curve + int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS]; + //gfx Vmax + uint16_t VddGfxVmax; // in mV + //soc Vmax + uint16_t VddSocVmax; + + //gfxclk + int16_t GfxclkFmin; // MHz + int16_t GfxclkFmax; // MHz + //uclk + uint16_t UclkFmin; // MHz + uint16_t UclkFmax; // MHz + //fclk + uint16_t FclkFmin; + uint16_t FclkFmax; + + //PPT + int16_t Ppt; // % + //TDC + int16_t Tdc; + + //Fan Curve + uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS]; + uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS]; + //Fan Legacy + uint16_t FanMinimumPwm; + uint16_t AcousticTargetRpmThreshold; + uint16_t AcousticLimitRpmThreshold; + uint16_t FanTargetTemperature; // Degree Celcius + //zero fan + uint8_t FanZeroRpmEnable; + //temperature + uint8_t MaxOpTemp; + uint8_t Padding[2]; + + //Full Ctrl + uint16_t GfxVoltageFullCtrlMode; + uint16_t SocVoltageFullCtrlMode; + uint16_t GfxclkFullCtrlMode; + uint16_t UclkFullCtrlMode; + uint16_t FclkFullCtrlMode; + //EDC + int16_t GfxEdc; + int16_t GfxPccLimitControl; + int16_t Padding1; + + uint32_t Spare[5]; +} OverDriveLimits_t; + +typedef enum { + BOARD_GPIO_SMUIO_0, + BOARD_GPIO_SMUIO_1, + BOARD_GPIO_SMUIO_2, + BOARD_GPIO_SMUIO_3, + BOARD_GPIO_SMUIO_4, + BOARD_GPIO_SMUIO_5, + BOARD_GPIO_SMUIO_6, + BOARD_GPIO_SMUIO_7, + BOARD_GPIO_SMUIO_8, + BOARD_GPIO_SMUIO_9, + BOARD_GPIO_SMUIO_10, + BOARD_GPIO_SMUIO_11, + BOARD_GPIO_SMUIO_12, + BOARD_GPIO_SMUIO_13, + BOARD_GPIO_SMUIO_14, + BOARD_GPIO_SMUIO_15, + BOARD_GPIO_SMUIO_16, + BOARD_GPIO_SMUIO_17, + BOARD_GPIO_SMUIO_18, + BOARD_GPIO_SMUIO_19, + BOARD_GPIO_SMUIO_20, + BOARD_GPIO_SMUIO_21, + BOARD_GPIO_SMUIO_22, + BOARD_GPIO_SMUIO_23, + BOARD_GPIO_SMUIO_24, + BOARD_GPIO_SMUIO_25, + BOARD_GPIO_SMUIO_26, + BOARD_GPIO_SMUIO_27, + BOARD_GPIO_SMUIO_28, + BOARD_GPIO_SMUIO_29, + BOARD_GPIO_SMUIO_30, + BOARD_GPIO_SMUIO_31, + MAX_BOARD_GPIO_SMUIO_NUM, + BOARD_GPIO_DC_GEN_A, + BOARD_GPIO_DC_GEN_B, + BOARD_GPIO_DC_GEN_C, + BOARD_GPIO_DC_GEN_D, + BOARD_GPIO_DC_GEN_E, + BOARD_GPIO_DC_GEN_F, + BOARD_GPIO_DC_GEN_G, + BOARD_GPIO_DC_GENLK_CLK, + BOARD_GPIO_DC_GENLK_VSYNC, + BOARD_GPIO_DC_SWAPLOCK_A, + BOARD_GPIO_DC_SWAPLOCK_B, + MAX_BOARD_DC_GPIO_NUM, + BOARD_GPIO_LV_EN, +} BOARD_GPIO_TYPE_e; + +#define INVALID_BOARD_GPIO 0xFF + + +typedef struct { + //PLL 0 + uint16_t InitImuClk; + uint16_t InitSocclk; + uint16_t InitMpioclk; + uint16_t InitSmnclk; + //PLL 1 + uint16_t InitDispClk; + uint16_t InitDppClk; + uint16_t InitDprefclk; + uint16_t InitDcfclk; + uint16_t InitDtbclk; + uint16_t InitDbguSocClk; + //PLL 2 + uint16_t InitGfxclk_bypass; + uint16_t InitMp1clk; + uint16_t InitLclk; + uint16_t InitDbguBacoClk; + uint16_t InitBaco400clk; + uint16_t InitBaco1200clk_bypass; + uint16_t InitBaco700clk_bypass; + uint16_t InitBaco500clk; + // PLL 3 + uint16_t InitDclk0; + uint16_t InitVclk0; + // PLL 4 + uint16_t InitFclk; + uint16_t Padding1; + // PLL 5 + //UCLK clocks, assumed all UCLK instances will be the same. 
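A minimal sketch of how the OverDriveTable_t above is typically used from the driver side: set the field, then flag it in FeatureCtrlMask with the matching PP_OD_FEATURE_*_BIT, keeping the value inside the published OverDriveLimits_t range; committing the table to the SMU is omitted here:

/* Sketch only: request a higher GFXCLK ceiling via overdrive. */
static void od_request_gfxclk_fmax(OverDriveTable_t *od,
				   const OverDriveLimits_t *basic_max,
				   int16_t fmax_mhz)
{
	if (fmax_mhz > basic_max->GfxclkFmax)
		fmax_mhz = basic_max->GfxclkFmax;

	od->GfxclkFmax = fmax_mhz;
	od->FeatureCtrlMask |= 1u << PP_OD_FEATURE_GFXCLK_BIT;
}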
+ uint8_t InitUclkLevel; // =0,1,2,3,4,5 frequency from FreqTableUclk + + uint8_t Padding[3]; + + uint32_t InitVcoFreqPll0; //smu_socclk_t + uint32_t InitVcoFreqPll1; //smu_displayclk_t + uint32_t InitVcoFreqPll2; //smu_nbioclk_t + uint32_t InitVcoFreqPll3; //smu_vcnclk_t + uint32_t InitVcoFreqPll4; //smu_fclk_t + uint32_t InitVcoFreqPll5; //smu_uclk_01_t + uint32_t InitVcoFreqPll6; //smu_uclk_23_t + uint32_t InitVcoFreqPll7; //smu_uclk_45_t + uint32_t InitVcoFreqPll8; //smu_uclk_67_t + + //encoding will be SVI3 + uint16_t InitGfx; // In mV(Q2) , should be 0? + uint16_t InitSoc; // In mV(Q2) + uint16_t InitVddIoMem; // In mV(Q2) MemVdd + uint16_t InitVddCiMem; // In mV(Q2) VMemP + + //uint16_t Padding2; + + uint32_t Spare[8]; +} BootValues_t; + +typedef struct { + uint16_t Power[PPT_THROTTLER_COUNT][POWER_SOURCE_COUNT]; // Watts + uint16_t Tdc[TDC_THROTTLER_COUNT]; // Amps + + uint16_t Temperature[TEMP_COUNT]; // Celsius + + uint8_t PwmLimitMin; + uint8_t PwmLimitMax; + uint8_t FanTargetTemperature; + uint8_t Spare1[1]; + + uint16_t AcousticTargetRpmThresholdMin; + uint16_t AcousticTargetRpmThresholdMax; + + uint16_t AcousticLimitRpmThresholdMin; + uint16_t AcousticLimitRpmThresholdMax; + + uint16_t PccLimitMin; + uint16_t PccLimitMax; + + uint16_t FanStopTempMin; + uint16_t FanStopTempMax; + uint16_t FanStartTempMin; + uint16_t FanStartTempMax; + + uint16_t PowerMinPpt0[POWER_SOURCE_COUNT]; + uint32_t Spare[11]; +} MsgLimits_t; + +typedef struct { + uint16_t BaseClockAc; + uint16_t GameClockAc; + uint16_t BoostClockAc; + uint16_t BaseClockDc; + uint16_t GameClockDc; + uint16_t BoostClockDc; + + uint32_t Reserved[4]; +} DriverReportedClocks_t; + +typedef struct { + uint8_t DcBtcEnabled; + uint8_t Padding[3]; + + uint16_t DcTol; // mV Q2 + uint16_t DcBtcGb; // mV Q2 + + uint16_t DcBtcMin; // mV Q2 + uint16_t DcBtcMax; // mV Q2 + + LinearInt_t DcBtcGbScalar; +} AvfsDcBtcParams_t; + +typedef struct { + uint16_t AvfsTemp[AVFS_TEMP_COUNT]; //in degrees C + uint16_t VftFMin; // in MHz + uint16_t VInversion; // in mV Q2 + QuadraticInt_t qVft[AVFS_TEMP_COUNT]; + QuadraticInt_t qAvfsGb; + QuadraticInt_t qAvfsGb2; +} AvfsFuseOverride_t; + +//all settings maintained by PFE team +typedef struct { + uint8_t Version; + uint8_t Spare8[3]; + // SECTION: Feature Control + uint32_t FeaturesToRun[NUM_FEATURES / 32]; // Features that PMFW will attempt to enable. Use FEATURE_*_BIT as mapping + // SECTION: FW DSTATE Settings + uint32_t FwDStateMask; // See FW_DSTATE_*_BIT for mapping + // SECTION: Advanced Options + uint32_t DebugOverrides; + + uint32_t Spare[2]; +} PFE_Settings_t; + +typedef struct { + // SECTION: Version + uint32_t Version; // should be unique to each SKU(i.e if any value changes in below structure then this value must be different) + + // SECTION: Miscellaneous Configuration + uint8_t TotalPowerConfig; // Determines how PMFW calculates the power. Use defines from PwrConfig_e + uint8_t CustomerVariant; //To specify if this PPTable is intended for a particular customer. Use defines from CUSTOMER_VARIANT_e + uint8_t MemoryTemperatureTypeMask; // Bit mapping indicating which methods of memory temperature reading are enabled. Use defines from MEM_TEMP_*BIT + uint8_t SmartShiftVersion; // Determine what SmartShift feature version is supported Use defines from SMARTSHIFT_VERSION_e + + // SECTION: Infrastructure Limits + uint8_t SocketPowerLimitSpare[10]; + + //if set to 1, SocketPowerLimitAc and SocketPowerLimitDc will be interpreted as legacy programs(i.e absolute power). 
If 0, all except index 0 will be scalars + //relative index 0 + uint8_t EnableLegacyPptLimit; + uint8_t UseInputTelemetry; //applicable to SVI3 only and only to be set if VRs support + + uint8_t SmartShiftMinReportedPptinDcs; //minimum possible active power consumption for this SKU. Used for SmartShift power reporting + + uint8_t PaddingPpt[7]; + + uint16_t HwCtfTempLimit; // In degrees Celsius. Temperature above which HW will trigger CTF. Consumed by VBIOS only + + uint16_t PaddingInfra; + + // Per year normalized Vmax state failure rates (sum of the two domains divided by life time in years) + uint32_t FitControllerFailureRateLimit; //in IEEE float + //Expected GFX Duty Cycle at Vmax. + uint32_t FitControllerGfxDutyCycle; // in IEEE float + //Expected SOC Duty Cycle at Vmax. + uint32_t FitControllerSocDutyCycle; // in IEEE float + + //This offset will be deducted from the controller output to before it goes through the SOC Vset limiter block. + uint32_t FitControllerSocOffset; //in IEEE float + + uint32_t GfxApccPlusResidencyLimit; // Percentage value. Used by APCC+ controller to control PCC residency to some value + + // SECTION: Throttler settings + uint32_t ThrottlerControlMask; // See THROTTLER_*_BIT for mapping + + + // SECTION: Voltage Control Parameters + uint16_t UlvVoltageOffset[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2). ULV offset used in either GFX_ULV or SOC_ULV(part of FW_DSTATE) + + uint8_t Padding[2]; + uint16_t DeepUlvVoltageOffsetSoc; // In mV(Q2) Long Idle Vmin (deep ULV), for VDD_SOC as part of FW_DSTATE + + // Voltage Limits + uint16_t DefaultMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage without FIT controller enabled + uint16_t BoostMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage with FIT controller enabled + + //Vmin Optimizations + int16_t VminTempHystersis[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature hysteresis for switching between low/high temperature values for Vmin + int16_t VminTempThreshold[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature threshold for switching between low/high temperature values for Vmin + uint16_t Vmin_Hot_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at hot. + uint16_t Vmin_Cold_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at cold. + uint16_t Vmin_Hot_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at hot. + uint16_t Vmin_Cold_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at cold. + uint16_t Vmin_Aging_Offset[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Worst-case aging margin + uint16_t Spare_Vmin_Plat_Offset_Hot[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Hot + uint16_t Spare_Vmin_Plat_Offset_Cold[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Cold + + //This is a fixed/minimum VMIN aging degradation offset which is applied at T0. This reflects the minimum amount of aging already accounted for. + uint16_t VcBtcFixedVminAgingOffset[PMFW_VOLT_PLANE_COUNT]; + //Linear offset or GB term to account for mis-correlation between PSM and Vmin shift trends across parts. + uint16_t VcBtcVmin2PsmDegrationGb[PMFW_VOLT_PLANE_COUNT]; + //Scalar coefficient of the PSM aging degradation function + uint32_t VcBtcPsmA[PMFW_VOLT_PLANE_COUNT]; // A_PSM + //Exponential coefficient of the PSM aging degradation function + uint32_t VcBtcPsmB[PMFW_VOLT_PLANE_COUNT]; // B_PSM + //Scalar coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold. 
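Many of the voltage fields in this table are annotated "In mV(Q2)"; a minimal conversion sketch, assuming the usual SMU Q-format reading of two fractional bits (raw units of 0.25 mV):

/* Sketch only: mV(Q2) fixed-point helpers (Q2 convention assumed). */
static inline uint16_t mv_q2_to_mv(uint16_t mv_q2)
{
	return mv_q2 >> 2;   /* drop the two fractional bits */
}

static inline uint16_t mv_to_mv_q2(uint16_t mv)
{
	return mv << 2;
}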
+ uint32_t VcBtcVminA[PMFW_VOLT_PLANE_COUNT]; // A_VMIN + //Exponential coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold. + uint32_t VcBtcVminB[PMFW_VOLT_PLANE_COUNT]; // B_VMIN + + uint8_t PerPartVminEnabled[PMFW_VOLT_PLANE_COUNT]; + uint8_t VcBtcEnabled[PMFW_VOLT_PLANE_COUNT]; + + uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms + uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms + + QuadraticInt_t Gfx_Vmin_droop; + QuadraticInt_t Soc_Vmin_droop; + uint32_t SpareVmin[6]; + + //SECTION: DPM Configuration 1 + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableShadowUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz + uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz + + uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz + + uint16_t GfxclkAibFmax; + uint16_t GfxclkFreqCap; + + //GFX Idle Power Settings + uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in Mhz + uint16_t GfxclkFgfxoffExitImu; // Exit/Entry in IMU stage (BYPASS), in Mhz + uint16_t GfxclkFgfxoffExitRlc; // Exit in RLC stage (PLL), in Mhz + uint16_t GfxclkThrottleClock; //Used primarily in DCS + uint8_t EnableGfxPowerStagesGpio; //Genlk_vsync GPIO flag used to control gfx power stages + uint8_t GfxIdlePadding; + + uint8_t SmsRepairWRCKClkDivEn; + uint8_t SmsRepairWRCKClkDivVal; + uint8_t GfxOffEntryEarlyMGCGEn; + uint8_t GfxOffEntryForceCGCGEn; + uint8_t GfxOffEntryForceCGCGDelayEn; + uint8_t GfxOffEntryForceCGCGDelayVal; // in microseconds + + uint16_t GfxclkFreqGfxUlv; // in MHz + uint8_t GfxIdlePadding2[2]; + uint32_t GfxOffEntryHysteresis; //For RLC to count after it enters CGCG, and before triggers GFXOFF entry + uint32_t GfxoffSpare[15]; + + // DFLL + uint16_t DfllMstrOscConfigA; //Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias + uint16_t DfllSlvOscConfigA; //Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias + uint32_t DfllBtcMasterScalerM; + int32_t DfllBtcMasterScalerB; + uint32_t DfllBtcSlaveScalerM; + int32_t DfllBtcSlaveScalerB; + + uint32_t DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg + uint32_t DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg + uint32_t GfxDfllSpare[9]; + + // DVO + uint32_t DvoPsmDownThresholdVoltage; //Voltage float + uint32_t DvoPsmUpThresholdVoltage; //Voltage float + uint32_t DvoFmaxLowScaler; //Unitless float + + // GFX DCS + uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase + uint16_t PaddingDcs; + + uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase + uint16_t DcsMaxGfxOffTime; 
//Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch. + + uint32_t DcsMinCreditAccum; //Min amount of positive credit accumulation before waking GFX up as part of DCS. + + uint16_t DcsExitHysteresis; //The min amount of time power credit accumulator should have a value > 0 before SMU exits the DCS throttling phase. + uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin. + + uint32_t DcsPfGfxFopt; //Default to GFX FMIN + uint32_t DcsPfUclkFopt; //Default to UCLK FMIN + + uint8_t FoptEnabled; + uint8_t DcsSpare2[3]; + uint32_t DcsFoptM; //Tuning paramters to shift Fopt calculation, IEEE754 float + uint32_t DcsFoptB; //Tuning paramters to shift Fopt calculation, IEEE754 float + uint32_t DcsSpare[9]; + + // UCLK section + uint8_t UseStrobeModeOptimizations; //Set to indicate that FW should use strobe mode optimizations + uint8_t PaddingMem[3]; + + uint8_t UclkDpmPstates [NUM_UCLK_DPM_LEVELS]; // 6 Primary SW DPM states (6 + 6 Shadow) + uint8_t UclkDpmShadowPstates [NUM_UCLK_DPM_LEVELS]; // 6 Shadow SW DPM states (6 + 6 Shadow) + uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8 + uint8_t FreqTableShadowUclkDiv [NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8 + uint16_t MemVmempVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + uint16_t MemVddioVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + uint16_t DalDcModeMaxUclkFreq; + uint8_t PaddingsMem[2]; + //FCLK Section + uint16_t FclkDpmDisallowPstateFreq; //Frequency which FW will target when indicated that display config cannot support P-state. Set to 0 use FW calculated value + uint16_t PaddingFclk; + + // Link DPM Settings + uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5 + uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 + uint16_t LclkFreq[NUM_LINK_LEVELS]; + + // SECTION: VDD_GFX AVFS + uint8_t OverrideGfxAvfsFuses; + uint8_t GfxAvfsPadding[3]; + + uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain + uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding + //uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; + uint32_t spare_HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; + + uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT]; + uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT]; + + uint32_t SocFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT]; + uint32_t GfxL2FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT]; + //uint32_t GfxSeFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT]; + uint32_t spare_FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT]; + + uint32_t Soc_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Soc_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Soc_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Soc_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES]; + + uint32_t Gfx_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Gfx_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Gfx_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Gfx_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES]; + + uint32_t Gfx_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Soc_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES]; + + uint32_t dGbV_dT_vmin; + uint32_t dGbV_dT_vmax; + + //Unused: PMFW-9370 + uint32_t V2F_vmin_range_low; + uint32_t V2F_vmin_range_high; + uint32_t V2F_vmax_range_low; + uint32_t V2F_vmax_range_high; + + AvfsDcBtcParams_t DcBtcGfxParams; + 
QuadraticInt_t SSCurve_GFX; + uint32_t GfxAvfsSpare[29]; + + //SECTION: VDD_SOC AVFS + uint8_t OverrideSocAvfsFuses; + uint8_t MinSocAvfsRevision; + uint8_t SocAvfsPadding[2]; + + AvfsFuseOverride_t SocAvfsFuseOverride[AVFS_D_COUNT]; + + DroopInt_t dBtcGbSoc[AVFS_D_COUNT]; // GHz->V BtcGb + + LinearInt_t qAgingGb[AVFS_D_COUNT]; // GHz->V + + QuadraticInt_t qStaticVoltageOffset[AVFS_D_COUNT]; // GHz->V + + AvfsDcBtcParams_t DcBtcSocParams[AVFS_D_COUNT]; + + QuadraticInt_t SSCurve_SOC; + uint32_t SocAvfsSpare[29]; + + //SECTION: Boot clock and voltage values + BootValues_t BootValues; + + //SECTION: Driver Reported Clocks + DriverReportedClocks_t DriverReportedClocks; + + //SECTION: Message Limits + MsgLimits_t MsgLimits; + + //SECTION: OverDrive Limits + OverDriveLimits_t OverDriveLimitsBasicMin; + OverDriveLimits_t OverDriveLimitsBasicMax; + OverDriveLimits_t OverDriveLimitsAdvancedMin; + OverDriveLimits_t OverDriveLimitsAdvancedMax; + + // Section: Total Board Power idle vs active coefficients + uint8_t TotalBoardPowerSupport; + uint8_t TotalBoardPowerPadding[1]; + uint16_t TotalBoardPowerRoc; + + //PMFW-11158 + QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT]; + QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT]; + QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT]; + + // APT GFX to UCLK mapping + int32_t AptUclkGfxclkLookup[POWER_SOURCE_COUNT][6]; + uint32_t AptUclkGfxclkLookupHyst[POWER_SOURCE_COUNT][6]; + uint32_t AptPadding; + + // Xvmin didt + QuadraticInt_t GfxXvminDidtDroopThresh; + uint32_t GfxXvminDidtResetDDWait; + uint32_t GfxXvminDidtClkStopWait; + uint32_t GfxXvminDidtFcsStepCtrl; + uint32_t GfxXvminDidtFcsWaitCtrl; + + // PSM based didt controller + uint32_t PsmModeEnabled; //0: all disabled 1: static mode only 2: dynamic mode only 3:static + dynamic mode + uint32_t P2v_a; // floating point in U32 format + uint32_t P2v_b; + uint32_t P2v_c; + uint32_t T2p_a; + uint32_t T2p_b; + uint32_t T2p_c; + uint32_t P2vTemp; + QuadraticInt_t PsmDidtStaticSettings; + QuadraticInt_t PsmDidtDynamicSettings; + uint8_t PsmDidtAvgDiv; + uint8_t PsmDidtForceStall; + uint16_t PsmDidtReleaseTimer; + uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog + // CAC EDC + uint32_t Leakage_C0; // in IEEE float + uint32_t Leakage_C1; // in IEEE float + uint32_t Leakage_C2; // in IEEE float + uint32_t Leakage_C3; // in IEEE float + uint32_t Leakage_C4; // in IEEE float + uint32_t Leakage_C5; // in IEEE float + uint32_t GFX_CLK_SCALAR; // in IEEE float + uint32_t GFX_CLK_INTERCEPT; // in IEEE float + uint32_t GFX_CAC_M; // in IEEE float + uint32_t GFX_CAC_B; // in IEEE float + uint32_t VDD_GFX_CurrentLimitGuardband; // in IEEE float + uint32_t DynToTotalCacScalar; // in IEEE + // GFX EDC XVMIN + uint32_t XVmin_Gfx_EdcThreshScalar; + uint32_t XVmin_Gfx_EdcEnableFreq; + uint32_t XVmin_Gfx_EdcPccAsStepCtrl; + uint32_t XVmin_Gfx_EdcPccAsWaitCtrl; + uint16_t XVmin_Gfx_EdcThreshold; + uint16_t XVmin_Gfx_EdcFiltHysWaitCtrl; + // SOC EDC XVMIN + uint32_t XVmin_Soc_EdcThreshScalar; + uint32_t XVmin_Soc_EdcEnableFreq; + uint32_t XVmin_Soc_EdcThreshold; // LPF: number of cycles Xvmin_trig_filt will react. + uint16_t XVmin_Soc_EdcStepUpTime; // 10 bit, refclk count to step up throttle when PCC remains asserted. + uint16_t XVmin_Soc_EdcStepDownTime;// 10 bit, refclk count to step down throttle when PCC remains asserted. + uint8_t XVmin_Soc_EdcInitPccStep; // 3 bit, First Pcc Step number that will applied when PCC asserts. 
+ uint8_t PaddingSocEdc[3]; + + // Fuse Override for SOC and GFX XVMIN + uint8_t GfxXvminFuseOverride; + uint8_t SocXvminFuseOverride; + uint8_t PaddingXvminFuseOverride[2]; + uint8_t GfxXvminFddTempLow; // bit 7: sign, bit 0-6: ABS value + uint8_t GfxXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value + uint8_t SocXvminFddTempLow; // bit 7: sign, bit 0-6: ABS value + uint8_t SocXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value + + + uint16_t GfxXvminFddVolt0; // low voltage, in VID + uint16_t GfxXvminFddVolt1; // mid voltage, in VID + uint16_t GfxXvminFddVolt2; // high voltage, in VID + uint16_t SocXvminFddVolt0; // low voltage, in VID + uint16_t SocXvminFddVolt1; // mid voltage, in VID + uint16_t SocXvminFddVolt2; // high voltage, in VID + uint16_t GfxXvminDsFddDsm[6]; // XVMIN DS, same organization with fuse + uint16_t GfxXvminEdcFddDsm[6];// XVMIN GFX EDC, same organization with fuse + uint16_t SocXvminEdcFddDsm[6];// XVMIN SOC EDC, same organization with fuse + + // SECTION: Sku Reserved + uint32_t Spare; + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; +} SkuTable_t; + +typedef struct { + uint8_t SlewRateConditions; + uint8_t LoadLineAdjust; + uint8_t VoutOffset; + uint8_t VidMax; + uint8_t VidMin; + uint8_t TenBitTelEn; + uint8_t SixteenBitTelEn; + uint8_t OcpThresh; + uint8_t OcpWarnThresh; + uint8_t OcpSettings; + uint8_t VrhotThresh; + uint8_t OtpThresh; + uint8_t UvpOvpDeltaRef; + uint8_t PhaseShed; + uint8_t Padding[10]; + uint32_t SettingOverrideMask; +} Svi3RegulatorSettings_t; + +typedef struct { + // SECTION: Version + uint32_t Version; //should be unique to each board type + + // SECTION: I2C Control + I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; + + //SECTION SVI3 Board Parameters + uint8_t SlaveAddrMapping[SVI_PLANE_COUNT]; + uint8_t VrPsiSupport[SVI_PLANE_COUNT]; + + uint32_t Svi3SvcSpeed; + uint8_t EnablePsi6[SVI_PLANE_COUNT]; // only applicable in SVI3 + + // SECTION: Voltage Regulator Settings + Svi3RegulatorSettings_t Svi3RegSettings[SVI_PLANE_COUNT]; + + // SECTION: GPIO Settings + uint8_t LedOffGpio; + uint8_t FanOffGpio; + uint8_t GfxVrPowerStageOffGpio; + + uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching + uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching + uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event + uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event + + uint8_t GthrGpio; // GPIO pin configured for GTHR Event + uint8_t GthrPolarity; // replace GPIO polarity for GTHR + + // LED Display Settings + uint8_t LedPin0; // GPIO number for LedPin[0] + uint8_t LedPin1; // GPIO number for LedPin[1] + uint8_t LedPin2; // GPIO number for LedPin[2] + uint8_t LedEnableMask; + + uint8_t LedPcie; // GPIO number for PCIE results + uint8_t LedError; // GPIO number for Error Cases + uint8_t PaddingLed; + + // SECTION: Clock Spread Spectrum + + // UCLK Spread Spectrum + uint8_t UclkTrainingModeSpreadPercent; // Q4.4 + uint8_t UclkSpreadPadding; + uint16_t UclkSpreadFreq; // kHz + + // UCLK Spread Spectrum + uint8_t UclkSpreadPercent[MEM_VENDOR_COUNT]; + + // DFLL Spread Spectrum + uint8_t GfxclkSpreadEnable; + + // FCLK Spread Spectrum + uint8_t FclkSpreadPercent; // Q4.4 + uint16_t FclkSpreadFreq; // kHz + + // Section: Memory Config + uint8_t DramWidth; // Width of interface to the channel for each DRAM module. 
See DRAM_BIT_WIDTH_TYPE_e + uint8_t PaddingMem1[7]; + + // SECTION: UMC feature flags + uint8_t HsrEnabled; + uint8_t VddqOffEnabled; + uint8_t PaddingUmcFlags[2]; + + uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued + uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS + + uint8_t FuseWritePowerMuxPresent; + uint8_t FuseWritePadding[3]; + + // SECTION: EDC Params + uint32_t LoadlineGfx; + uint32_t LoadlineSoc; + uint32_t GfxEdcLimit; + uint32_t SocEdcLimit; + + uint32_t RestBoardPower; //power consumed by board that is not captured by the SVI3 input telemetry + uint32_t ConnectorsImpedance; // impedance of the input ATX power connectors + + uint8_t EpcsSens0; //GPIO number for External Power Connector Support Sense0 + uint8_t EpcsSens1; //GPIO Number for External Power Connector Support Sense1 + uint8_t PaddingEpcs[2]; + + // SECTION: Board Reserved + uint32_t BoardSpare[52]; + + // SECTION: Structure Padding + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; +} BoardTable_t; + +typedef struct { + // SECTION: Infrastructure Limits + uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in AC mode. Multiple limits supported + + uint16_t VrTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. Current limit associated with VR regulator maximum temperature + + int16_t TotalIdleBoardPowerM; + int16_t TotalIdleBoardPowerB; + int16_t TotalBoardPowerM; + int16_t TotalBoardPowerB; + + uint16_t TemperatureLimit[TEMP_COUNT]; // In degrees Celsius. Temperature limit associated with each input + + // SECTION: Fan Control + uint16_t FanStopTemp[TEMP_COUNT]; //Celsius + uint16_t FanStartTemp[TEMP_COUNT]; //Celsius + + uint16_t FanGain[TEMP_COUNT]; + + uint16_t FanPwmMin; + uint16_t AcousticTargetRpmThreshold; + uint16_t AcousticLimitRpmThreshold; + uint16_t FanMaximumRpm; + uint16_t MGpuAcousticLimitRpmThreshold; + uint16_t FanTargetGfxclk; + uint32_t TempInputSelectMask; + uint8_t FanZeroRpmEnable; + uint8_t FanTachEdgePerRev; + uint16_t FanPadding; + uint16_t FanTargetTemperature[TEMP_COUNT]; + + // The following are AFC override parameters. Leave at 0 to use FW defaults. + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t FuzzyFan_Reserved; + + uint16_t FwCtfLimit[TEMP_COUNT]; + + uint16_t IntakeTempEnableRPM; + int16_t IntakeTempOffsetTemp; + uint16_t IntakeTempReleaseTemp; + uint16_t IntakeTempHighIntakeAcousticLimit; + + uint16_t IntakeTempAcouticLimitReleaseRate; + int16_t FanAbnormalTempLimitOffset; // FanStalledTempLimitOffset + uint16_t FanStalledTriggerRpm; // + uint16_t FanAbnormalTriggerRpmCoeff; // FanAbnormalTriggerRpm + + uint16_t FanSpare[1]; + uint8_t FanIntakeSensorSupport; + uint8_t FanIntakePadding; + uint32_t FanAmbientPerfBoostThreshold; + uint32_t FanSpare2[12]; + + uint16_t TemperatureLimit_Hynix; // In degrees Celsius. Memory temperature limit associated with Hynix + uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron + uint16_t TemperatureFwCtfLimit_Hynix; + uint16_t TemperatureFwCtfLimit_Micron; + + // SECTION: Board Reserved + uint16_t PlatformTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. 
Current limit associated with platform maximum temperature per VR current rail + uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in DC mode. Multiple limits supported + uint16_t SocketPowerLimitSmartShift2; // In Watts. Power limit used SmartShift + uint16_t CustomSkuSpare16b; + uint32_t CustomSkuSpare32b[10]; + + // SECTION: Structure Padding + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; +} CustomSkuTable_t; + +typedef struct { + PFE_Settings_t PFE_Settings; + SkuTable_t SkuTable; + CustomSkuTable_t CustomSkuTable; + BoardTable_t BoardTable; +} PPTable_t; + +typedef struct { + // Time constant parameters for clock averages in ms + uint16_t GfxclkAverageLpfTau; + uint16_t FclkAverageLpfTau; + uint16_t UclkAverageLpfTau; + uint16_t GfxActivityLpfTau; + uint16_t UclkActivityLpfTau; + uint16_t UclkMaxActivityLpfTau; + uint16_t SocketPowerLpfTau; + uint16_t VcnClkAverageLpfTau; + uint16_t VcnUsageAverageLpfTau; + uint16_t PcieActivityLpTau; +} DriverSmuConfig_t; + +typedef struct { + DriverSmuConfig_t DriverSmuConfig; + + uint32_t Spare[8]; + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} DriverSmuConfigExternal_t; + + +typedef struct { + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz + uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz + + uint16_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz + + uint16_t Padding; + + uint32_t Spare[32]; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use + +} DriverInfoTable_t; + +typedef struct { + uint32_t CurrClock[PPCLK_COUNT]; + + uint16_t AverageGfxclkFrequencyTarget; + uint16_t AverageGfxclkFrequencyPreDs; + uint16_t AverageGfxclkFrequencyPostDs; + uint16_t AverageFclkFrequencyPreDs; + uint16_t AverageFclkFrequencyPostDs; + uint16_t AverageMemclkFrequencyPreDs ; // this is scaled to actual memory clock + uint16_t AverageMemclkFrequencyPostDs ; // this is scaled to actual memory clock + uint16_t AverageVclk0Frequency ; + uint16_t AverageDclk0Frequency ; + uint16_t AverageVclk1Frequency ; + uint16_t AverageDclk1Frequency ; + uint16_t PCIeBusy ; + uint16_t dGPU_W_MAX ; + uint16_t padding ; + + uint32_t MetricsCounter ; + + uint16_t AvgVoltage[SVI_PLANE_COUNT]; + uint16_t AvgCurrent[SVI_PLANE_COUNT]; + + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint16_t Vcn0ActivityPercentage ; + uint16_t Vcn1ActivityPercentage ; + + uint32_t EnergyAccumulator; + uint16_t AverageSocketPower; + uint16_t AverageTotalBoardPower; + + uint16_t AvgTemperature[TEMP_COUNT]; + uint16_t AvgTemperatureFanIntake; + + uint8_t PcieRate ; + uint8_t PcieWidth ; + + uint8_t AvgFanPwm; + uint8_t Padding[1]; + uint16_t AvgFanRpm; + + + uint8_t ThrottlingPercentage[THROTTLER_COUNT]; + uint8_t padding1[3]; + + //metrics for D3hot entry/exit and driver ARM msgs + uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT]; + uint32_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT]; + 
uint32_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT]; + + uint16_t ApuSTAPMSmartShiftLimit; + uint16_t ApuSTAPMLimit; + uint16_t AvgApuSocketPower; + + uint16_t AverageUclkActivity_MAX; + + uint32_t PublicSerialNumberLower; + uint32_t PublicSerialNumberUpper; + +} SmuMetrics_t; + +typedef struct { + SmuMetrics_t SmuMetrics; + uint32_t Spare[30]; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetricsExternal_t; + +typedef struct { + uint8_t WmSetting; + uint8_t Flags; + uint8_t Padding[2]; + +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 + +typedef enum { + WATERMARKS_CLOCK_RANGE = 0, + WATERMARKS_DUMMY_PSTATE, + WATERMARKS_MALL, + WATERMARKS_COUNT, +} WATERMARKS_FLAGS_e; + +typedef struct { + // Watermarks + WatermarkRowGeneric_t WatermarkRow[NUM_WM_RANGES]; +} Watermarks_t; + +typedef struct { + Watermarks_t Watermarks; + uint32_t Spare[16]; + + uint32_t MmHubPadding[8]; // SMU internal use +} WatermarksExternal_t; + +typedef struct { + uint16_t avgPsmCount[76]; + uint16_t minPsmCount[76]; + uint16_t maxPsmCount[76]; + float avgPsmVoltage[76]; + float minPsmVoltage[76]; + float maxPsmVoltage[76]; +} AvfsDebugTable_t; + +typedef struct { + AvfsDebugTable_t AvfsDebugTable; + + uint32_t MmHubPadding[8]; // SMU internal use +} AvfsDebugTableExternal_t; + + +typedef struct { + uint8_t Gfx_ActiveHystLimit; + uint8_t Gfx_IdleHystLimit; + uint8_t Gfx_FPS; + uint8_t Gfx_MinActiveFreqType; + uint8_t Gfx_BoosterFreqType; + uint8_t PaddingGfx; + uint16_t Gfx_MinActiveFreq; // MHz + uint16_t Gfx_BoosterFreq; // MHz + uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Gfx_PD_Data_limit_a; // Q16 + uint32_t Gfx_PD_Data_limit_b; // Q16 + uint32_t Gfx_PD_Data_limit_c; // Q16 + uint32_t Gfx_PD_Data_error_coeff; // Q16 + uint32_t Gfx_PD_Data_error_rate_coeff; // Q16 + + uint8_t Fclk_ActiveHystLimit; + uint8_t Fclk_IdleHystLimit; + uint8_t Fclk_FPS; + uint8_t Fclk_MinActiveFreqType; + uint8_t Fclk_BoosterFreqType; + uint8_t PaddingFclk; + uint16_t Fclk_MinActiveFreq; // MHz + uint16_t Fclk_BoosterFreq; // MHz + uint16_t Fclk_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Fclk_PD_Data_limit_a; // Q16 + uint32_t Fclk_PD_Data_limit_b; // Q16 + uint32_t Fclk_PD_Data_limit_c; // Q16 + uint32_t Fclk_PD_Data_error_coeff; // Q16 + uint32_t Fclk_PD_Data_error_rate_coeff; // Q16 + + uint32_t Mem_UpThreshold_Limit[NUM_UCLK_DPM_LEVELS]; // Q16 + uint8_t Mem_UpHystLimit[NUM_UCLK_DPM_LEVELS]; + uint16_t Mem_DownHystLimit[NUM_UCLK_DPM_LEVELS]; + uint16_t Mem_Fps; + +} DpmActivityMonitorCoeffInt_t; + + +typedef struct { + DpmActivityMonitorCoeffInt_t DpmActivityMonitorCoeffInt; + uint32_t MmHubPadding[8]; // SMU internal use +} DpmActivityMonitorCoeffIntExternal_t; + + + +// Workload bits +#define WORKLOAD_PPLIB_DEFAULT_BIT 0 +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 +#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 +#define WORKLOAD_PPLIB_VIDEO_BIT 3 +#define WORKLOAD_PPLIB_VR_BIT 4 +#define WORKLOAD_PPLIB_COMPUTE_BIT 5 +#define WORKLOAD_PPLIB_CUSTOM_BIT 6 +#define WORKLOAD_PPLIB_WINDOW_3D_BIT 7 +#define WORKLOAD_PPLIB_DIRECT_ML_BIT 8 +#define WORKLOAD_PPLIB_CGVDI_BIT 9 +#define WORKLOAD_PPLIB_COUNT 10 + + +// These defines are used with the following messages: +// SMC_MSG_TransferTableDram2Smu +// SMC_MSG_TransferTableSmu2Dram + +// Table transfer status +#define TABLE_TRANSFER_OK 0x0 +#define TABLE_TRANSFER_FAILED 0xFF +#define TABLE_TRANSFER_PENDING 0xAB + +// Table types +#define TABLE_PPTABLE 0 +#define 
TABLE_COMBO_PPTABLE 1 +#define TABLE_WATERMARKS 2 +#define TABLE_AVFS_PSM_DEBUG 3 +#define TABLE_PMSTATUSLOG 4 +#define TABLE_SMU_METRICS 5 +#define TABLE_DRIVER_SMU_CONFIG 6 +#define TABLE_ACTIVITY_MONITOR_COEFF 7 +#define TABLE_OVERDRIVE 8 +#define TABLE_I2C_COMMANDS 9 +#define TABLE_DRIVER_INFO 10 +#define TABLE_ECCINFO 11 +#define TABLE_CUSTOM_SKUTABLE 12 +#define TABLE_COUNT 13 + +//IH Interrupt ID +#define IH_INTERRUPT_ID_TO_DRIVER 0xFE +#define IH_INTERRUPT_CONTEXT_ID_BACO 0x2 +#define IH_INTERRUPT_CONTEXT_ID_AC 0x3 +#define IH_INTERRUPT_CONTEXT_ID_DC 0x4 +#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5 +#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6 +#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7 +#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 +#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h new file mode 100644 index 000000000000..de2e442281ff --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h @@ -0,0 +1,140 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef SMU_V14_0_2_PPSMC_H +#define SMU_V14_0_2_PPSMC_H + +#define PPSMC_VERSION 0x1 + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +// BASIC +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4 +#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5 +#define PPSMC_MSG_EnableAllSmuFeatures 0x6 +#define PPSMC_MSG_DisableAllSmuFeatures 0x7 +#define PPSMC_MSG_EnableSmuFeaturesLow 0x8 +#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9 +#define PPSMC_MSG_DisableSmuFeaturesLow 0xA +#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB +#define PPSMC_MSG_GetRunningSmuFeaturesLow 0xC +#define PPSMC_MSG_GetRunningSmuFeaturesHigh 0xD +#define PPSMC_MSG_SetDriverDramAddrHigh 0xE +#define PPSMC_MSG_SetDriverDramAddrLow 0xF +#define PPSMC_MSG_SetToolsDramAddrHigh 0x10 +#define PPSMC_MSG_SetToolsDramAddrLow 0x11 +#define PPSMC_MSG_TransferTableSmu2Dram 0x12 +#define PPSMC_MSG_TransferTableDram2Smu 0x13 +#define PPSMC_MSG_UseDefaultPPTable 0x14 + +//BACO/BAMACO/BOMACO +#define PPSMC_MSG_EnterBaco 0x15 +#define PPSMC_MSG_ExitBaco 0x16 +#define PPSMC_MSG_ArmD3 0x17 +#define PPSMC_MSG_BacoAudioD3PME 0x18 + +//DPM +#define PPSMC_MSG_SetSoftMinByFreq 0x19 +#define PPSMC_MSG_SetSoftMaxByFreq 0x1A +#define PPSMC_MSG_SetHardMinByFreq 0x1B +#define PPSMC_MSG_SetHardMaxByFreq 0x1C +#define PPSMC_MSG_GetMinDpmFreq 0x1D +#define PPSMC_MSG_GetMaxDpmFreq 0x1E +#define PPSMC_MSG_GetDpmFreqByIndex 0x1F +#define PPSMC_MSG_OverridePcieParameters 0x20 + +//DramLog Set DramAddr +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x21 +#define PPSMC_MSG_DramLogSetDramAddrLow 0x22 +#define PPSMC_MSG_DramLogSetDramSize 0x23 +#define PPSMC_MSG_SetWorkloadMask 0x24 + +#define PPSMC_MSG_GetVoltageByDpm 0x25 // Can be removed +#define PPSMC_MSG_SetVideoFps 0x26 // Can be removed +#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x27 + +//Power Gating +#define PPSMC_MSG_AllowGfxOff 0x28 +#define PPSMC_MSG_DisallowGfxOff 0x29 +#define PPSMC_MSG_PowerUpVcn 0x2A +#define PPSMC_MSG_PowerDownVcn 0x2B +#define PPSMC_MSG_PowerUpJpeg 0x2C +#define PPSMC_MSG_PowerDownJpeg 0x2D + +//Resets +#define PPSMC_MSG_PrepareMp1ForUnload 0x2E +#define PPSMC_MSG_Mode1Reset 0x2F + +//Set SystemVirtual DramAddrHigh +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x30 +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x31 +//ACDC Power Source +#define PPSMC_MSG_SetPptLimit 0x32 +#define PPSMC_MSG_GetPptLimit 0x33 +#define PPSMC_MSG_ReenableAcDcInterrupt 0x34 +#define PPSMC_MSG_NotifyPowerSource 0x35 + +//BTC +#define PPSMC_MSG_RunDcBtc 0x36 + +// 0x37 + +//Others +#define PPSMC_MSG_SetTemperatureInputSelect 0x38 // Can be removed +#define PPSMC_MSG_SetFwDstatesMask 0x39 +#define PPSMC_MSG_SetThrottlerMask 0x3A + +#define PPSMC_MSG_SetExternalClientDfCstateAllow 0x3B + +#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x3C + +//STB to dram log +#define PPSMC_MSG_DumpSTBtoDram 0x3D +#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x3E +#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x3F +#define PPSMC_MSG_STBtoDramLogSetDramSize 0x40 +#define PPSMC_MSG_SetOBMTraceBufferLogging 0x41 + +#define PPSMC_MSG_AllowGfxDcs 0x43 +#define PPSMC_MSG_DisallowGfxDcs 0x44 +#define PPSMC_MSG_EnableAudioStutterWA 0x45 +#define PPSMC_MSG_PowerUpUmsch 0x46 +#define PPSMC_MSG_PowerDownUmsch 0x47 +#define 
PPSMC_MSG_SetDcsArch 0x48 +#define PPSMC_MSG_TriggerVFFLR 0x49 +#define PPSMC_MSG_SetNumBadMemoryPagesRetired 0x4A +#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4B +#define PPSMC_MSG_SetPriorityDeltaGain 0x4C +#define PPSMC_MSG_AllowIHHostInterrupt 0x4D +#define PPSMC_MSG_Mode3Reset 0x4F +#define PPSMC_Message_Count 0x50 +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 6cdfee5052d9..1fc4557e6fb4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -28,7 +28,7 @@ #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6 -#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1 +#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x25 #define FEATURE_MASK(feature) (1ULL << feature) @@ -39,7 +39,8 @@ #define MP1_SRAM 0x03c00004 /* address block */ -#define smnMP1_FIRMWARE_FLAGS 0x3010028 +#define smnMP1_FIRMWARE_FLAGS_14_0_0 0x3010028 +#define smnMP1_FIRMWARE_FLAGS 0x3010024 #define smnMP1_PUB_CTRL 0x3010d10 #define MAX_DPM_LEVELS 16 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h new file mode 100644 index 000000000000..4a3fde89aed7 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h @@ -0,0 +1,164 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU_14_0_2_PPTABLE_H +#define SMU_14_0_2_PPTABLE_H + + +#pragma pack(push, 1) + +#define SMU_14_0_2_TABLE_FORMAT_REVISION 3 + +// POWERPLAYTABLE::ulPlatformCaps +#define SMU_14_0_2_PP_PLATFORM_CAP_POWERPLAY 0x1 // This cap indicates whether CCC need to show Powerplay page. +#define SMU_14_0_2_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2 // This cap indicates whether power source notification is done by SBIOS instead of OS. +#define SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC 0x4 // This cap indicates whether DC mode notification is done by GPIO pin directly. +#define SMU_14_0_2_PP_PLATFORM_CAP_BACO 0x8 // This cap indicates whether board supports the BACO circuitry. +#define SMU_14_0_2_PP_PLATFORM_CAP_MACO 0x10 // This cap indicates whether board supports the MACO circuitry. +#define SMU_14_0_2_PP_PLATFORM_CAP_SHADOWPSTATE 0x20 // This cap indicates whether board supports the Shadow Pstate. 
+#define SMU_14_0_2_PP_PLATFORM_CAP_LEDSUPPORTED 0x40 // This cap indicates whether board supports the LED. +#define SMU_14_0_2_PP_PLATFORM_CAP_MOBILEOVERDRIVE 0x80 // This cap indicates whether board supports the Mobile Overdrive. + +// SMU_14_0_2_PP_THERMALCONTROLLER - Thermal Controller Type +#define SMU_14_0_2_PP_THERMALCONTROLLER_NONE 0 + +#define SMU_14_0_2_PP_OVERDRIVE_VERSION 0x1 // TODO: FIX OverDrive Version TBD +#define SMU_14_0_2_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00 + +enum SMU_14_0_2_OD_SW_FEATURE_CAP +{ + SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT = 0, + SMU_14_0_2_ODCAP_POWER_MODE = 1, + SMU_14_0_2_ODCAP_AUTO_UV_ENGINE = 2, + SMU_14_0_2_ODCAP_AUTO_OC_ENGINE = 3, + SMU_14_0_2_ODCAP_AUTO_OC_MEMORY = 4, + SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE = 5, + SMU_14_0_2_ODCAP_MANUAL_AC_TIMING = 6, + SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER = 7, + SMU_14_0_2_ODCAP_AUTO_SOC_UV = 8, + SMU_14_0_2_ODCAP_COUNT = 9, +}; + +enum SMU_14_0_2_OD_SW_FEATURE_ID +{ + SMU_14_0_2_ODFEATURE_AUTO_FAN_ACOUSTIC_LIMIT = 1 << SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, // Auto Fan Acoustic RPM + SMU_14_0_2_ODFEATURE_POWER_MODE = 1 << SMU_14_0_2_ODCAP_POWER_MODE, // Optimized GPU Power Mode + SMU_14_0_2_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_UV_ENGINE, // Auto Under Volt GFXCLK + SMU_14_0_2_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_OC_ENGINE, // Auto Over Clock GFXCLK + SMU_14_0_2_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_14_0_2_ODCAP_AUTO_OC_MEMORY, // Auto Over Clock MCLK + SMU_14_0_2_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE, // Auto AC Timing Tuning + SMU_14_0_2_ODFEATURE_MANUAL_AC_TIMING = 1 << SMU_14_0_2_ODCAP_MANUAL_AC_TIMING, // Manual fine grain AC Timing tuning + SMU_14_0_2_ODFEATURE_AUTO_VF_CURVE_OPTIMIZER = 1 << SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER, // Fine grain auto VF curve tuning + SMU_14_0_2_ODFEATURE_AUTO_SOC_UV = 1 << SMU_14_0_2_ODCAP_AUTO_SOC_UV, // Auto Under Volt VDDSOC +}; + +#define SMU_14_0_2_MAX_ODFEATURE 32 // Maximum Number of OD Features + +enum SMU_14_0_2_OD_SW_FEATURE_SETTING_ID +{ + SMU_14_0_2_ODSETTING_AUTO_FAN_ACOUSTIC_LIMIT = 0, + SMU_14_0_2_ODSETTING_POWER_MODE = 1, + SMU_14_0_2_ODSETTING_AUTOUVENGINE = 2, + SMU_14_0_2_ODSETTING_AUTOOCENGINE = 3, + SMU_14_0_2_ODSETTING_AUTOOCMEMORY = 4, + SMU_14_0_2_ODSETTING_ACTIMING = 5, + SMU_14_0_2_ODSETTING_MANUAL_AC_TIMING = 6, + SMU_14_0_2_ODSETTING_AUTO_VF_CURVE_OPTIMIZER = 7, + SMU_14_0_2_ODSETTING_AUTO_SOC_UV = 8, + SMU_14_0_2_ODSETTING_COUNT = 9, +}; +#define SMU_14_0_2_MAX_ODSETTING 64 // Maximum Number of ODSettings + +enum SMU_14_0_2_PWRMODE_SETTING +{ + SMU_14_0_2_PMSETTING_POWER_LIMIT_QUIET = 0, + SMU_14_0_2_PMSETTING_POWER_LIMIT_BALANCE, + SMU_14_0_2_PMSETTING_POWER_LIMIT_TURBO, + SMU_14_0_2_PMSETTING_POWER_LIMIT_RAGE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_QUIET, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_BALANCE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_TURBO, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_RAGE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_QUIET, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_BALANCE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_TURBO, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_RAGE, + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_QUIET, + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_BALANCE, + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_TURBO, + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_RAGE, +}; +#define SMU_14_0_2_MAX_PMSETTING 32 // Maximum Number of PowerMode Settings + +enum SMU_14_0_2_overdrive_table_id +{ + SMU_14_0_2_OVERDRIVE_TABLE_BASIC = 
0, + SMU_14_0_2_OVERDRIVE_TABLE_ADVANCED = 1, + SMU_14_0_2_OVERDRIVE_TABLE_COUNT = 2, +}; + +struct smu_14_0_2_overdrive_table +{ + uint8_t revision; // Revision = SMU_14_0_2_PP_OVERDRIVE_VERSION + uint8_t reserve[3]; // Zero filled field reserved for future use + uint8_t cap[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODFEATURE]; // OD feature support flags + int32_t max[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // maximum settings + int32_t min[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // minimum settings + int16_t pm_setting[SMU_14_0_2_MAX_PMSETTING]; // Optimized power mode feature settings +}; + +struct smu_14_0_2_powerplay_table +{ + struct atom_common_table_header header; // header.format_revision = 3 (HAS TO MATCH SMU_14_0_2_TABLE_FORMAT_REVISION), header.content_revision = ? structuresize is calculated by PPGen. + uint8_t table_revision; // PPGen use only: table_revision = 3 + uint8_t padding; // Padding 1 byte to align table_size offset to 6 bytes (pmfw_start_offset, for PMFW to know the starting offset of PPTable_t). + uint16_t pmfw_pptable_start_offset; // The start offset of the pmfw portion. i.e. start of PPTable_t (start of SkuTable_t) + uint16_t pmfw_pptable_size; // The total size of pmfw_pptable, i.e PPTable_t. + uint16_t pmfw_pfe_table_start_offset; // The start offset of the PFE_Settings_t within pmfw_pptable. + uint16_t pmfw_pfe_table_size; // The size of PFE_Settings_t. + uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t within pmfw_pptable. + uint16_t pmfw_board_table_size; // The size of BoardTable_t. + uint16_t pmfw_custom_sku_table_start_offset; // The start offset of the CustomSkuTable_t within pmfw_pptable. + uint16_t pmfw_custom_sku_table_size; // The size of the CustomSkuTable_t. + uint32_t golden_pp_id; // PPGen use only: PP Table ID on the Golden Data Base + uint32_t golden_revision; // PPGen use only: PP Table Revision on the Golden Data Base + uint16_t format_id; // PPGen use only: PPTable for different ASICs. + uint32_t platform_caps; // POWERPLAYTABLE::ulPlatformCaps + + uint8_t thermal_controller_type; // one of smu_14_0_2_PP_THERMALCONTROLLER + + uint16_t small_power_limit1; + uint16_t small_power_limit2; + uint16_t boost_power_limit; // For Gemini Board, when the slave adapter is in BACO mode, the master adapter will use this boost power limit instead of the default power limit to boost the power limit. 
+ uint16_t software_shutdown_temp; + + uint8_t reserve[143]; // Zero filled field reserved for future use + + struct smu_14_0_2_overdrive_table overdrive_table; + + PPTable_t smc_pptable; // PPTable_t in driver_if.h -- as requested by PMFW, this offset should start at a 32-byte boundary, and the table_size above should remain at offset=6 bytes +}; + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 16179e170874..bc1f3fe4d82a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -3118,12 +3118,25 @@ static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev, return 0; } +static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank) +{ + int error_code; + + if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) + error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]); + else + error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]); + + return error_code & 0xff; +} + static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = { .max_ue_bank_count = 12, .max_ce_bank_count = 12, .set_debug_mode = aca_smu_set_debug_mode, .get_valid_aca_count = aca_smu_get_valid_aca_count, .get_valid_aca_bank = aca_smu_get_valid_aca_bank, + .parse_error_code = aca_smu_parse_error_code, }; static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile index ddbac5c655f7..4593e29e8ff8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile @@ -23,7 +23,7 @@ # Makefile for the 'smu manager' sub-component of powerplay. # It provides the smu management services for the driver. -SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o +SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o smu_v14_0_2_ppt.o AMD_SWSMU_SMU14MGR = $(addprefix $(AMD_SWSMU_PATH)/smu14/,$(SMU14_MGR)) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 2c3517397b14..3bc9662fbd28 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -38,8 +38,13 @@ #include "amdgpu_ras.h" #include "smu_cmn.h" -#include "asic_reg/mp/mp_14_0_0_offset.h" -#include "asic_reg/mp/mp_14_0_0_sh_mask.h" +#include "asic_reg/mp/mp_14_0_2_offset.h" +#include "asic_reg/mp/mp_14_0_2_sh_mask.h" + +#define regMP1_SMN_IH_SW_INT_mp1_14_0_0 0x0341 +#define regMP1_SMN_IH_SW_INT_mp1_14_0_0_BASE_IDX 0 +#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0 0x0342 +#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0 /* * DO NOT use these for err/warn/info/debug messages. 
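Illustrative sketch (not part of the patch): the smu_14_0_2_powerplay_table layout added earlier in this patch locates the PMFW-consumed tables through explicit byte offsets (pmfw_pptable_start_offset, pmfw_board_table_start_offset, ...) rather than through fixed structure offsets. A minimal sketch of how a consumer might resolve the PMFW pptable from a raw powerplay-table blob, assuming the offset is relative to the start of the powerplay table and using a hypothetical helper name:

static const PPTable_t *
example_resolve_pmfw_pptable(const struct smu_14_0_2_powerplay_table *pp)
{
	/* Per the field comments in smu_v14_0_2_pptable.h,
	 * pmfw_pptable_start_offset points at the start of PPTable_t,
	 * which in turn begins with SkuTable_t. */
	const uint8_t *base = (const uint8_t *)pp;

	return (const PPTable_t *)(base + pp->pmfw_pptable_start_offset);
}

The same pattern would apply to the board and custom-SKU sub-tables, whose start offsets the header describes as lying within the pmfw pptable; pmfw_pptable_size could be used to bounds-check the result against the size of the blob read from the VBIOS.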
@@ -52,6 +57,7 @@ #undef pr_debug MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin"); +MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin"); #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 @@ -59,7 +65,7 @@ int smu_v14_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; char fw_name[30]; - char ucode_prefix[15]; + char ucode_prefix[30]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; const struct common_firmware_header *header; @@ -106,7 +112,6 @@ void smu_v14_0_fini_microcode(struct smu_context *smu) int smu_v14_0_load_microcode(struct smu_context *smu) { -#if 0 struct amdgpu_device *adev = smu->adev; const uint32_t *src; const struct smc_firmware_header_v1_0 *hdr; @@ -131,8 +136,12 @@ int smu_v14_0_load_microcode(struct smu_context *smu) 1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK); for (i = 0; i < adev->usec_timeout; i++) { - mp1_fw_flags = RREG32_PCIE(MP1_Public | - (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff)); + else + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) break; @@ -142,9 +151,7 @@ int smu_v14_0_load_microcode(struct smu_context *smu) if (i == adev->usec_timeout) return -ETIME; -#endif return 0; - } int smu_v14_0_init_pptable_microcode(struct smu_context *smu) @@ -165,6 +172,10 @@ int smu_v14_0_init_pptable_microcode(struct smu_context *smu) if (!adev->scpm_enabled) return 0; + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2)) || + (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 3))) + return 0; + /* override pptable_id from driver parameter */ if (amdgpu_smu_pptable_id >= 0) { pptable_id = amdgpu_smu_pptable_id; @@ -198,7 +209,11 @@ int smu_v14_0_check_fw_status(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; uint32_t mp1_fw_flags; - mp1_fw_flags = RREG32_PCIE(MP1_Public | + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff)); + else + mp1_fw_flags = RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> @@ -227,16 +242,16 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) adev->pm.fw_version = smu_version; switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { - case IP_VERSION(14, 0, 2): - smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; - break; case IP_VERSION(14, 0, 0): smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; break; case IP_VERSION(14, 0, 1): smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1; break; - + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): + smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; + break; default: dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", amdgpu_ip_version(adev, MP1_HWIP, 0)); @@ -738,9 +753,9 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable) struct amdgpu_device *adev = smu->adev; switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { - case IP_VERSION(14, 0, 2): case IP_VERSION(14, 0, 0): case IP_VERSION(14, 0, 1): + case IP_VERSION(14, 0, 2): if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) @@ -841,9 +856,15 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device 
*adev, // TODO /* For MP1 SW irqs */ - val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); - WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val); + } else { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + } break; case AMDGPU_IRQ_STATE_ENABLE: @@ -851,14 +872,25 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev, // TODO /* For MP1 SW irqs */ - val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); - WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); - - val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); - WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0, val); + + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val); + } else { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); + + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + } break; default: @@ -868,11 +900,32 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev, return 0; } +#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ +#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */ + static int smu_v14_0_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - // TODO + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t client_id = entry->client_id; + uint32_t src_id = entry->src_id; + + if (client_id == SOC15_IH_CLIENTID_THM) { + switch (src_id) { + case THM_11_0__SRCID__THM_DIG_THERM_L2H: + schedule_delayed_work(&smu->swctf_delayed_work, + msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY)); + break; + case THM_11_0__SRCID__THM_DIG_THERM_H2L: + dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n"); + break; + default: + dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n", + src_id); + break; + } + } return 0; } @@ -894,7 +947,17 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu) irq_src->num_types = 1; irq_src->funcs = &smu_v14_0_irq_funcs; - // TODO: THM related + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, + THM_11_0__SRCID__THM_DIG_THERM_L2H, + irq_src); + if (ret) + return ret; + + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, + 
THM_11_0__SRCID__THM_DIG_THERM_H2L, + irq_src); + if (ret) + return ret; ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, SMU_IH_INTERRUPT_ID_TO_DRIVER, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c new file mode 100644 index 000000000000..706265220292 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -0,0 +1,1796 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#define SWSMU_CODE_LAYER_L2 + +#include <linux/firmware.h> +#include <linux/pci.h> +#include <linux/i2c.h> +#include "amdgpu.h" +#include "amdgpu_smu.h" +#include "atomfirmware.h" +#include "amdgpu_atomfirmware.h" +#include "amdgpu_atombios.h" +#include "smu_v14_0.h" +#include "smu14_driver_if_v14_0.h" +#include "soc15_common.h" +#include "atom.h" +#include "smu_v14_0_2_ppt.h" +#include "smu_v14_0_2_pptable.h" +#include "smu_v14_0_2_ppsmc.h" +#include "mp/mp_14_0_2_offset.h" +#include "mp/mp_14_0_2_sh_mask.h" + +#include "smu_cmn.h" +#include "amdgpu_ras.h" + +/* + * DO NOT use these for err/warn/info/debug messages. + * Use dev_err, dev_warn, dev_info and dev_dbg instead. + * They are more MGPU friendly. 
+ */ +#undef pr_err +#undef pr_warn +#undef pr_info +#undef pr_debug + +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) + +#define FEATURE_MASK(feature) (1ULL << feature) +#define SMC_DPM_FEATURE ( \ + FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_FCLK_BIT)) + +#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 + +static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = { + MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), + MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), + MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), + MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0), + MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0), + MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0), + MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), + MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1), + MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1), + MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1), + MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1), + MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1), + MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1), + MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), + MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), + MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), + MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), + MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), + MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), + MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0), + MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), + MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), + MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), + MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), + MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), + MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), + MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), + MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), + MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), + MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), + MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), + MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1), + MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0), + MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), + MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), + MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), + MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), + MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), + MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), + MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), + MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), + MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), + MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 
0), + MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), + MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), + MSG_MAP(SetNumBadMemoryPagesRetired, PPSMC_MSG_SetNumBadMemoryPagesRetired, 0), + MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel, + PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0), + MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0), + MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0), +}; + +static struct cmn2asic_mapping smu_v14_0_2_clk_map[SMU_CLK_COUNT] = { + CLK_MAP(GFXCLK, PPCLK_GFXCLK), + CLK_MAP(SCLK, PPCLK_GFXCLK), + CLK_MAP(SOCCLK, PPCLK_SOCCLK), + CLK_MAP(FCLK, PPCLK_FCLK), + CLK_MAP(UCLK, PPCLK_UCLK), + CLK_MAP(MCLK, PPCLK_UCLK), + CLK_MAP(VCLK, PPCLK_VCLK_0), + CLK_MAP(DCLK, PPCLK_DCLK_0), +}; + +static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] = { + FEA_MAP(FW_DATA_READ), + FEA_MAP(DPM_GFXCLK), + FEA_MAP(DPM_GFX_POWER_OPTIMIZER), + FEA_MAP(DPM_UCLK), + FEA_MAP(DPM_FCLK), + FEA_MAP(DPM_SOCCLK), + FEA_MAP(DPM_LINK), + FEA_MAP(DPM_DCN), + FEA_MAP(VMEMP_SCALING), + FEA_MAP(VDDIO_MEM_SCALING), + FEA_MAP(DS_GFXCLK), + FEA_MAP(DS_SOCCLK), + FEA_MAP(DS_FCLK), + FEA_MAP(DS_LCLK), + FEA_MAP(DS_DCFCLK), + FEA_MAP(DS_UCLK), + FEA_MAP(GFX_ULV), + FEA_MAP(FW_DSTATE), + FEA_MAP(GFXOFF), + FEA_MAP(BACO), + FEA_MAP(MM_DPM), + FEA_MAP(SOC_MPCLK_DS), + FEA_MAP(BACO_MPCLK_DS), + FEA_MAP(THROTTLERS), + FEA_MAP(SMARTSHIFT), + FEA_MAP(GTHR), + FEA_MAP(ACDC), + FEA_MAP(VR0HOT), + FEA_MAP(FW_CTF), + FEA_MAP(FAN_CONTROL), + FEA_MAP(GFX_DCS), + FEA_MAP(GFX_READ_MARGIN), + FEA_MAP(LED_DISPLAY), + FEA_MAP(GFXCLK_SPREAD_SPECTRUM), + FEA_MAP(OUT_OF_BAND_MONITOR), + FEA_MAP(OPTIMIZED_VMIN), + FEA_MAP(GFX_IMU), + FEA_MAP(BOOT_TIME_CAL), + FEA_MAP(GFX_PCC_DFLL), + FEA_MAP(SOC_CG), + FEA_MAP(DF_CSTATE), + FEA_MAP(GFX_EDC), + FEA_MAP(BOOT_POWER_OPT), + FEA_MAP(CLOCK_POWER_DOWN_BYPASS), + FEA_MAP(DS_VCN), + FEA_MAP(BACO_CG), + FEA_MAP(MEM_TEMP_READ), + FEA_MAP(ATHUB_MMHUB_PG), + FEA_MAP(SOC_PCC), + [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, + [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, + [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT}, +}; + +static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = { + TAB_MAP(PPTABLE), + TAB_MAP(WATERMARKS), + TAB_MAP(AVFS_PSM_DEBUG), + TAB_MAP(PMSTATUSLOG), + TAB_MAP(SMU_METRICS), + TAB_MAP(DRIVER_SMU_CONFIG), + TAB_MAP(ACTIVITY_MONITOR_COEFF), + [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, + TAB_MAP(I2C_COMMANDS), + TAB_MAP(ECCINFO), +}; + +static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { + PWR_MAP(AC), + PWR_MAP(DC), +}; + +static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT), +}; + +#if 0 +static const uint8_t smu_v14_0_2_throttler_map[] = { + [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT), + [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT), + [THROTTLER_PPT2_BIT] = 
(SMU_THROTTLER_PPT2_BIT), + [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT), + [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT), + [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT), + [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT), + [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT), + [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), + [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), + [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT), + [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT), + [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT), + [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT), + [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT), + [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT), + [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT), +}; +#endif + +static int +smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu, + uint32_t *feature_mask, uint32_t num) +{ + struct amdgpu_device *adev = smu->adev; + /*u32 smu_version;*/ + + if (num > 2) + return -EINVAL; + + memset(feature_mask, 0xff, sizeof(uint32_t) * num); + + if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) { + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT); + } +#if 0 + if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) || + !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT); + + if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT); + + /* PMFW 78.58 contains a critical fix for gfxoff feature */ + smu_cmn_get_smc_version(smu, NULL, &smu_version); + if ((smu_version < 0x004e3a00) || + !(adev->pm.pp_feature & PP_GFXOFF_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT); + + if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) { + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT); + } + + if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT); + + if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT); + } + + if (!(adev->pm.pp_feature & PP_ULV_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT); +#endif + + return 0; +} + +static int smu_v14_0_2_check_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_14_0_2_powerplay_table *powerplay_table = + table_context->power_play_table; + struct smu_baco_context *smu_baco = &smu->smu_baco; + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + const OverDriveLimits_t * const overdrive_lowerlimits = + &pptable->SkuTable.OverDriveLimitsBasicMin; + + if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC) + smu->dc_controlled_by_gpio = true; + + if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_BACO) { + smu_baco->platform_support = true; + + if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_MACO) + smu_baco->maco_support = true; + } + + if 
(!overdrive_lowerlimits->FeatureCtrlMask || + !overdrive_upperlimits->FeatureCtrlMask) + smu->od_enabled = false; + + table_context->thermal_controller_type = + powerplay_table->thermal_controller_type; + + /* + * Instead of having its own buffer space and getting overdrive_table copied, + * smu->od_settings just points to the actual overdrive_table + */ + smu->od_settings = &powerplay_table->overdrive_table; + + smu->adev->pm.no_fan = + !(pptable->PFE_Settings.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT)); + + return 0; +} + +static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_14_0_2_powerplay_table *powerplay_table = + table_context->power_play_table; + + memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable, + sizeof(PPTable_t)); + + return 0; +} + +#ifndef atom_smc_dpm_info_table_14_0_0 +struct atom_smc_dpm_info_table_14_0_0 { + struct atom_common_table_header table_header; + BoardTable_t BoardTable; +}; +#endif + +static int smu_v14_0_2_append_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *smc_pptable = table_context->driver_pptable; + struct atom_smc_dpm_info_table_14_0_0 *smc_dpm_table; + BoardTable_t *BoardTable = &smc_pptable->BoardTable; + int index, ret; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + smc_dpm_info); + + ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL, + (uint8_t **)&smc_dpm_table); + if (ret) + return ret; + + memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t)); + + return 0; +} + +#if 0 +static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, + void **table, + uint32_t *size) +{ + struct smu_table_context *smu_table = &smu->smu_table; + void *combo_pptable = smu_table->combo_pptable; + int ret = 0; + + ret = smu_cmn_get_combo_pptable(smu); + if (ret) + return ret; + + *table = combo_pptable; + *size = sizeof(struct smu_14_0_powerplay_table); + + return 0; +} +#endif + +static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, + void **table, + uint32_t *size) +{ + struct smu_table_context *smu_table = &smu->smu_table; + void *combo_pptable = smu_table->combo_pptable; + int ret = 0; + + ret = smu_cmn_get_combo_pptable(smu); + if (ret) + return ret; + + *table = combo_pptable; + *size = sizeof(struct smu_14_0_2_powerplay_table); + + return 0; +} + +static int smu_v14_0_2_setup_pptable(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct amdgpu_device *adev = smu->adev; + int ret = 0; + + if (amdgpu_sriov_vf(smu->adev)) + return 0; + + if (!adev->scpm_enabled) + ret = smu_v14_0_setup_pptable(smu); + else + ret = smu_v14_0_2_get_pptable_from_pmfw(smu, + &smu_table->power_play_table, + &smu_table->power_play_table_size); + if (ret) + return ret; + + ret = smu_v14_0_2_store_powerplay_table(smu); + if (ret) + return ret; + + /* + * With SCPM enabled, the operation below will be handled + * by PSP. Driver involvement is unnecessary and useless. 
+ */ + if (!adev->scpm_enabled) { + ret = smu_v14_0_2_append_powerplay_table(smu); + if (ret) + return ret; + } + + ret = smu_v14_0_2_check_powerplay_table(smu); + if (ret) + return ret; + + return ret; +} + +static int smu_v14_0_2_tables_init(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *tables = smu_table->tables; + + SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, + sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + + smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); + if (!smu_table->metrics_table) + goto err0_out; + smu_table->metrics_time = 0; + + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3); + smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); + if (!smu_table->gpu_metrics_table) + goto err1_out; + + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); + if (!smu_table->watermarks_table) + goto err2_out; + + smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL); + if (!smu_table->ecc_table) + goto err3_out; + + return 0; + +err3_out: + kfree(smu_table->watermarks_table); +err2_out: + kfree(smu_table->gpu_metrics_table); +err1_out: + kfree(smu_table->metrics_table); +err0_out: + return -ENOMEM; +} + +static int smu_v14_0_2_allocate_dpm_context(struct smu_context *smu) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + + smu_dpm->dpm_context = kzalloc(sizeof(struct smu_14_0_dpm_context), + GFP_KERNEL); + if (!smu_dpm->dpm_context) + return -ENOMEM; + + smu_dpm->dpm_context_size = sizeof(struct smu_14_0_dpm_context); + + return 0; +} + +static int smu_v14_0_2_init_smc_tables(struct smu_context *smu) +{ + int ret = 0; + + ret = smu_v14_0_2_tables_init(smu); + if (ret) + return ret; + + ret = smu_v14_0_2_allocate_dpm_context(smu); + if (ret) + return ret; + + return smu_v14_0_init_smc_tables(smu); +} + +static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu) +{ + struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; + struct smu_14_0_dpm_table *dpm_table; + struct smu_14_0_pcie_table *pcie_table; + uint32_t link_level; + int ret = 0; + + /* socclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.soc_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_SOCCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100; + 
dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* gfxclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.gfx_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_GFXCLK, + dpm_table); + if (ret) + return ret; + + /* + * Update the reported maximum shader clock to the value which is + * guaranteed to be achievable on all cards. This is aligned with the + * Windows setting. Since that value may not be the peak frequency the + * card can actually reach, it is normal for a real-time clock + * frequency reading to exceed this labelled maximum clock frequency + * (for example in the pp_dpm_sclk sysfs output). + */ + if (skutable->DriverReportedClocks.GameClockAc && + (dpm_table->dpm_levels[dpm_table->count - 1].value > + skutable->DriverReportedClocks.GameClockAc)) { + dpm_table->dpm_levels[dpm_table->count - 1].value = + skutable->DriverReportedClocks.GameClockAc; + dpm_table->max = skutable->DriverReportedClocks.GameClockAc; + } + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* uclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.uclk_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_UCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* fclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.fclk_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_FCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* vclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.vclk_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_VCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* dclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.dclk_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_DCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* lclk dpm table setup */ + pcie_table = &dpm_context->dpm_tables.pcie_table; + pcie_table->num_of_link_levels = 0; + for (link_level = 0; link_level < 
NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + + pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + } + + return 0; +} + +static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu) +{ + int ret = 0; + uint64_t feature_enabled; + + ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); + if (ret) + return false; + + return !!(feature_enabled & SMC_DPM_FEATURE); +} + +static void smu_v14_0_2_dump_pptable(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + PFE_Settings_t *PFEsettings = &pptable->PFE_Settings; + + dev_info(smu->adev->dev, "Dumped PPTable:\n"); + + dev_info(smu->adev->dev, "Version = 0x%08x\n", PFEsettings->Version); + dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", PFEsettings->FeaturesToRun[0]); + dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", PFEsettings->FeaturesToRun[1]); +} + +static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics) +{ + uint32_t throttler_status = 0; + int i; + + for (i = 0; i < THROTTLER_COUNT; i++) + throttler_status |= + (metrics->ThrottlingPercentage[i] ? 1U << i : 0); + + return throttler_status; +} + +#define SMU_14_0_2_BUSY_THRESHOLD 5 +static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table = &smu->smu_table; + SmuMetrics_t *metrics = + &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); + int ret = 0; + + ret = smu_cmn_get_metrics_table(smu, + NULL, + false); + if (ret) + return ret; + + switch (member) { + case METRICS_CURR_GFXCLK: + *value = metrics->CurrClock[PPCLK_GFXCLK]; + break; + case METRICS_CURR_SOCCLK: + *value = metrics->CurrClock[PPCLK_SOCCLK]; + break; + case METRICS_CURR_UCLK: + *value = metrics->CurrClock[PPCLK_UCLK]; + break; + case METRICS_CURR_VCLK: + *value = metrics->CurrClock[PPCLK_VCLK_0]; + break; + case METRICS_CURR_DCLK: + *value = metrics->CurrClock[PPCLK_DCLK_0]; + break; + case METRICS_CURR_FCLK: + *value = metrics->CurrClock[PPCLK_FCLK]; + break; + case METRICS_CURR_DCEFCLK: + *value = metrics->CurrClock[PPCLK_DCFCLK]; + break; + case METRICS_AVERAGE_GFXCLK: + if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD) + *value = metrics->AverageGfxclkFrequencyPostDs; + else + *value = metrics->AverageGfxclkFrequencyPreDs; + break; + case METRICS_AVERAGE_FCLK: + if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD) + *value = metrics->AverageFclkFrequencyPostDs; + else + *value = metrics->AverageFclkFrequencyPreDs; + break; + case METRICS_AVERAGE_UCLK: + if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD) + *value = metrics->AverageMemclkFrequencyPostDs; + else + *value = metrics->AverageMemclkFrequencyPreDs; + break; + case METRICS_AVERAGE_VCLK: + *value = metrics->AverageVclk0Frequency; + break; + case METRICS_AVERAGE_DCLK: + *value = metrics->AverageDclk0Frequency; + break; + case METRICS_AVERAGE_VCLK1: + *value = metrics->AverageVclk1Frequency; + break; + case METRICS_AVERAGE_DCLK1: + *value = metrics->AverageDclk1Frequency; + break; + case 
METRICS_AVERAGE_GFXACTIVITY: + *value = metrics->AverageGfxActivity; + break; + case METRICS_AVERAGE_MEMACTIVITY: + *value = metrics->AverageUclkActivity; + break; + case METRICS_AVERAGE_SOCKETPOWER: + *value = metrics->AverageSocketPower << 8; + break; + case METRICS_TEMPERATURE_EDGE: + *value = metrics->AvgTemperature[TEMP_EDGE] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = metrics->AvgTemperature[TEMP_HOTSPOT] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_MEM: + *value = metrics->AvgTemperature[TEMP_MEM] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_VRGFX: + *value = metrics->AvgTemperature[TEMP_VR_GFX] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_VRSOC: + *value = metrics->AvgTemperature[TEMP_VR_SOC] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_THROTTLER_STATUS: + *value = smu_v14_0_2_get_throttler_status(metrics); + break; + case METRICS_CURR_FANSPEED: + *value = metrics->AvgFanRpm; + break; + case METRICS_CURR_FANPWM: + *value = metrics->AvgFanPwm; + break; + case METRICS_VOLTAGE_VDDGFX: + *value = metrics->AvgVoltage[SVI_PLANE_VDD_GFX]; + break; + case METRICS_PCIE_RATE: + *value = metrics->PcieRate; + break; + case METRICS_PCIE_WIDTH: + *value = metrics->PcieWidth; + break; + default: + *value = UINT_MAX; + break; + } + + return ret; +} + +static int smu_v14_0_2_get_dpm_ultimate_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min, + uint32_t *max) +{ + struct smu_14_0_dpm_context *dpm_context = + smu->smu_dpm.dpm_context; + struct smu_14_0_dpm_table *dpm_table; + + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + /* uclk dpm table */ + dpm_table = &dpm_context->dpm_tables.uclk_table; + break; + case SMU_GFXCLK: + case SMU_SCLK: + /* gfxclk dpm table */ + dpm_table = &dpm_context->dpm_tables.gfx_table; + break; + case SMU_SOCCLK: + /* socclk dpm table */ + dpm_table = &dpm_context->dpm_tables.soc_table; + break; + case SMU_FCLK: + /* fclk dpm table */ + dpm_table = &dpm_context->dpm_tables.fclk_table; + break; + case SMU_VCLK: + case SMU_VCLK1: + /* vclk dpm table */ + dpm_table = &dpm_context->dpm_tables.vclk_table; + break; + case SMU_DCLK: + case SMU_DCLK1: + /* dclk dpm table */ + dpm_table = &dpm_context->dpm_tables.dclk_table; + break; + default: + dev_err(smu->adev->dev, "Unsupported clock type!\n"); + return -EINVAL; + } + + if (min) + *min = dpm_table->min; + if (max) + *max = dpm_table->max; + + return 0; +} + +static int smu_v14_0_2_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, + uint32_t *size) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *smc_pptable = table_context->driver_pptable; + int ret = 0; + + switch (sensor) { + case AMDGPU_PP_SENSOR_MAX_FAN_RPM: + *(uint16_t *)data = smc_pptable->CustomSkuTable.FanMaximumRpm; + *size = 4; + break; + case AMDGPU_PP_SENSOR_MEM_LOAD: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_MEMACTIVITY, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_LOAD: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_GFXACTIVITY, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_SOCKETPOWER, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + 
METRICS_TEMPERATURE_HOTSPOT, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_EDGE_TEMP: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_EDGE, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_MEM_TEMP: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_MEM, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_MCLK: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_CURR_UCLK, + (uint32_t *)data); + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_SCLK: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_GFXCLK, + (uint32_t *)data); + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_VDDGFX: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_VOLTAGE_VDDGFX, + (uint32_t *)data); + *size = 4; + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value) +{ + MetricsMember_t member_type; + int clk_id = 0; + + clk_id = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_CLK, + clk_type); + if (clk_id < 0) + return -EINVAL; + + switch (clk_id) { + case PPCLK_GFXCLK: + member_type = METRICS_AVERAGE_GFXCLK; + break; + case PPCLK_UCLK: + member_type = METRICS_CURR_UCLK; + break; + case PPCLK_FCLK: + member_type = METRICS_CURR_FCLK; + break; + case PPCLK_SOCCLK: + member_type = METRICS_CURR_SOCCLK; + break; + case PPCLK_VCLK_0: + member_type = METRICS_AVERAGE_VCLK; + break; + case PPCLK_DCLK_0: + member_type = METRICS_AVERAGE_DCLK; + break; + default: + return -EINVAL; + } + + return smu_v14_0_2_get_smu_metrics_data(smu, + member_type, + value); +} + +static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + char *buf) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context; + struct smu_14_0_dpm_table *single_dpm_table; + int i, curr_freq, size = 0; + int ret = 0; + + smu_cmn_get_sysfs_buf(&buf, &size); + + if (amdgpu_ras_intr_triggered()) { + size += sysfs_emit_at(buf, size, "unavailable\n"); + return size; + } + + switch (clk_type) { + case SMU_SCLK: + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); + break; + case SMU_MCLK: + single_dpm_table = &(dpm_context->dpm_tables.uclk_table); + break; + case SMU_SOCCLK: + single_dpm_table = &(dpm_context->dpm_tables.soc_table); + break; + case SMU_FCLK: + single_dpm_table = &(dpm_context->dpm_tables.fclk_table); + break; + case SMU_VCLK: + case SMU_VCLK1: + single_dpm_table = &(dpm_context->dpm_tables.vclk_table); + break; + case SMU_DCLK: + case SMU_DCLK1: + single_dpm_table = &(dpm_context->dpm_tables.dclk_table); + break; + default: + break; + } + + switch (clk_type) { + case SMU_SCLK: + case SMU_MCLK: + case SMU_SOCCLK: + case SMU_FCLK: + case SMU_VCLK: + case SMU_VCLK1: + case SMU_DCLK: + case SMU_DCLK1: + ret = smu_v14_0_2_get_current_clk_freq_by_table(smu, clk_type, &curr_freq); + if (ret) { + dev_err(smu->adev->dev, "Failed to get current clock freq!"); + return ret; + } + + if (single_dpm_table->is_fine_grained) { + /* + * For fine grained dpms, there are only two dpm levels: + * - level 0 -> min clock freq + * - level 1 -> max clock freq + * And the current clock frequency can be any value between them. 
+ * So, if the current clock frequency is not at level 0 or level 1, + * we will fake it as three dpm levels: + * - level 0 -> min clock freq + * - level 1 -> current actual clock freq + * - level 2 -> max clock freq + */ + if ((single_dpm_table->dpm_levels[0].value != curr_freq) && + (single_dpm_table->dpm_levels[1].value != curr_freq)) { + size += sysfs_emit_at(buf, size, "0: %uMhz\n", + single_dpm_table->dpm_levels[0].value); + size += sysfs_emit_at(buf, size, "1: %uMhz *\n", + curr_freq); + size += sysfs_emit_at(buf, size, "2: %uMhz\n", + single_dpm_table->dpm_levels[1].value); + } else { + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", + single_dpm_table->dpm_levels[0].value, + single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + single_dpm_table->dpm_levels[1].value, + single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : ""); + } + } else { + for (i = 0; i < single_dpm_table->count; i++) + size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + i, single_dpm_table->dpm_levels[i].value, + single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : ""); + } + break; + case SMU_PCIE: + // TODO + break; + + default: + break; + } + + return size; +} + +static int smu_v14_0_2_force_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t mask) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context; + struct smu_14_0_dpm_table *single_dpm_table; + uint32_t soft_min_level, soft_max_level; + uint32_t min_freq, max_freq; + int ret = 0; + + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); + break; + case SMU_MCLK: + case SMU_UCLK: + single_dpm_table = &(dpm_context->dpm_tables.uclk_table); + break; + case SMU_SOCCLK: + single_dpm_table = &(dpm_context->dpm_tables.soc_table); + break; + case SMU_FCLK: + single_dpm_table = &(dpm_context->dpm_tables.fclk_table); + break; + case SMU_VCLK: + case SMU_VCLK1: + single_dpm_table = &(dpm_context->dpm_tables.vclk_table); + break; + case SMU_DCLK: + case SMU_DCLK1: + single_dpm_table = &(dpm_context->dpm_tables.dclk_table); + break; + default: + break; + } + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + case SMU_MCLK: + case SMU_UCLK: + case SMU_SOCCLK: + case SMU_FCLK: + case SMU_VCLK: + case SMU_VCLK1: + case SMU_DCLK: + case SMU_DCLK1: + if (single_dpm_table->is_fine_grained) { + /* There are only 2 levels for fine-grained DPM */ + soft_max_level = (soft_max_level >= 1 ? 1 : 0); + soft_min_level = (soft_min_level >= 1 ? 
1 : 0); + } else { + if ((soft_max_level >= single_dpm_table->count) || + (soft_min_level >= single_dpm_table->count)) + return -EINVAL; + } + + min_freq = single_dpm_table->dpm_levels[soft_min_level].value; + max_freq = single_dpm_table->dpm_levels[soft_max_level].value; + + ret = smu_v14_0_set_soft_freq_limited_range(smu, + clk_type, + min_freq, + max_freq); + break; + case SMU_DCEFCLK: + case SMU_PCIE: + default: + break; + } + + return ret; +} + +static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, + uint8_t pcie_gen_cap, + uint8_t pcie_width_cap) +{ + struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_14_0_pcie_table *pcie_table = + &dpm_context->dpm_tables.pcie_table; + uint32_t smu_pcie_arg; + int ret, i; + + for (i = 0; i < pcie_table->num_of_link_levels; i++) { + if (pcie_table->pcie_gen[i] > pcie_gen_cap) + pcie_table->pcie_gen[i] = pcie_gen_cap; + if (pcie_table->pcie_lane[i] > pcie_width_cap) + pcie_table->pcie_lane[i] = pcie_width_cap; + + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + return ret; + } + + return 0; +} + +static int smu_v14_0_2_get_thermal_temperature_range(struct smu_context *smu, + struct smu_temperature_range *range) +{ + // TODO + + return 0; +} + +static int smu_v14_0_2_populate_umd_state_clk(struct smu_context *smu) +{ + // TODO + + return 0; +} + +static void smu_v14_0_2_get_unique_id(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + SmuMetrics_t *metrics = + &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); + struct amdgpu_device *adev = smu->adev; + uint32_t upper32 = 0, lower32 = 0; + int ret; + + ret = smu_cmn_get_metrics_table(smu, NULL, false); + if (ret) + goto out; + + upper32 = metrics->PublicSerialNumberUpper; + lower32 = metrics->PublicSerialNumberLower; + +out: + adev->unique_id = ((uint64_t)upper32 << 32) | lower32; +} + +static int smu_v14_0_2_get_power_limit(struct smu_context *smu, + uint32_t *current_power_limit, + uint32_t *default_power_limit, + uint32_t *max_power_limit, + uint32_t *min_power_limit) +{ + // TODO + + return 0; +} + +static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu, + char *buf) +{ + DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; + DpmActivityMonitorCoeffInt_t *activity_monitor = + &(activity_monitor_external.DpmActivityMonitorCoeffInt); + static const char *title[] = { + "PROFILE_INDEX(NAME)", + "CLOCK_TYPE(NAME)", + "FPS", + "MinActiveFreqType", + "MinActiveFreq", + "BoosterFreqType", + "BoosterFreq", + "PD_Data_limit_c", + "PD_Data_error_coeff", + "PD_Data_error_rate_coeff"}; + int16_t workload_type = 0; + uint32_t i, size = 0; + int result = 0; + + if (!buf) + return -EINVAL; + + size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n", + title[0], title[1], title[2], title[3], title[4], title[5], + title[6], title[7], title[8], title[9]); + + for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + i); + if (workload_type == -ENOTSUPP) + continue; + else if (workload_type < 0) + return -EINVAL; + + result = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + workload_type, + (void *)(&activity_monitor_external), + false); + if 
(result) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return result; + } + + size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", + i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); + + size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 0, + "GFXCLK", + activity_monitor->Gfx_FPS, + activity_monitor->Gfx_MinActiveFreqType, + activity_monitor->Gfx_MinActiveFreq, + activity_monitor->Gfx_BoosterFreqType, + activity_monitor->Gfx_BoosterFreq, + activity_monitor->Gfx_PD_Data_limit_c, + activity_monitor->Gfx_PD_Data_error_coeff, + activity_monitor->Gfx_PD_Data_error_rate_coeff); + + size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 1, + "FCLK", + activity_monitor->Fclk_FPS, + activity_monitor->Fclk_MinActiveFreqType, + activity_monitor->Fclk_MinActiveFreq, + activity_monitor->Fclk_BoosterFreqType, + activity_monitor->Fclk_BoosterFreq, + activity_monitor->Fclk_PD_Data_limit_c, + activity_monitor->Fclk_PD_Data_error_coeff, + activity_monitor->Fclk_PD_Data_error_rate_coeff); + } + + return size; +} + +static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, + long *input, + uint32_t size) +{ + DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; + DpmActivityMonitorCoeffInt_t *activity_monitor = + &(activity_monitor_external.DpmActivityMonitorCoeffInt); + int workload_type, ret = 0; + + smu->power_profile_mode = input[size]; + + if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { + dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); + return -EINVAL; + } + + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } + + switch (input[0]) { + case 0: /* Gfxclk */ + activity_monitor->Gfx_FPS = input[1]; + activity_monitor->Gfx_MinActiveFreqType = input[2]; + activity_monitor->Gfx_MinActiveFreq = input[3]; + activity_monitor->Gfx_BoosterFreqType = input[4]; + activity_monitor->Gfx_BoosterFreq = input[5]; + activity_monitor->Gfx_PD_Data_limit_c = input[6]; + activity_monitor->Gfx_PD_Data_error_coeff = input[7]; + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; + break; + case 1: /* Fclk */ + activity_monitor->Fclk_FPS = input[1]; + activity_monitor->Fclk_MinActiveFreqType = input[2]; + activity_monitor->Fclk_MinActiveFreq = input[3]; + activity_monitor->Fclk_BoosterFreqType = input[4]; + activity_monitor->Fclk_BoosterFreq = input[5]; + activity_monitor->Fclk_PD_Data_limit_c = input[6]; + activity_monitor->Fclk_PD_Data_error_coeff = input[7]; + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; + break; + } + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } + } + + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + smu->power_profile_mode); + if (workload_type < 0) + return -EINVAL; + + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetWorkloadMask, + 1 << workload_type, + NULL); +} + +static int smu_v14_0_2_baco_enter(struct 
smu_context *smu) +{ + struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) + return smu_v14_0_baco_set_armd3_sequence(smu, + smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); + else + return smu_v14_0_baco_enter(smu); +} + +static int smu_v14_0_2_baco_exit(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { + /* Wait for PMFW handling for the Dstate change */ + usleep_range(10000, 11000); + return smu_v14_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); + } else { + return smu_v14_0_baco_exit(smu); + } +} + +static bool smu_v14_0_2_is_mode1_reset_supported(struct smu_context *smu) +{ + // TODO + + return true; +} + +static int smu_v14_0_2_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msg, int num_msgs) +{ + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap); + struct amdgpu_device *adev = smu_i2c->adev; + struct smu_context *smu = adev->powerplay.pp_handle; + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *table = &smu_table->driver_table; + SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; + int i, j, r, c; + u16 dir; + + if (!adev->pm.dpm_enabled) + return -EBUSY; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + req->I2CcontrollerPort = smu_i2c->port; + req->I2CSpeed = I2C_SPEED_FAST_400K; + req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */ + dir = msg[0].flags & I2C_M_RD; + + for (c = i = 0; i < num_msgs; i++) { + for (j = 0; j < msg[i].len; j++, c++) { + SwI2cCmd_t *cmd = &req->SwI2cCmds[c]; + + if (!(msg[i].flags & I2C_M_RD)) { + /* write */ + cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK; + cmd->ReadWriteData = msg[i].buf[j]; + } + + if ((dir ^ msg[i].flags) & I2C_M_RD) { + /* The direction changes. + */ + dir = msg[i].flags & I2C_M_RD; + cmd->CmdConfig |= CMDCONFIG_RESTART_MASK; + } + + req->NumCmds++; + + /* + * Insert STOP if we are at the last byte of either last + * message for the transaction or the client explicitly + * requires a STOP at this particular message. 
+ */ + if ((j == msg[i].len - 1) && + ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) { + cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK; + cmd->CmdConfig |= CMDCONFIG_STOP_MASK; + } + } + } + mutex_lock(&adev->pm.mutex); + r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); + mutex_unlock(&adev->pm.mutex); + if (r) + goto fail; + + for (c = i = 0; i < num_msgs; i++) { + if (!(msg[i].flags & I2C_M_RD)) { + c += msg[i].len; + continue; + } + for (j = 0; j < msg[i].len; j++, c++) { + SwI2cCmd_t *cmd = &res->SwI2cCmds[c]; + + msg[i].buf[j] = cmd->ReadWriteData; + } + } + r = num_msgs; +fail: + kfree(req); + return r; +} + +static u32 smu_v14_0_2_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm smu_v14_0_2_i2c_algo = { + .master_xfer = smu_v14_0_2_i2c_xfer, + .functionality = smu_v14_0_2_i2c_func, +}; + +static const struct i2c_adapter_quirks smu_v14_0_2_i2c_control_quirks = { + .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN, + .max_read_len = MAX_SW_I2C_COMMANDS, + .max_write_len = MAX_SW_I2C_COMMANDS, + .max_comb_1st_msg_len = 2, + .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2, +}; + +static int smu_v14_0_2_i2c_control_init(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int res, i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + smu_i2c->adev = adev; + smu_i2c->port = i; + mutex_init(&smu_i2c->mutex); + control->owner = THIS_MODULE; + control->class = I2C_CLASS_SPD; + control->dev.parent = &adev->pdev->dev; + control->algo = &smu_v14_0_2_i2c_algo; + snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); + control->quirks = &smu_v14_0_2_i2c_control_quirks; + i2c_set_adapdata(control, smu_i2c); + + res = i2c_add_adapter(control); + if (res) { + DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + goto Out_err; + } + } + + /* assign the buses used for the FRU EEPROM and RAS EEPROM */ + /* XXX ideally this would be something in a vbios data table */ + adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; + adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; + + return 0; +Out_err: + for ( ; i >= 0; i--) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + i2c_del_adapter(control); + } + return res; +} + +static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + i2c_del_adapter(control); + } + adev->pm.ras_eeprom_i2c_bus = NULL; + adev->pm.fru_eeprom_i2c_bus = NULL; +} + +static int smu_v14_0_2_set_mp1_state(struct smu_context *smu, + enum pp_mp1_state mp1_state) +{ + int ret; + + switch (mp1_state) { + case PP_MP1_STATE_UNLOAD: + ret = smu_cmn_set_mp1_state(smu, mp1_state); + break; + default: + /* Ignore others */ + ret = 0; + } + + return ret; +} + +static int smu_v14_0_2_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_DFCstateControl, + state, + NULL); +} + +static int smu_v14_0_2_mode1_reset(struct smu_context *smu) +{ + int ret = 0; + + // TODO + + return ret; +} + +static int smu_v14_0_2_mode2_reset(struct smu_context *smu) +{ + int ret = 0; + + // 
TODO + + return ret; +} + +static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(14, 0, 2)) + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures, + FEATURE_PWR_GFX, NULL); + else + return -EOPNOTSUPP; +} + +static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + smu->param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_82); + smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_66); + smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_90); +} + +static int smu_v14_0_2_smu_send_bad_mem_page_num(struct smu_context *smu, + uint32_t size) +{ + int ret = 0; + + /* message SMU to update the bad page number on SMUBUS */ + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetNumBadMemoryPagesRetired, + size, NULL); + if (ret) + dev_err(smu->adev->dev, + "[%s] failed to message SMU to update bad memory pages number\n", + __func__); + + return ret; +} + +static int smu_v14_0_2_send_bad_mem_channel_flag(struct smu_context *smu, + uint32_t size) +{ + int ret = 0; + + /* message SMU to update the bad channel info on SMUBUS */ + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, + size, NULL); + if (ret) + dev_err(smu->adev->dev, + "[%s] failed to message SMU to update bad memory pages channel info\n", + __func__); + + return ret; +} + +static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu, + void *table) +{ + int ret = 0; + + // TODO + + return ret; +} + +static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { + .get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask, + .set_default_dpm_table = smu_v14_0_2_set_default_dpm_table, + .i2c_init = smu_v14_0_2_i2c_control_init, + .i2c_fini = smu_v14_0_2_i2c_control_fini, + .is_dpm_running = smu_v14_0_2_is_dpm_running, + .dump_pptable = smu_v14_0_2_dump_pptable, + .init_microcode = smu_v14_0_init_microcode, + .load_microcode = smu_v14_0_load_microcode, + .fini_microcode = smu_v14_0_fini_microcode, + .init_smc_tables = smu_v14_0_2_init_smc_tables, + .fini_smc_tables = smu_v14_0_fini_smc_tables, + .init_power = smu_v14_0_init_power, + .fini_power = smu_v14_0_fini_power, + .check_fw_status = smu_v14_0_check_fw_status, + .setup_pptable = smu_v14_0_2_setup_pptable, + .check_fw_version = smu_v14_0_check_fw_version, + .write_pptable = smu_cmn_write_pptable, + .set_driver_table_location = smu_v14_0_set_driver_table_location, + .system_features_control = smu_v14_0_system_features_control, + .set_allowed_mask = smu_v14_0_set_allowed_mask, + .get_enabled_mask = smu_cmn_get_enabled_mask, + .dpm_set_vcn_enable = smu_v14_0_set_vcn_enable, + .dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable, + .get_dpm_ultimate_freq = smu_v14_0_2_get_dpm_ultimate_freq, + .get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values, + .read_sensor = smu_v14_0_2_read_sensor, + .feature_is_enabled = smu_cmn_feature_is_enabled, + .print_clk_levels = smu_v14_0_2_print_clk_levels, + .force_clk_levels = smu_v14_0_2_force_clk_levels, + .update_pcie_parameters = smu_v14_0_2_update_pcie_parameters, + .get_thermal_temperature_range = smu_v14_0_2_get_thermal_temperature_range, + .register_irq_handler = smu_v14_0_register_irq_handler, + .notify_memory_pool_location = smu_v14_0_notify_memory_pool_location, + .set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range, + .init_pptable_microcode = 
smu_v14_0_init_pptable_microcode, + .populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk, + .set_performance_level = smu_v14_0_set_performance_level, + .gfx_off_control = smu_v14_0_gfx_off_control, + .get_unique_id = smu_v14_0_2_get_unique_id, + .get_power_limit = smu_v14_0_2_get_power_limit, + .set_power_limit = smu_v14_0_set_power_limit, + .set_power_source = smu_v14_0_set_power_source, + .get_power_profile_mode = smu_v14_0_2_get_power_profile_mode, + .set_power_profile_mode = smu_v14_0_2_set_power_profile_mode, + .run_btc = smu_v14_0_run_btc, + .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, + .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, + .set_tool_table_location = smu_v14_0_set_tool_table_location, + .deep_sleep_control = smu_v14_0_deep_sleep_control, + .gfx_ulv_control = smu_v14_0_gfx_ulv_control, + .get_bamaco_support = smu_v14_0_get_bamaco_support, + .baco_get_state = smu_v14_0_baco_get_state, + .baco_set_state = smu_v14_0_baco_set_state, + .baco_enter = smu_v14_0_2_baco_enter, + .baco_exit = smu_v14_0_2_baco_exit, + .mode1_reset_is_support = smu_v14_0_2_is_mode1_reset_supported, + .mode1_reset = smu_v14_0_2_mode1_reset, + .mode2_reset = smu_v14_0_2_mode2_reset, + .enable_gfx_features = smu_v14_0_2_enable_gfx_features, + .set_mp1_state = smu_v14_0_2_set_mp1_state, + .set_df_cstate = smu_v14_0_2_set_df_cstate, + .send_hbm_bad_pages_num = smu_v14_0_2_smu_send_bad_mem_page_num, + .send_hbm_bad_channel_flag = smu_v14_0_2_send_bad_mem_channel_flag, + .gpo_control = smu_v14_0_gpo_control, + .get_ecc_info = smu_v14_0_2_get_ecc_info, +}; + +void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu) +{ + smu->ppt_funcs = &smu_v14_0_2_ppt_funcs; + smu->message_map = smu_v14_0_2_message_map; + smu->clock_map = smu_v14_0_2_clk_map; + smu->feature_map = smu_v14_0_2_feature_mask_map; + smu->table_map = smu_v14_0_2_table_map; + smu->pwr_src_map = smu_v14_0_2_pwr_src_map; + smu->workload_map = smu_v14_0_2_workload_map; + smu_v14_0_2_set_smu_mailbox_registers(smu); +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h new file mode 100644 index 000000000000..b83729e5d6f9 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h @@ -0,0 +1,28 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __SMU_V14_0_2_PPT_H__ +#define __SMU_V14_0_2_PPT_H__ + +extern void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu); + +#endif diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h index 94947229888b..b7f22597ee95 100644 --- a/drivers/gpu/drm/radeon/pptable.h +++ b/drivers/gpu/drm/radeon/pptable.h @@ -424,7 +424,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{ typedef struct _ATOM_PPLIB_STATE_V2 { //number of valid dpm levels in this state; Driver uses it to calculate the whole - //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR) + //size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels) UCHAR ucNumDPMLevels; //a index to the array of nonClockInfos @@ -432,14 +432,14 @@ typedef struct _ATOM_PPLIB_STATE_V2 /** * Driver will read the first ucNumDPMLevels in this array */ - UCHAR clockInfoIndex[1]; + UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels); } ATOM_PPLIB_STATE_V2; typedef struct _StateArray{ //how many states we have UCHAR ucNumEntries; - ATOM_PPLIB_STATE_V2 states[1]; + ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries); }StateArray; @@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{ //sizeof(ATOM_PPLIB_CLOCK_INFO) UCHAR ucEntrySize; - UCHAR clockInfo[1]; + UCHAR clockInfo[] __counted_by(ucNumEntries); }ClockInfoArray; typedef struct _NonClockInfoArray{ @@ -460,7 +460,7 @@ typedef struct _NonClockInfoArray{ //sizeof(ATOM_PPLIB_NONCLOCK_INFO) UCHAR ucEntrySize; - ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1]; + ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries); }NonClockInfoArray; typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index bb1f0a3371ab..10793a433bf5 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -923,8 +923,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO; for (i = 0; i < max_device; i++) { - ATOM_CONNECTOR_INFO_I2C ci = - supported_devices->info.asConnInfo[i]; + ATOM_CONNECTOR_INFO_I2C ci; + + if (frev > 1) + ci = supported_devices->info_2d1.asConnInfo[i]; + else + ci = supported_devices->info.asConnInfo[i]; bios_connectors[i].valid = false; |