Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
29 files changed, 706 insertions, 602 deletions
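The headline change below replaces amdgpu_card_posted() with amdgpu_need_post(), inverting the helper's return value and teaching it to report a pending hardware reset. A minimal call-site sketch distilled from the amdgpu_bios.c and amdgpu_device.c hunks that follow (adev is the usual struct amdgpu_device pointer):

	/* before: bail out if the ASIC was never POSTed */
	if (!amdgpu_card_posted(adev))
		return false;

	/* after: same condition with the inverted helper; it now also
	 * returns true exactly once after a hardware reset, via the new
	 * adev->has_hw_reset flag that cik_gpu_pci_config_reset() sets */
	if (amdgpu_need_post(adev))
		return false;

The resume path changes in the same spirit: amdgpu_device_resume() now re-posts whenever amdgpu_need_post() says so, rather than checking !amdgpu_card_posted(adev) || !resume.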
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 94a64e3bc682..c1b913541739 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1037,7 +1037,6 @@ struct amdgpu_uvd { bool use_ctx_buf; struct amd_sched_entity entity; uint32_t srbm_soft_reset; - bool is_powergated; }; /* @@ -1066,7 +1065,6 @@ struct amdgpu_vce { struct amd_sched_entity entity; uint32_t srbm_soft_reset; unsigned num_rings; - bool is_powergated; }; /* @@ -1484,6 +1482,9 @@ struct amdgpu_device { spinlock_t gtt_list_lock; struct list_head gtt_list; + /* record hw reset is performed */ + bool has_hw_reset; + }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) @@ -1702,13 +1703,14 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) int amdgpu_gpu_reset(struct amdgpu_device *adev); bool amdgpu_need_backup(struct amdgpu_device *adev); void amdgpu_pci_config_reset(struct amdgpu_device *adev); -bool amdgpu_card_posted(struct amdgpu_device *adev); +bool amdgpu_need_post(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, u32 ip_instance, u32 ring, struct amdgpu_ring **out_ring); +void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes); void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain); bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index d9def01f276e..821f7cc2051f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -100,7 +100,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev) resource_size_t size = 256 * 1024; /* ??? 
*/ if (!(adev->flags & AMD_IS_APU)) - if (!amdgpu_card_posted(adev)) + if (amdgpu_need_post(adev)) return false; adev->bios = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index a5df1ef306d9..d9e5aa4a79ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -834,32 +834,57 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, case CHIP_TOPAZ: if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) || ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) || - ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) + ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) { + info->is_kicker = true; strcpy(fw_name, "amdgpu/topaz_k_smc.bin"); - else + } else strcpy(fw_name, "amdgpu/topaz_smc.bin"); break; case CHIP_TONGA: if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) || - ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) + ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) { + info->is_kicker = true; strcpy(fw_name, "amdgpu/tonga_k_smc.bin"); - else + } else strcpy(fw_name, "amdgpu/tonga_smc.bin"); break; case CHIP_FIJI: strcpy(fw_name, "amdgpu/fiji_smc.bin"); break; case CHIP_POLARIS11: - if (type == CGS_UCODE_ID_SMU) - strcpy(fw_name, "amdgpu/polaris11_smc.bin"); - else if (type == CGS_UCODE_ID_SMU_SK) + if (type == CGS_UCODE_ID_SMU) { + if (((adev->pdev->device == 0x67ef) && + ((adev->pdev->revision == 0xe0) || + (adev->pdev->revision == 0xe2) || + (adev->pdev->revision == 0xe5))) || + ((adev->pdev->device == 0x67ff) && + ((adev->pdev->revision == 0xcf) || + (adev->pdev->revision == 0xef) || + (adev->pdev->revision == 0xff)))) { + info->is_kicker = true; + strcpy(fw_name, "amdgpu/polaris11_k_smc.bin"); + } else + strcpy(fw_name, "amdgpu/polaris11_smc.bin"); + } else if (type == CGS_UCODE_ID_SMU_SK) { strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin"); + } break; case CHIP_POLARIS10: - if (type == CGS_UCODE_ID_SMU) - strcpy(fw_name, "amdgpu/polaris10_smc.bin"); - else if (type == CGS_UCODE_ID_SMU_SK) + if (type == CGS_UCODE_ID_SMU) { + if ((adev->pdev->device == 0x67df) && + ((adev->pdev->revision == 0xe0) || + (adev->pdev->revision == 0xe3) || + (adev->pdev->revision == 0xe4) || + (adev->pdev->revision == 0xe5) || + (adev->pdev->revision == 0xe7) || + (adev->pdev->revision == 0xef))) { + info->is_kicker = true; + strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); + } else + strcpy(fw_name, "amdgpu/polaris10_smc.bin"); + } else if (type == CGS_UCODE_ID_SMU_SK) { strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin"); + } break; case CHIP_POLARIS12: strcpy(fw_name, "amdgpu/polaris12_smc.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index cf2e8c4e9b8b..d2d0f60ff36d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, } break; } + + if (!(*out_ring && (*out_ring)->adev)) { + DRM_ERROR("Ring %d is not initialized on IP %d\n", + ring, ip_type); + return -EINVAL; + } + return 0; } @@ -344,8 +351,7 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) * submission. This can result in a debt that can stop buffer migrations * temporarily. 
*/ -static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, - u64 num_bytes) +void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes) { spin_lock(&adev->mm_stats.lock); adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 944ba0d3874a..6abb238b25c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -619,25 +619,29 @@ void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) * GPU helpers function. */ /** - * amdgpu_card_posted - check if the hw has already been initialized + * amdgpu_need_post - check if the hw need post or not * * @adev: amdgpu_device pointer * - * Check if the asic has been initialized (all asics). - * Used at driver startup. - * Returns true if initialized or false if not. + * Check if the asic has been initialized (all asics) at driver startup + * or post is needed if hw reset is performed. + * Returns true if need or false if not. */ -bool amdgpu_card_posted(struct amdgpu_device *adev) +bool amdgpu_need_post(struct amdgpu_device *adev) { uint32_t reg; + if (adev->has_hw_reset) { + adev->has_hw_reset = false; + return true; + } /* then check MEM_SIZE, in case the crtcs are off */ reg = RREG32(mmCONFIG_MEMSIZE); if (reg) - return true; + return false; - return false; + return true; } @@ -665,7 +669,7 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev) return true; } } - return !amdgpu_card_posted(adev); + return amdgpu_need_post(adev); } /** @@ -2071,7 +2075,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) amdgpu_atombios_scratch_regs_restore(adev); /* post card */ - if (!amdgpu_card_posted(adev) || !resume) { + if (amdgpu_need_post(adev)) { r = amdgpu_atom_asic_init(adev->mode_info.atom_context); if (r) DRM_ERROR("amdgpu asic init failed\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 9bd1b4eae32e..51d759463384 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -487,67 +487,44 @@ static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo) * * @adev: amdgpu_device pointer * @bo_va: bo_va to update + * @list: validation list + * @operation: map or unmap * - * Update the bo_va directly after setting it's address. Errors are not + * Update the bo_va directly after setting its address. Errors are not * vital here, so they are not reported back to userspace. 
*/ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, + struct list_head *list, uint32_t operation) { - struct ttm_validate_buffer tv, *entry; - struct amdgpu_bo_list_entry vm_pd; - struct ww_acquire_ctx ticket; - struct list_head list, duplicates; - int r; - - INIT_LIST_HEAD(&list); - INIT_LIST_HEAD(&duplicates); - - tv.bo = &bo_va->bo->tbo; - tv.shared = true; - list_add(&tv.head, &list); - - amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd); + struct ttm_validate_buffer *entry; + int r = -ERESTARTSYS; - /* Provide duplicates to avoid -EALREADY */ - r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); - if (r) - goto error_print; - - list_for_each_entry(entry, &list, head) { + list_for_each_entry(entry, list, head) { struct amdgpu_bo *bo = container_of(entry->bo, struct amdgpu_bo, tbo); - - /* if anything is swapped out don't swap it in here, - just abort and wait for the next CS */ - if (!amdgpu_bo_gpu_accessible(bo)) - goto error_unreserve; - - if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow)) - goto error_unreserve; + if (amdgpu_gem_va_check(NULL, bo)) + goto error; } r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check, NULL); if (r) - goto error_unreserve; + goto error; r = amdgpu_vm_update_page_directory(adev, bo_va->vm); if (r) - goto error_unreserve; + goto error; r = amdgpu_vm_clear_freed(adev, bo_va->vm); if (r) - goto error_unreserve; + goto error; if (operation == AMDGPU_VA_OP_MAP) r = amdgpu_vm_bo_update(adev, bo_va, false); -error_unreserve: - ttm_eu_backoff_reservation(&ticket, &list); - -error_print: +error: if (r && r != -ERESTARTSYS) DRM_ERROR("Couldn't update BO_VA (%d)\n", r); } @@ -564,7 +541,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct amdgpu_bo_list_entry vm_pd; struct ttm_validate_buffer tv; struct ww_acquire_ctx ticket; - struct list_head list, duplicates; + struct list_head list; uint32_t invalid_flags, va_flags = 0; int r = 0; @@ -602,14 +579,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, return -ENOENT; abo = gem_to_amdgpu_bo(gobj); INIT_LIST_HEAD(&list); - INIT_LIST_HEAD(&duplicates); tv.bo = &abo->tbo; - tv.shared = true; + tv.shared = false; list_add(&tv.head, &list); amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd); - r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); + r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); if (r) { drm_gem_object_unreference_unlocked(gobj); return r; @@ -640,10 +616,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, default: break; } - ttm_eu_backoff_reservation(&ticket, &list); if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug) - amdgpu_gem_va_update_vm(adev, bo_va, args->operation); + amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation); + ttm_eu_backoff_reservation(&ticket, &list); drm_gem_object_unreference_unlocked(gobj); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index d1aa291b2638..be80a4a68d7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -323,6 +323,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, struct amdgpu_bo *bo; enum ttm_bo_type type; unsigned long page_align; + u64 initial_bytes_moved; size_t acc_size; int r; @@ -374,8 +375,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, * See https://bugs.freedesktop.org/show_bug.cgi?id=88758 */ +#ifndef CONFIG_COMPILE_TEST #warning Please 
enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ thanks to write-combining +#endif if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " @@ -399,12 +402,20 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock); WARN_ON(!locked); } + + initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, !kernel, NULL, acc_size, sg, resv ? resv : &bo->tbo.ttm_resv, &amdgpu_ttm_bo_destroy); - if (unlikely(r != 0)) + amdgpu_cs_report_moved_bytes(adev, + atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved); + + if (unlikely(r != 0)) { + if (!resv) + ww_mutex_unlock(&bo->tbo.resv->lock); return r; + } bo->tbo.priority = ilog2(bo->tbo.num_pages); if (kernel) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index a61882ddc804..346e80a7119b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1142,12 +1142,22 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) /* XXX select vce level based on ring/task */ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; mutex_unlock(&adev->pm.mutex); + amdgpu_pm_compute_clocks(adev); + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); } else { + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); mutex_lock(&adev->pm.mutex); adev->pm.dpm.vce_active = false; mutex_unlock(&adev->pm.mutex); + amdgpu_pm_compute_clocks(adev); } - amdgpu_pm_compute_clocks(adev); + } } @@ -1286,7 +1296,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) if (!adev->pm.dpm_enabled) return; - amdgpu_display_bandwidth_update(adev); + if (adev->mode_info.num_crtc) + amdgpu_display_bandwidth_update(adev); for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 1154b0a8881d..4c6094eefc51 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -529,6 +529,9 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ case TTM_PL_TT: break; case TTM_PL_VRAM: + if (mem->start == AMDGPU_BO_INVALID_OFFSET) + return -EINVAL; + mem->bus.offset = mem->start << PAGE_SHIFT; /* check if it's visible */ if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 6f62ac473064..6d6ab7f11b4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1113,6 +1113,11 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) amdgpu_dpm_enable_uvd(adev, false); } else { amdgpu_asic_set_uvd_clocks(adev, 0, 0); + /* shutdown the UVD block */ + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_GATE); } } else { schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); @@ -1129,6 +1134,10 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) amdgpu_dpm_enable_uvd(adev, true); } else { 
amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_UNGATE); + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_UNGATE); } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 79bc9c7aad45..e2c06780ce49 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -321,6 +321,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work) amdgpu_dpm_enable_vce(adev, false); } else { amdgpu_asic_set_vce_clocks(adev, 0, 0); + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); } } else { schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT); @@ -346,6 +350,11 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring) amdgpu_dpm_enable_vce(adev, true); } else { amdgpu_asic_set_vce_clocks(adev, 53300, 40000); + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); + } } mutex_unlock(&adev->vce.idle_mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 3fd951c71d1b..dcfb7df3caf4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -83,7 +83,6 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm) DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); amdgpu_vm_bo_rmv(adev, bo_va); ttm_eu_backoff_reservation(&ticket, &list); - kfree(bo_va); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 9498e78b90d7..f97ecb49972e 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -2210,7 +2210,6 @@ static void ci_clear_vc(struct amdgpu_device *adev) static int ci_upload_firmware(struct amdgpu_device *adev) { - struct ci_power_info *pi = ci_get_pi(adev); int i, ret; if (amdgpu_ci_is_smc_running(adev)) { @@ -2227,7 +2226,7 @@ static int ci_upload_firmware(struct amdgpu_device *adev) amdgpu_ci_stop_smc_clock(adev); amdgpu_ci_reset_smc(adev); - ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end); + ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END); return ret; @@ -4257,12 +4256,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev, if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) { if (amdgpu_new_state->evclk) { - /* turn the clocks on when encoding */ - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); - if (ret) - return ret; - pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev); tmp = RREG32_SMC(ixDPM_TABLE_475); tmp &= ~DPM_TABLE_475__VceBootLevel_MASK; @@ -4274,9 +4267,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev, ret = ci_enable_vce_dpm(adev, false); if (ret) return ret; - /* turn the clocks off when not encoding */ - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_GATE); } } return ret; @@ -6278,13 +6268,13 @@ static int ci_dpm_sw_init(void *handle) adev->pm.current_mclk = adev->clock.default_mclk; adev->pm.int_thermal_type = THERMAL_TYPE_NONE; - if (amdgpu_dpm == 0) - return 0; - ret = ci_dpm_init_microcode(adev); if (ret) return ret; + if (amdgpu_dpm == 0) + return 0; + INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); 
mutex_lock(&adev->pm.mutex); ret = ci_dpm_init(adev); @@ -6328,8 +6318,15 @@ static int ci_dpm_hw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!amdgpu_dpm) + if (!amdgpu_dpm) { + ret = ci_upload_firmware(adev); + if (ret) { + DRM_ERROR("ci_upload_firmware failed\n"); + return ret; + } + ci_dpm_start_smc(adev); return 0; + } mutex_lock(&adev->pm.mutex); ci_dpm_setup_asic(adev); @@ -6351,6 +6348,8 @@ static int ci_dpm_hw_fini(void *handle) mutex_lock(&adev->pm.mutex); ci_dpm_disable(adev); mutex_unlock(&adev->pm.mutex); + } else { + ci_dpm_stop_smc(adev); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 7da688b0d27d..c4d4b35e54ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1176,6 +1176,7 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) { /* enable BM */ pci_set_master(adev->pdev); + adev->has_hw_reset = true; r = 0; break; } @@ -1722,8 +1723,8 @@ static int cik_common_early_init(void *handle) AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_DMG |*/ AMD_PG_SUPPORT_UVD | - /*AMD_PG_SUPPORT_VCE | - AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_VCE | + /* AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_GDS | AMD_PG_SUPPORT_RLC_SMU_HS | AMD_PG_SUPPORT_ACP | diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 1cf1d9d1aec1..5b24e89552ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -3737,9 +3737,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev, default: encoder->possible_crtcs = 0x3; break; + case 3: + encoder->possible_crtcs = 0x7; + break; case 4: encoder->possible_crtcs = 0xf; break; + case 5: + encoder->possible_crtcs = 0x1f; + break; case 6: encoder->possible_crtcs = 0x3f; break; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 762f8e82ceb7..e9a176891e13 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = static void dce_virtual_encoder_destroy(struct drm_encoder *encoder) { - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - - kfree(amdgpu_encoder->enc_priv); drm_encoder_cleanup(encoder); - kfree(amdgpu_encoder); + kfree(encoder); } static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index c998f6aaaf36..2086e7e68de4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1325,21 +1325,19 @@ static u32 gfx_v6_0_create_bitmask(u32 bit_width) return (u32)(((u64)1 << bit_width) - 1); } -static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev, - u32 max_rb_num_per_se, - u32 sh_per_se) +static u32 gfx_v6_0_get_rb_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; - data = RREG32(mmCC_RB_BACKEND_DISABLE); - data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; - data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); + data = RREG32(mmCC_RB_BACKEND_DISABLE) | + RREG32(mmGC_USER_RB_BACKEND_DISABLE); - data >>= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; + data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE); - mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se); + mask = 
gfx_v6_0_create_bitmask(adev->gfx.config.max_backends_per_se/ + adev->gfx.config.max_sh_per_se); - return data & mask; + return ~data & mask; } static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf) @@ -1468,68 +1466,55 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev, gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); } -static void gfx_v6_0_setup_rb(struct amdgpu_device *adev, - u32 se_num, u32 sh_per_se, - u32 max_rb_num_per_se) +static void gfx_v6_0_setup_rb(struct amdgpu_device *adev) { int i, j; - u32 data, mask; - u32 disabled_rbs = 0; - u32 enabled_rbs = 0; + u32 data; + u32 raster_config = 0; + u32 active_rbs = 0; + u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / + adev->gfx.config.max_sh_per_se; unsigned num_rb_pipes; mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < se_num; i++) { - for (j = 0; j < sh_per_se; j++) { + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); - data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se); - disabled_rbs |= data << ((i * sh_per_se + j) * 2); + data = gfx_v6_0_get_rb_active_bitmap(adev); + active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * + rb_bitmap_width_per_sh); } } gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); - - mask = 1; - for (i = 0; i < max_rb_num_per_se * se_num; i++) { - if (!(disabled_rbs & mask)) - enabled_rbs |= mask; - mask <<= 1; - } - adev->gfx.config.backend_enable_mask = enabled_rbs; - adev->gfx.config.num_rbs = hweight32(enabled_rbs); + adev->gfx.config.backend_enable_mask = active_rbs; + adev->gfx.config.num_rbs = hweight32(active_rbs); num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se * adev->gfx.config.max_shader_engines, 16); - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < se_num; i++) { - gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff); - data = 0; - for (j = 0; j < sh_per_se; j++) { - switch (enabled_rbs & 3) { - case 1: - data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); - break; - case 2: - data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2); - break; - case 3: - default: - data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); - break; - } - enabled_rbs >>= 2; - } - gfx_v6_0_raster_config(adev, &data); + gfx_v6_0_raster_config(adev, &raster_config); - if (!adev->gfx.config.backend_enable_mask || - adev->gfx.config.num_rbs >= num_rb_pipes) - WREG32(mmPA_SC_RASTER_CONFIG, data); - else - gfx_v6_0_write_harvested_raster_configs(adev, data, - adev->gfx.config.backend_enable_mask, - num_rb_pipes); + if (!adev->gfx.config.backend_enable_mask || + adev->gfx.config.num_rbs >= num_rb_pipes) { + WREG32(mmPA_SC_RASTER_CONFIG, raster_config); + } else { + gfx_v6_0_write_harvested_raster_configs(adev, raster_config, + adev->gfx.config.backend_enable_mask, + num_rb_pipes); + } + + /* cache the values for userspace */ + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + adev->gfx.config.rb_config[i][j].rb_backend_disable = + RREG32(mmCC_RB_BACKEND_DISABLE); + adev->gfx.config.rb_config[i][j].user_rb_backend_disable = + RREG32(mmGC_USER_RB_BACKEND_DISABLE); + adev->gfx.config.rb_config[i][j].raster_config = + RREG32(mmPA_SC_RASTER_CONFIG); + } } gfx_v6_0_select_se_sh(adev, 
0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); @@ -1540,36 +1525,44 @@ static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev) } */ -static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh) +static void gfx_v6_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, + u32 bitmap) { - u32 data, mask; + u32 data; - data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); - data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; - data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); + if (!bitmap) + return; - data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; - mask = gfx_v6_0_create_bitmask(cu_per_sh); + WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data); +} - return ~data & mask; +static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev) +{ + u32 data, mask; + + data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) | + RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); + + mask = gfx_v6_0_create_bitmask(adev->gfx.config.max_cu_per_sh); + return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask; } -static void gfx_v6_0_setup_spi(struct amdgpu_device *adev, - u32 se_num, u32 sh_per_se, - u32 cu_per_sh) +static void gfx_v6_0_setup_spi(struct amdgpu_device *adev) { int i, j, k; u32 data, mask; u32 active_cu = 0; mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < se_num; i++) { - for (j = 0; j < sh_per_se; j++) { + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); data = RREG32(mmSPI_STATIC_THREAD_MGMT_3); - active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh); + active_cu = gfx_v6_0_get_cu_enabled(adev); mask = 1; for (k = 0; k < 16; k++) { @@ -1717,6 +1710,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev) gb_addr_config |= 2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT; break; } + gb_addr_config &= ~GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK; + if (adev->gfx.config.max_shader_engines == 2) + gb_addr_config |= 1 << GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT; adev->gfx.config.gb_addr_config = gb_addr_config; WREG32(mmGB_ADDR_CONFIG, gb_addr_config); @@ -1735,13 +1731,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev) #endif gfx_v6_0_tiling_mode_table_init(adev); - gfx_v6_0_setup_rb(adev, adev->gfx.config.max_shader_engines, - adev->gfx.config.max_sh_per_se, - adev->gfx.config.max_backends_per_se); + gfx_v6_0_setup_rb(adev); - gfx_v6_0_setup_spi(adev, adev->gfx.config.max_shader_engines, - adev->gfx.config.max_sh_per_se, - adev->gfx.config.max_cu_per_sh); + gfx_v6_0_setup_spi(adev); gfx_v6_0_get_cu_info(adev); @@ -2941,61 +2933,16 @@ static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev, } } -static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev, - u32 se, u32 sh) -{ - - u32 mask = 0, tmp, tmp1; - int i; - - mutex_lock(&adev->grbm_idx_mutex); - gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff); - tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); - tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); - - tmp &= 0xffff0000; - - tmp |= tmp1; - tmp >>= 16; - - for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) { - mask <<= 1; - mask |= 1; - } - - return (~tmp) & mask; -} - static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev) { - u32 i, j, k, active_cu_number = 0; - - u32 mask, counter, 
cu_bitmap; - u32 tmp = 0; - - for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { - for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - mask = 1; - cu_bitmap = 0; - counter = 0; - for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { - if (gfx_v6_0_get_cu_active_bitmap(adev, i, j) & mask) { - if (counter < 2) - cu_bitmap |= mask; - counter++; - } - mask <<= 1; - } + u32 tmp; - active_cu_number += counter; - tmp |= (cu_bitmap << (i * 16 + j * 8)); - } - } + WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask); - WREG32(mmRLC_PG_AO_CU_MASK, tmp); - WREG32_FIELD(RLC_MAX_PG_CU, MAX_POWERED_UP_CU, active_cu_number); + tmp = RREG32(mmRLC_MAX_PG_CU); + tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK; + tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT); + WREG32(mmRLC_MAX_PG_CU, tmp); } static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev, @@ -3770,18 +3717,26 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev) int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; + unsigned disable_masks[4 * 2]; memset(cu_info, 0, sizeof(*cu_info)); + amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); + + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; ao_bitmap = 0; counter = 0; - bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j); + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + if (i < 4 && j < 2) + gfx_v6_0_set_user_cu_inactive_bitmap( + adev, disable_masks[i * 2 + j]); + bitmap = gfx_v6_0_get_cu_enabled(adev); cu_info->bitmap[i][j] = bitmap; - for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { + for (k = 0; k < 16; k++) { if (bitmap & mask) { if (counter < 2) ao_bitmap |= mask; @@ -3794,6 +3749,9 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev) } } + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + cu_info->number = active_cu_number; cu_info->ao_cu_mask = ao_cu_mask; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index e3589b55a1e1..1f9354541f29 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1983,6 +1983,14 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK | (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT)); WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK); + + tmp = RREG32(mmSPI_ARB_PRIORITY); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2); + WREG32(mmSPI_ARB_PRIORITY, tmp); + mutex_unlock(&adev->grbm_idx_mutex); udelay(50); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 35f9cd83b821..67afc901905c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3898,6 +3898,14 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) | (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)); + + tmp = RREG32(mmSPI_ARB_PRIORITY); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 
2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2); + WREG32(mmSPI_ARB_PRIORITY, tmp); + mutex_unlock(&adev->grbm_idx_mutex); } @@ -7260,7 +7268,7 @@ static void gfx_v8_0_ring_emit_ce_meta_init(struct amdgpu_ring *ring, uint64_t c static union { struct amdgpu_ce_ib_state regular; struct amdgpu_ce_ib_state_chained_ib chained; - } ce_payload = {0}; + } ce_payload = {}; if (ring->adev->virt.chained_ib_support) { ce_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data_chained_ib, ce_payload); @@ -7287,7 +7295,7 @@ static void gfx_v8_0_ring_emit_de_meta_init(struct amdgpu_ring *ring, uint64_t c static union { struct amdgpu_de_ib_state regular; struct amdgpu_de_ib_state_chained_ib chained; - } de_payload = {0}; + } de_payload = {}; gds_addr = csa_addr + 4096; if (ring->adev->virt.chained_ib_support) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index e2b0b1646f99..0635829b18cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); + if (adev->mode_info.num_crtc) + amdgpu_display_set_vga_render_state(adev, false); + gmc_v6_0_mc_stop(adev, &save); if (gmc_v6_0_wait_for_idle((void *)adev)) { @@ -283,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } gmc_v6_0_mc_resume(adev, &save); - amdgpu_display_set_vga_render_state(adev, false); } static int gmc_v6_0_mc_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 8785ca570729..f5a343cb0010 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -1550,11 +1550,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { kv_dpm_powergate_vce(adev, false); - /* turn the clocks on when encoding */ - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); - if (ret) - return ret; if (pi->caps_stable_p_state) pi->vce_boot_level = table->count - 1; else @@ -1573,15 +1568,9 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_VCEDPM_SetEnabledMask, (1 << pi->vce_boot_level)); - kv_enable_vce_dpm(adev, true); } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { kv_enable_vce_dpm(adev, false); - /* turn the clocks off when not encoding */ - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_GATE); - if (ret) - return ret; kv_dpm_powergate_vce(adev, true); } @@ -1688,70 +1677,44 @@ static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) struct kv_power_info *pi = kv_get_pi(adev); int ret; - if (pi->uvd_power_gated == gate) - return; - pi->uvd_power_gated = gate; if (gate) { - if (pi->caps_uvd_pg) { - /* disable clockgating so we can properly shut down the block */ - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); - /* shutdown the UVD block */ - ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); - /* XXX: check for errors */ - } + /* stop the UVD block */ + ret = amdgpu_set_powergating_state(adev, 
AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); kv_update_uvd_dpm(adev, gate); if (pi->caps_uvd_pg) /* power off the UVD block */ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); } else { - if (pi->caps_uvd_pg) { + if (pi->caps_uvd_pg) /* power on the UVD block */ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); /* re-init the UVD block */ - ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); - /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */ - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_GATE); - /* XXX: check for errors */ - } kv_update_uvd_dpm(adev, gate); + + ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_UNGATE); } } static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) { struct kv_power_info *pi = kv_get_pi(adev); - int ret; if (pi->vce_power_gated == gate) return; pi->vce_power_gated = gate; - if (gate) { - if (pi->caps_vce_pg) { - /* shutdown the VCE block */ - ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - /* XXX: check for errors */ - /* power off the VCE block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); - } - } else { - if (pi->caps_vce_pg) { - /* power on the VCE block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); - /* re-init the VCE block */ - ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); - /* XXX: check for errors */ - } - } + if (!pi->caps_vce_pg) + return; + + if (gate) + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); + else + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); } static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) @@ -3009,8 +2972,7 @@ static int kv_dpm_late_init(void *handle) kv_dpm_powergate_acp(adev, true); kv_dpm_powergate_samu(adev, true); - kv_dpm_powergate_vce(adev, true); - kv_dpm_powergate_uvd(adev, true); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index da46992f7b18..b71e3faa40db 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1010,24 +1010,81 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = { {PA_SC_RASTER_CONFIG, false, true}, }; -static uint32_t si_read_indexed_register(struct amdgpu_device *adev, - u32 se_num, u32 sh_num, - u32 reg_offset) +static uint32_t si_get_register_value(struct amdgpu_device *adev, + bool indexed, u32 se_num, + u32 sh_num, u32 reg_offset) { - uint32_t val; + if (indexed) { + uint32_t val; + unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num; + unsigned sh_idx = (sh_num == 0xffffffff) ? 
0 : sh_num; + + switch (reg_offset) { + case mmCC_RB_BACKEND_DISABLE: + return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable; + case mmGC_USER_RB_BACKEND_DISABLE: + return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable; + case mmPA_SC_RASTER_CONFIG: + return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config; + } - mutex_lock(&adev->grbm_idx_mutex); - if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + mutex_lock(&adev->grbm_idx_mutex); + if (se_num != 0xffffffff || sh_num != 0xffffffff) + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); - val = RREG32(reg_offset); + val = RREG32(reg_offset); - if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); - return val; + if (se_num != 0xffffffff || sh_num != 0xffffffff) + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + return val; + } else { + unsigned idx; + + switch (reg_offset) { + case mmGB_ADDR_CONFIG: + return adev->gfx.config.gb_addr_config; + case mmMC_ARB_RAMCFG: + return adev->gfx.config.mc_arb_ramcfg; + case mmGB_TILE_MODE0: + case mmGB_TILE_MODE1: + case mmGB_TILE_MODE2: + case mmGB_TILE_MODE3: + case mmGB_TILE_MODE4: + case mmGB_TILE_MODE5: + case mmGB_TILE_MODE6: + case mmGB_TILE_MODE7: + case mmGB_TILE_MODE8: + case mmGB_TILE_MODE9: + case mmGB_TILE_MODE10: + case mmGB_TILE_MODE11: + case mmGB_TILE_MODE12: + case mmGB_TILE_MODE13: + case mmGB_TILE_MODE14: + case mmGB_TILE_MODE15: + case mmGB_TILE_MODE16: + case mmGB_TILE_MODE17: + case mmGB_TILE_MODE18: + case mmGB_TILE_MODE19: + case mmGB_TILE_MODE20: + case mmGB_TILE_MODE21: + case mmGB_TILE_MODE22: + case mmGB_TILE_MODE23: + case mmGB_TILE_MODE24: + case mmGB_TILE_MODE25: + case mmGB_TILE_MODE26: + case mmGB_TILE_MODE27: + case mmGB_TILE_MODE28: + case mmGB_TILE_MODE29: + case mmGB_TILE_MODE30: + case mmGB_TILE_MODE31: + idx = (reg_offset - mmGB_TILE_MODE0); + return adev->gfx.config.tile_mode_array[idx]; + default: + return RREG32(reg_offset); + } + } } - static int si_read_register(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 reg_offset, u32 *value) { @@ -1039,10 +1096,9 @@ static int si_read_register(struct amdgpu_device *adev, u32 se_num, continue; if (!si_allowed_read_registers[i].untouched) - *value = si_allowed_read_registers[i].grbm_indexed ? 
- si_read_indexed_register(adev, se_num, - sh_num, reg_offset) : - RREG32(reg_offset); + *value = si_get_register_value(adev, + si_allowed_read_registers[i].grbm_indexed, + se_num, sh_num, reg_offset); return 0; } return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h index fde2086246fa..dc9e0e6b4558 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_enums.h +++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h @@ -143,8 +143,8 @@ #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D #define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003 -#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 -#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001 +#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x02010002 +#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02011003 #define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \ (((op) & 0xFF) << 8) | \ diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 7fb9137dd89b..b34cefc7ebd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -159,9 +159,6 @@ static int uvd_v4_2_hw_init(void *handle) uvd_v4_2_enable_mgcg(adev, true); amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); - r = uvd_v4_2_start(adev); - if (r) - goto done; ring->ready = true; r = amdgpu_ring_test_ring(ring); @@ -198,7 +195,6 @@ static int uvd_v4_2_hw_init(void *handle) amdgpu_ring_commit(ring); done: - if (!r) DRM_INFO("UVD initialized successfully.\n"); @@ -217,7 +213,9 @@ static int uvd_v4_2_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->uvd.ring; - uvd_v4_2_stop(adev); + if (RREG32(mmUVD_STATUS) != 0) + uvd_v4_2_stop(adev); + ring->ready = false; return 0; @@ -267,37 +265,26 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) struct amdgpu_ring *ring = &adev->uvd.ring; uint32_t rb_bufsz; int i, j, r; + u32 tmp; /* disable byte swapping */ u32 lmi_swap_cntl = 0; u32 mp_swap_cntl = 0; - WREG32(mmUVD_CGC_GATE, 0); - uvd_v4_2_set_dcm(adev, true); - - uvd_v4_2_mc_resume(adev); + /* set uvd busy */ + WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2)); - /* disable interupt */ - WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); - - /* Stall UMC and register bus before resetting VCPU */ - WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); - mdelay(1); - - /* put LMI, VCPU, RBC etc... 
into reset */ - WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | - UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | - UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | - UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); - mdelay(5); + uvd_v4_2_set_dcm(adev, true); + WREG32(mmUVD_CGC_GATE, 0); /* take UVD block out of reset */ WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); mdelay(5); - /* initialize UVD memory controller */ - WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | - (1 << 21) | (1 << 9) | (1 << 20)); + /* enable VCPU clock */ + WREG32(mmUVD_VCPU_CNTL, 1 << 9); + + /* disable interupt */ + WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); #ifdef __BIG_ENDIAN /* swap (8 in 32) RB and IB */ @@ -306,6 +293,11 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) #endif WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl); + /* initialize UVD memory controller */ + WREG32(mmUVD_LMI_CTRL, 0x203108); + + tmp = RREG32(mmUVD_MPC_CNTL); + WREG32(mmUVD_MPC_CNTL, tmp | 0x10); WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040); WREG32(mmUVD_MPC_SET_MUXA1, 0x0); @@ -314,18 +306,20 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) WREG32(mmUVD_MPC_SET_ALU, 0); WREG32(mmUVD_MPC_SET_MUX, 0x88); - /* take all subblocks out of reset, except VCPU */ - WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - mdelay(5); + uvd_v4_2_mc_resume(adev); - /* enable VCPU clock */ - WREG32(mmUVD_VCPU_CNTL, 1 << 9); + tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL); + WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10)); /* enable UMC */ WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); - /* boot up the VCPU */ - WREG32(mmUVD_SOFT_RESET, 0); + WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK); + + WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); + + WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(10); for (i = 0; i < 10; ++i) { @@ -357,6 +351,8 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) /* enable interupt */ WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1)); + WREG32_P(mmUVD_STATUS, 0, ~(1<<2)); + /* force RBC into idle state */ WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); @@ -393,22 +389,57 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) */ static void uvd_v4_2_stop(struct amdgpu_device *adev) { - /* force RBC into idle state */ + uint32_t i, j; + uint32_t status; + WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + status = RREG32(mmUVD_STATUS); + if (status & 2) + break; + mdelay(1); + } + if (status & 2) + break; + } + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + status = RREG32(mmUVD_LMI_STATUS); + if (status & 0xf) + break; + mdelay(1); + } + if (status & 0xf) + break; + } + /* Stall UMC and register bus before resetting VCPU */ WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); - mdelay(1); - /* put VCPU into reset */ - WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - mdelay(5); + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + status = RREG32(mmUVD_LMI_STATUS); + if (status & 0x240) + break; + mdelay(1); + } + if (status & 0x240) + break; + } - /* disable VCPU clock */ - WREG32(mmUVD_VCPU_CNTL, 0x0); + WREG32_P(0x3D49, 0, ~(1 << 2)); - /* Unstall UMC and register bus */ - WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9)); 
+ + /* put LMI, VCPU, RBC etc... into reset */ + WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | + UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); + + WREG32(mmUVD_STATUS, 0); uvd_v4_2_set_dcm(adev, false); } @@ -694,8 +725,26 @@ static int uvd_v4_2_set_powergating_state(void *handle, if (state == AMD_PG_STATE_GATE) { uvd_v4_2_stop(adev); + if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) { + if (!(RREG32_SMC(ixCURRENT_PG_STATUS) & + CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) { + WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK | + UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK | + UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK)); + mdelay(20); + } + } return 0; } else { + if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) { + if (RREG32_SMC(ixCURRENT_PG_STATUS) & + CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { + WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK | + UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK | + UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK)); + mdelay(30); + } + } return uvd_v4_2_start(adev); } } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 9b49824233ae..ad8c02e423d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -152,9 +152,9 @@ static int uvd_v5_0_hw_init(void *handle) uint32_t tmp; int r; - r = uvd_v5_0_start(adev); - if (r) - goto done; + amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); + uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); + uvd_v5_0_enable_mgcg(adev, true); ring->ready = true; r = amdgpu_ring_test_ring(ring); @@ -189,11 +189,13 @@ static int uvd_v5_0_hw_init(void *handle) amdgpu_ring_write(ring, 3); amdgpu_ring_commit(ring); + done: if (!r) DRM_INFO("UVD initialized successfully.\n"); return r; + } /** @@ -208,7 +210,9 @@ static int uvd_v5_0_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->uvd.ring; - uvd_v5_0_stop(adev); + if (RREG32(mmUVD_STATUS) != 0) + uvd_v5_0_stop(adev); + ring->ready = false; return 0; @@ -310,10 +314,6 @@ static int uvd_v5_0_start(struct amdgpu_device *adev) uvd_v5_0_mc_resume(adev); - amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); - uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); - uvd_v5_0_enable_mgcg(adev, true); - /* disable interupt */ WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); @@ -456,6 +456,8 @@ static void uvd_v5_0_stop(struct amdgpu_device *adev) /* Unstall UMC and register bus */ WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + + WREG32(mmUVD_STATUS, 0); } /** @@ -792,9 +794,6 @@ static int uvd_v5_0_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; - if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) - return 0; - if (enable) { /* wait for STATUS to clear */ if (uvd_v5_0_wait_for_idle(handle)) @@ -824,17 +823,12 @@ static int uvd_v5_0_set_powergating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret = 0; - if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) - return 0; - if (state == AMD_PG_STATE_GATE) { uvd_v5_0_stop(adev); - adev->uvd.is_powergated = true; } else { ret = uvd_v5_0_start(adev); if (ret) goto out; - adev->uvd.is_powergated = false; } out: @@ -848,7 +842,8 @@ static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags) mutex_lock(&adev->pm.mutex); - if (adev->uvd.is_powergated) { + if (RREG32_SMC(ixCURRENT_PG_STATUS) & + CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { DRM_INFO("Cannot get clockgating state when UVD is powergated.\n"); goto out; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index de7e03544d00..18a6de4e1512 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -155,9 +155,9 @@ static int uvd_v6_0_hw_init(void *handle) uint32_t tmp; int r; - r = uvd_v6_0_start(adev); - if (r) - goto done; + amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); + uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); + uvd_v6_0_enable_mgcg(adev, true); ring->ready = true; r = amdgpu_ring_test_ring(ring); @@ -212,7 +212,9 @@ static int uvd_v6_0_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->uvd.ring; - uvd_v6_0_stop(adev); + if (RREG32(mmUVD_STATUS) != 0) + uvd_v6_0_stop(adev); + ring->ready = false; return 0; @@ -397,9 +399,6 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) lmi_swap_cntl = 0; mp_swap_cntl = 0; - amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); - uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); - uvd_v6_0_enable_mgcg(adev, true); uvd_v6_0_mc_resume(adev); /* disable interupt */ @@ -554,6 +553,8 @@ static void uvd_v6_0_stop(struct amdgpu_device *adev) /* Unstall UMC and register bus */ WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + + WREG32(mmUVD_STATUS, 0); } /** @@ -1018,9 +1019,6 @@ static int uvd_v6_0_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; - if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) - return 0; - if (enable) { /* wait for STATUS to clear */ if (uvd_v6_0_wait_for_idle(handle)) @@ -1049,19 +1047,14 @@ static int uvd_v6_0_set_powergating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret = 0; - if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) - return 0; - WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); if (state == AMD_PG_STATE_GATE) { uvd_v6_0_stop(adev); - adev->uvd.is_powergated = true; } else { ret = uvd_v6_0_start(adev); if (ret) goto out; - adev->uvd.is_powergated = false; } out: @@ -1075,7 +1068,8 @@ static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags) mutex_lock(&adev->pm.mutex); - if (adev->uvd.is_powergated) { + if (RREG32_SMC(ixCURRENT_PG_STATUS) & + CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { DRM_INFO("Cannot get clockgating state when UVD is powergated.\n"); goto out; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 38ed903dd6f8..9ea99348e493 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -42,10 +42,9 @@ #define VCE_V2_0_DATA_SIZE (23552 * AMDGPU_MAX_VCE_HANDLES) #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 -static void vce_v2_0_mc_resume(struct amdgpu_device *adev); static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev); static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev); -static int vce_v2_0_wait_for_idle(void *handle); + /** * vce_v2_0_ring_get_rptr - get read pointer * @@ -140,6 +139,86 @@ static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev) return -ETIMEDOUT; } +static void vce_v2_0_disable_cg(struct amdgpu_device *adev) +{ + WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7); +} + +static void vce_v2_0_init_cg(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32(mmVCE_CLOCK_GATING_A); + tmp &= ~0xfff; + tmp |= ((0 << 0) | (4 << 4)); + tmp |= 0x40000; + WREG32(mmVCE_CLOCK_GATING_A, tmp); + + tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp &= ~0xfff; + tmp |= ((0 << 0) | (4 << 4)); + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + tmp = RREG32(mmVCE_CLOCK_GATING_B); + tmp |= 0x10; + tmp &= ~0x100000; + WREG32(mmVCE_CLOCK_GATING_B, tmp); +} + +static void vce_v2_0_mc_resume(struct amdgpu_device *adev) +{ + uint64_t addr = adev->vce.gpu_addr; + uint32_t size; + + WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16)); + WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); + WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); + WREG32(mmVCE_CLOCK_GATING_B, 0xf7); + + WREG32(mmVCE_LMI_CTRL, 0x00398000); + WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1); + WREG32(mmVCE_LMI_SWAP_CNTL, 0); + WREG32(mmVCE_LMI_SWAP_CNTL1, 0); + WREG32(mmVCE_LMI_VM_CTRL, 0); + + addr += AMDGPU_VCE_FIRMWARE_OFFSET; + size = VCE_V2_0_FW_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE0, size); + + addr += size; + size = VCE_V2_0_STACK_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE1, size); + + addr += size; + size = VCE_V2_0_DATA_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE2, size); + + WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); + WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1); +} + +static bool vce_v2_0_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); +} + +static int vce_v2_0_wait_for_idle(void 
*handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + unsigned i; + + for (i = 0; i < adev->usec_timeout; i++) { + if (vce_v2_0_is_idle(handle)) + return 0; + } + return -ETIMEDOUT; +} + /** * vce_v2_0_start - start VCE block * @@ -152,11 +231,14 @@ static int vce_v2_0_start(struct amdgpu_device *adev) struct amdgpu_ring *ring; int r; - vce_v2_0_mc_resume(adev); - /* set BUSY flag */ WREG32_P(mmVCE_STATUS, 1, ~1); + vce_v2_0_init_cg(adev); + vce_v2_0_disable_cg(adev); + + vce_v2_0_mc_resume(adev); + ring = &adev->vce.ring[0]; WREG32(mmVCE_RB_RPTR, ring->wptr); WREG32(mmVCE_RB_WPTR, ring->wptr); @@ -189,6 +271,145 @@ static int vce_v2_0_start(struct amdgpu_device *adev) return 0; } +static int vce_v2_0_stop(struct amdgpu_device *adev) +{ + int i, j; + int status; + + if (vce_v2_0_lmi_clean(adev)) { + DRM_INFO("vce is not idle \n"); + return 0; + } +/* + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + status = RREG32(mmVCE_FW_REG_STATUS); + if (!(status & 1)) + break; + mdelay(1); + } + break; + } +*/ + if (vce_v2_0_wait_for_idle(adev)) { + DRM_INFO("VCE is busy, Can't set clock gateing"); + return 0; + } + + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8)); + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + status = RREG32(mmVCE_LMI_STATUS); + if (status & 0x240) + break; + mdelay(1); + } + break; + } + + WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001); + + /* put LMI, VCPU, RBC etc... into reset */ + WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1); + + WREG32(mmVCE_STATUS, 0); + + return 0; +} + +static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated) +{ + u32 tmp; + + if (gated) { + tmp = RREG32(mmVCE_CLOCK_GATING_B); + tmp |= 0xe70000; + WREG32(mmVCE_CLOCK_GATING_B, tmp); + + tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp |= 0xff000000; + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + tmp &= ~0x3fc; + WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + + WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); + } else { + tmp = RREG32(mmVCE_CLOCK_GATING_B); + tmp |= 0xe7; + tmp &= ~0xe70000; + WREG32(mmVCE_CLOCK_GATING_B, tmp); + + tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp |= 0x1fe000; + tmp &= ~0xff000000; + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + tmp |= 0x3fc; + WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + } +} + +static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated) +{ + u32 orig, tmp; + +/* LMI_MC/LMI_UMC always set in dynamic, + * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} + */ + tmp = RREG32(mmVCE_CLOCK_GATING_B); + tmp &= ~0x00060006; + +/* Exception for ECPU, IH, SEM, SYS blocks needs to be turned on/off by SW */ + if (gated) { + tmp |= 0xe10000; + WREG32(mmVCE_CLOCK_GATING_B, tmp); + } else { + tmp |= 0xe1; + tmp &= ~0xe10000; + WREG32(mmVCE_CLOCK_GATING_B, tmp); + } + + orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp &= ~0x1fe000; + tmp &= ~0xff000000; + if (tmp != orig) + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + tmp &= ~0x3fc; + if (tmp != orig) + WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + + /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */ + WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00); + + if(gated) + WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); +} + +static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable, + bool sw_cg) +{ + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) { + if (sw_cg) + vce_v2_0_set_sw_cg(adev, true); + 
+		else
+			vce_v2_0_set_dyn_cg(adev, true);
+	} else {
+		vce_v2_0_disable_cg(adev);
+
+		if (sw_cg)
+			vce_v2_0_set_sw_cg(adev, false);
+		else
+			vce_v2_0_set_dyn_cg(adev, false);
+	}
+}
+
 static int vce_v2_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -254,11 +475,8 @@ static int vce_v2_0_hw_init(void *handle)
 	int r, i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = vce_v2_0_start(adev);
-	/* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */
-	if (r)
-		return 0;
-
+	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+	vce_v2_0_enable_mgcg(adev, true, false);
 
 	for (i = 0; i < adev->vce.num_rings; i++)
 		adev->vce.ring[i].ready = false;
@@ -312,190 +530,6 @@ static int vce_v2_0_resume(void *handle)
 	return r;
 }
 
-static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
-{
-	u32 tmp;
-
-	if (gated) {
-		tmp = RREG32(mmVCE_CLOCK_GATING_B);
-		tmp |= 0xe70000;
-		WREG32(mmVCE_CLOCK_GATING_B, tmp);
-
-		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
-		tmp |= 0xff000000;
-		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
-
-		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-		tmp &= ~0x3fc;
-		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
-
-		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
-	} else {
-		tmp = RREG32(mmVCE_CLOCK_GATING_B);
-		tmp |= 0xe7;
-		tmp &= ~0xe70000;
-		WREG32(mmVCE_CLOCK_GATING_B, tmp);
-
-		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
-		tmp |= 0x1fe000;
-		tmp &= ~0xff000000;
-		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
-
-		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-		tmp |= 0x3fc;
-		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
-	}
-}
-
-static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
-{
-	if (vce_v2_0_wait_for_idle(adev)) {
-		DRM_INFO("VCE is busy, Can't set clock gateing");
-		return;
-	}
-
-	WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100);
-
-	if (vce_v2_0_lmi_clean(adev)) {
-		DRM_INFO("LMI is busy, Can't set clock gateing");
-		return;
-	}
-
-	WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-	WREG32_P(mmVCE_SOFT_RESET,
-		 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-		 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-	WREG32(mmVCE_STATUS, 0);
-
-	if (gated)
-		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
-	/* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */
-	if (gated) {
-		/* Force CLOCK OFF , set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */
-		WREG32(mmVCE_CLOCK_GATING_B, 0xe90010);
-	} else {
-		/* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */
-		WREG32(mmVCE_CLOCK_GATING_B, 0x800f1);
-	}
-
-	/* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0}*/;
-	WREG32(mmVCE_UENC_CLOCK_GATING, 0x40);
-
-	/* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
-	WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
-
-	WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100);
-	if(!gated) {
-		WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-		mdelay(100);
-		WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
-		vce_v2_0_firmware_loaded(adev);
-		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
-	}
-}
-
-static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
-{
-	WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
-}
-
-static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
-{
-	bool sw_cg = false;
-
-	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
-		if (sw_cg)
-			vce_v2_0_set_sw_cg(adev, true);
-		else
-			vce_v2_0_set_dyn_cg(adev, true);
-	} else {
-		vce_v2_0_disable_cg(adev);
-
-		if (sw_cg)
-			vce_v2_0_set_sw_cg(adev, false);
-		else
-			vce_v2_0_set_dyn_cg(adev, false);
-	}
-}
-
-static void vce_v2_0_init_cg(struct amdgpu_device *adev)
-{
-	u32 tmp;
-
-	tmp = RREG32(mmVCE_CLOCK_GATING_A);
-	tmp &= ~0xfff;
-	tmp |= ((0 << 0) | (4 << 4));
-	tmp |= 0x40000;
-	WREG32(mmVCE_CLOCK_GATING_A, tmp);
-
-	tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
-	tmp &= ~0xfff;
-	tmp |= ((0 << 0) | (4 << 4));
-	WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
-
-	tmp = RREG32(mmVCE_CLOCK_GATING_B);
-	tmp |= 0x10;
-	tmp &= ~0x100000;
-	WREG32(mmVCE_CLOCK_GATING_B, tmp);
-}
-
-static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
-{
-	uint64_t addr = adev->vce.gpu_addr;
-	uint32_t size;
-
-	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
-	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
-	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
-	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
-
-	WREG32(mmVCE_LMI_CTRL, 0x00398000);
-	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
-	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
-	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
-	WREG32(mmVCE_LMI_VM_CTRL, 0);
-
-	addr += AMDGPU_VCE_FIRMWARE_OFFSET;
-	size = VCE_V2_0_FW_SIZE;
-	WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
-	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
-
-	addr += size;
-	size = VCE_V2_0_STACK_SIZE;
-	WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
-	WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
-
-	addr += size;
-	size = VCE_V2_0_DATA_SIZE;
-	WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
-	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
-
-	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
-
-	vce_v2_0_init_cg(adev);
-}
-
-static bool vce_v2_0_is_idle(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
-}
-
-static int vce_v2_0_wait_for_idle(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	unsigned i;
-
-	for (i = 0; i < adev->usec_timeout; i++) {
-		if (vce_v2_0_is_idle(handle))
-			return 0;
-	}
-	return -ETIMEDOUT;
-}
-
 static int vce_v2_0_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -539,33 +573,20 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
 	return 0;
 }
 
-static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
-{
-	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
-
-	if (enable)
-		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-	else
-		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-
-	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
-}
-
-
 static int vce_v2_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	bool gate = false;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
-
+	bool sw_cg = false;
 
-	vce_v2_0_set_bypass_mode(adev, enable);
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (state == AMD_CG_STATE_GATE)
+	if (state == AMD_CG_STATE_GATE) {
 		gate = true;
+		sw_cg = true;
+	}
 
-	vce_v2_0_enable_mgcg(adev, gate);
+	vce_v2_0_enable_mgcg(adev, gate, sw_cg);
 
 	return 0;
 }
@@ -582,12 +603,8 @@ static int vce_v2_0_set_powergating_state(void *handle,
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
-		return 0;
-
 	if (state == AMD_PG_STATE_GATE)
-		/* XXX do we need a vce_v2_0_stop()? */
-		return 0;
+		return vce_v2_0_stop(adev);
 	else
 		return vce_v2_0_start(adev);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 8db26559fd1b..93ec8815bb13 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -230,10 +230,6 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring;
 	int idx, r;
 
-	vce_v3_0_override_vce_clock_gating(adev, true);
-	if (!(adev->flags & AMD_IS_APU))
-		amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
-
 	ring = &adev->vce.ring[0];
 	WREG32(mmVCE_RB_RPTR, ring->wptr);
 	WREG32(mmVCE_RB_WPTR, ring->wptr);
@@ -436,9 +432,9 @@ static int vce_v3_0_hw_init(void *handle)
 	int r, i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = vce_v3_0_start(adev);
-	if (r)
-		return r;
+	vce_v3_0_override_vce_clock_gating(adev, true);
+	if (!(adev->flags & AMD_IS_APU))
+		amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
 
 	for (i = 0; i < adev->vce.num_rings; i++)
 		adev->vce.ring[i].ready = false;
@@ -514,6 +510,8 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
 	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
 	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
 	WREG32(mmVCE_LMI_VM_CTRL, 0);
+	WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000);
+
 	if (adev->asic_type >= CHIP_STONEY) {
 		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
 		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
@@ -766,17 +764,14 @@ static int vce_v3_0_set_powergating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret = 0;
 
-	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
-		return 0;
-
 	if (state == AMD_PG_STATE_GATE) {
-		adev->vce.is_powergated = true;
-		/* XXX do we need a vce_v3_0_stop()? */
+		ret = vce_v3_0_stop(adev);
+		if (ret)
+			goto out;
 	} else {
 		ret = vce_v3_0_start(adev);
 		if (ret)
 			goto out;
-		adev->vce.is_powergated = false;
 	}
 
 out:
@@ -790,7 +785,8 @@ static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
 
 	mutex_lock(&adev->pm.mutex);
 
-	if (adev->vce.is_powergated) {
+	if (RREG32_SMC(ixCURRENT_PG_STATUS) &
+	    CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) {
 		DRM_INFO("Cannot get clockgating state when VCE is powergated.\n");
 		goto out;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 4922fff08c3c..50bdb24ef8d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -721,6 +721,7 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
 		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
 			/* enable BM */
 			pci_set_master(adev->pdev);
+			adev->has_hw_reset = true;
 			return 0;
 		}
 		udelay(1);
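The recurring pattern in the uvd_v6_0_get_clockgating_state() and vce_v3_0_get_clockgating_state() hunks above is to read the live power-gate state back from the SMC instead of trusting a cached driver flag, which is what makes the dropped is_powergated booleans unnecessary. A minimal sketch of that readback, assuming only the accessors and masks already used in the diff (RREG32_SMC, ixCURRENT_PG_STATUS, CURRENT_PG_STATUS__VCE_PG_STATUS_MASK); the helper name is hypothetical and not part of the patch:

/* Hypothetical helper, not in the patch: query the current VCE power-gate
 * state from the SMC status register.  A cached bool can go stale (for
 * example across the PCI config reset that now sets adev->has_hw_reset),
 * whereas a register readback always reflects the hardware.
 */
static bool vce_v3_0_is_powergated(struct amdgpu_device *adev)
{
	return !!(RREG32_SMC(ixCURRENT_PG_STATUS) &
		  CURRENT_PG_STATUS__VCE_PG_STATUS_MASK);
}

With such a helper, the guard in vce_v3_0_get_clockgating_state() reduces to if (vce_v3_0_is_powergated(adev)); the UVD path is the same check with CURRENT_PG_STATUS__UVD_PG_STATUS_MASK.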