author     Dave Airlie <airlied@redhat.com>    2022-04-29 03:27:04 +0300
committer  Dave Airlie <airlied@redhat.com>    2022-04-29 03:27:05 +0300
commit     9d9f720733b7e8d11e4cc53b53f461b117dab839 (patch)
tree       9459ba7739fb3fb96d2fd07e5ddeac74af232bf1 /drivers/gpu
parent     22c73ba44bb05a0e1ed7063d12fb31a461a154ec (diff)
parent     fb8cc3318e47e1a0ced4025ef614317b541147e7 (diff)
download   linux-9d9f720733b7e8d11e4cc53b53f461b117dab839.tar.xz
Merge tag 'amd-drm-fixes-5.18-2022-04-27' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes
amd-drm-fixes-5.18-2022-04-27:
amdgpu:
- Runtime pm fix
- DCN memory leak fix in error path
- SI DPM deadlock fix
- S0ix fix
amdkfd:
- GWS fix
- GWS support for CRIU
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220428023232.5794-1-alexander.deucher@amd.com
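
A note on the SI DPM deadlock fix in this set: the display-bandwidth update and ring-fence waits move out of the legacy/powerplay compute-clocks implementations (which run under adev->pm.mutex) and up into the amdgpu_dpm_compute_clocks() wrapper, before the mutex is taken, and SI now updates UVD/VCE power state directly instead of re-entering through si_set_powergating_by_smu(). A minimal illustrative sketch of that lock-ordering pattern follows; the stub functions are stand-ins, not the real amdgpu API:

#include <pthread.h>

static pthread_mutex_t pm_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stubs standing in for the real amdgpu calls. */
static void update_display_bandwidth(void) { /* amdgpu_display_bandwidth_update() */ }
static void wait_rings_idle(void)          { /* amdgpu_fence_wait_empty() per ring */ }
static void hw_compute_clocks(void)        { /* pp_funcs->pm_compute_clocks() */ }

static void dpm_compute_clocks(void)
{
        /* Blocking work runs first, with pm_mutex deliberately not held,
         * so nothing waited on here can deadlock against the mutex. */
        update_display_bandwidth();
        wait_rings_idle();

        /* Only the hardware programming sits in the critical section. */
        pthread_mutex_lock(&pm_mutex);
        hw_compute_clocks();
        pthread_mutex_unlock(&pm_mutex);
}

int main(void)
{
        dpm_compute_clocks();
        return 0;
}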
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c                 105
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c                   10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c    83
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h                     2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c   10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c     1
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c                      39
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c           10
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c               35
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c         10
10 files changed, 165 insertions, 140 deletions
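
Before the diffs themselves, a note on the first change: the amdgpu_drv.c diff factors the display-activity checks out of the runtime-suspend and runtime-idle callbacks into one shared helper, and runtime suspend now also refuses while a display is connected. A minimal model of the resulting decision logic (the struct is an illustrative stub, not the real DRM state):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the DRM state the real helper walks. */
struct display_state {
        bool connector_connected;  /* any connector reports connected */
        bool crtc_active;          /* any CRTC is actively scanning out */
};

/* Models amdgpu_runtime_idle_check_display(): stay busy while any display
 * is connected (avoids hotplug-driven wakeups right after runtime resume)
 * or any CRTC is active; only then may the GPU runtime suspend. */
static int runtime_idle_check_display(const struct display_state *s)
{
        if (s->connector_connected || s->crtc_active)
                return -EBUSY;
        return 0;
}

int main(void)
{
        struct display_state s = { .connector_connected = true };
        printf("check: %d\n", runtime_idle_check_display(&s)); /* -16 (-EBUSY) on Linux */
        return 0;
}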
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 29e9419a914b..7fd0277b2805 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2395,6 +2395,71 @@ static int amdgpu_pmops_restore(struct device *dev)
 	return amdgpu_device_resume(drm_dev, true);
 }
 
+static int amdgpu_runtime_idle_check_display(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+	if (adev->mode_info.num_crtc) {
+		struct drm_connector *list_connector;
+		struct drm_connector_list_iter iter;
+		int ret = 0;
+
+		/* XXX: Return busy if any displays are connected to avoid
+		 * possible display wakeups after runtime resume due to
+		 * hotplug events in case any displays were connected while
+		 * the GPU was in suspend. Remove this once that is fixed.
+		 */
+		mutex_lock(&drm_dev->mode_config.mutex);
+		drm_connector_list_iter_begin(drm_dev, &iter);
+		drm_for_each_connector_iter(list_connector, &iter) {
+			if (list_connector->status == connector_status_connected) {
+				ret = -EBUSY;
+				break;
+			}
+		}
+		drm_connector_list_iter_end(&iter);
+		mutex_unlock(&drm_dev->mode_config.mutex);
+
+		if (ret)
+			return ret;
+
+		if (amdgpu_device_has_dc_support(adev)) {
+			struct drm_crtc *crtc;
+
+			drm_for_each_crtc(crtc, drm_dev) {
+				drm_modeset_lock(&crtc->mutex, NULL);
+				if (crtc->state->active)
+					ret = -EBUSY;
+				drm_modeset_unlock(&crtc->mutex);
+				if (ret < 0)
+					break;
+			}
+		} else {
+			mutex_lock(&drm_dev->mode_config.mutex);
+			drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
+
+			drm_connector_list_iter_begin(drm_dev, &iter);
+			drm_for_each_connector_iter(list_connector, &iter) {
+				if (list_connector->dpms == DRM_MODE_DPMS_ON) {
+					ret = -EBUSY;
+					break;
+				}
+			}
+
+			drm_connector_list_iter_end(&iter);
+
+			drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
+			mutex_unlock(&drm_dev->mode_config.mutex);
+		}
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int amdgpu_pmops_runtime_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -2407,6 +2472,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 		return -EBUSY;
 	}
 
+	ret = amdgpu_runtime_idle_check_display(dev);
+	if (ret)
+		return ret;
+
 	/* wait for all rings to drain before suspending */
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -2516,41 +2585,7 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
 		return -EBUSY;
 	}
 
-	if (amdgpu_device_has_dc_support(adev)) {
-		struct drm_crtc *crtc;
-
-		drm_for_each_crtc(crtc, drm_dev) {
-			drm_modeset_lock(&crtc->mutex, NULL);
-			if (crtc->state->active)
-				ret = -EBUSY;
-			drm_modeset_unlock(&crtc->mutex);
-			if (ret < 0)
-				break;
-		}
-
-	} else {
-		struct drm_connector *list_connector;
-		struct drm_connector_list_iter iter;
-
-		mutex_lock(&drm_dev->mode_config.mutex);
-		drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
-
-		drm_connector_list_iter_begin(drm_dev, &iter);
-		drm_for_each_connector_iter(list_connector, &iter) {
-			if (list_connector->dpms == DRM_MODE_DPMS_ON) {
-				ret = -EBUSY;
-				break;
-			}
-		}
-
-		drm_connector_list_iter_end(&iter);
-
-		drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
-		mutex_unlock(&drm_dev->mode_config.mutex);
-	}
-
-	if (ret == -EBUSY)
-		DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+	ret = amdgpu_runtime_idle_check_display(dev);
 
 	pm_runtime_mark_last_busy(dev);
 	pm_runtime_autosuspend(dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 5228421b0f72..7c956cf21bc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -1151,6 +1151,16 @@ static int gmc_v10_0_set_clockgating_state(void *handle,
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	/*
+	 * The issue mmhub can't disconnect from DF with MMHUB clock gating being disabled
+	 * is a new problem observed at DF 3.0.3, however with the same suspend sequence not
+	 * seen any issue on the DF 3.0.2 series platform.
+	 */
+	if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) {
+		dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n");
+		return 0;
+	}
+
 	r = adev->mmhub.funcs->set_clockgating(adev, state);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index acf4f7975850..198672264492 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -130,19 +130,33 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
 }
 
 static void increment_queue_count(struct device_queue_manager *dqm,
-				  enum kfd_queue_type type)
+				  struct qcm_process_device *qpd,
+				  struct queue *q)
 {
 	dqm->active_queue_count++;
-	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
 		dqm->active_cp_queue_count++;
+
+	if (q->properties.is_gws) {
+		dqm->gws_queue_count++;
+		qpd->mapped_gws_queue = true;
+	}
 }
 
 static void decrement_queue_count(struct device_queue_manager *dqm,
-				  enum kfd_queue_type type)
+				  struct qcm_process_device *qpd,
+				  struct queue *q)
 {
 	dqm->active_queue_count--;
-	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
 		dqm->active_cp_queue_count--;
+
+	if (q->properties.is_gws) {
+		dqm->gws_queue_count--;
+		qpd->mapped_gws_queue = false;
+	}
 }
 
 /*
@@ -412,7 +426,7 @@ add_queue_to_list:
 	list_add(&q->list, &qpd->queues_list);
 	qpd->queue_count++;
 	if (q->properties.is_active)
-		increment_queue_count(dqm, q->properties.type);
+		increment_queue_count(dqm, qpd, q);
 
 	/*
 	 * Unconditionally increment this counter, regardless of the queue's
@@ -601,13 +615,8 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
 		deallocate_vmid(dqm, qpd, q);
 	}
 	qpd->queue_count--;
-	if (q->properties.is_active) {
-		decrement_queue_count(dqm, q->properties.type);
-		if (q->properties.is_gws) {
-			dqm->gws_queue_count--;
-			qpd->mapped_gws_queue = false;
-		}
-	}
+	if (q->properties.is_active)
+		decrement_queue_count(dqm, qpd, q);
 
 	return retval;
 }
@@ -700,12 +709,11 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
 	 * dqm->active_queue_count to determine whether a new runlist must be
 	 * uploaded.
 	 */
-	if (q->properties.is_active && !prev_active)
-		increment_queue_count(dqm, q->properties.type);
-	else if (!q->properties.is_active && prev_active)
-		decrement_queue_count(dqm, q->properties.type);
-
-	if (q->gws && !q->properties.is_gws) {
+	if (q->properties.is_active && !prev_active) {
+		increment_queue_count(dqm, &pdd->qpd, q);
+	} else if (!q->properties.is_active && prev_active) {
+		decrement_queue_count(dqm, &pdd->qpd, q);
+	} else if (q->gws && !q->properties.is_gws) {
 		if (q->properties.is_active) {
 			dqm->gws_queue_count++;
 			pdd->qpd.mapped_gws_queue = true;
@@ -767,11 +775,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
 		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 				q->properties.type)];
 		q->properties.is_active = false;
-		decrement_queue_count(dqm, q->properties.type);
-		if (q->properties.is_gws) {
-			dqm->gws_queue_count--;
-			qpd->mapped_gws_queue = false;
-		}
+		decrement_queue_count(dqm, qpd, q);
 
 		if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
 			continue;
@@ -817,7 +821,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
 			continue;
 
 		q->properties.is_active = false;
-		decrement_queue_count(dqm, q->properties.type);
+		decrement_queue_count(dqm, qpd, q);
 	}
 	pdd->last_evict_timestamp = get_jiffies_64();
 	retval = execute_queues_cpsch(dqm,
@@ -888,11 +892,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 				q->properties.type)];
 		q->properties.is_active = true;
-		increment_queue_count(dqm, q->properties.type);
-		if (q->properties.is_gws) {
-			dqm->gws_queue_count++;
-			qpd->mapped_gws_queue = true;
-		}
+		increment_queue_count(dqm, qpd, q);
 
 		if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
 			continue;
@@ -950,7 +950,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
 			continue;
 
 		q->properties.is_active = true;
-		increment_queue_count(dqm, q->properties.type);
+		increment_queue_count(dqm, &pdd->qpd, q);
 	}
 	retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1378,7 +1378,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 			dqm->total_queue_count);
 
 	list_add(&kq->list, &qpd->priv_queue_list);
-	increment_queue_count(dqm, kq->queue->properties.type);
+	increment_queue_count(dqm, qpd, kq->queue);
 	qpd->is_debug = true;
 	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	dqm_unlock(dqm);
@@ -1392,7 +1392,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
 	dqm_lock(dqm);
 	list_del(&kq->list);
-	decrement_queue_count(dqm, kq->queue->properties.type);
+	decrement_queue_count(dqm, qpd, kq->queue);
 	qpd->is_debug = false;
 	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
 	/*
@@ -1467,7 +1467,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	qpd->queue_count++;
 
 	if (q->properties.is_active) {
-		increment_queue_count(dqm, q->properties.type);
+		increment_queue_count(dqm, qpd, q);
 
 		execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1683,15 +1683,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 	list_del(&q->list);
 	qpd->queue_count--;
 	if (q->properties.is_active) {
-		decrement_queue_count(dqm, q->properties.type);
+		decrement_queue_count(dqm, qpd, q);
 		retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 		if (retval == -ETIME)
 			qpd->reset_wavefronts = true;
-		if (q->properties.is_gws) {
-			dqm->gws_queue_count--;
-			qpd->mapped_gws_queue = false;
-		}
 	}
 
 	/*
@@ -1932,7 +1928,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 	/* Clean all kernel queues */
 	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
 		list_del(&kq->list);
-		decrement_queue_count(dqm, kq->queue->properties.type);
+		decrement_queue_count(dqm, qpd, kq->queue);
 		qpd->is_debug = false;
 		dqm->total_queue_count--;
 		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1945,13 +1941,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 		else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 			deallocate_sdma_queue(dqm, q);
 
-		if (q->properties.is_active) {
-			decrement_queue_count(dqm, q->properties.type);
-			if (q->properties.is_gws) {
-				dqm->gws_queue_count--;
-				qpd->mapped_gws_queue = false;
-			}
-		}
+		if (q->properties.is_active)
+			decrement_queue_count(dqm, qpd, q);
 
 		dqm->total_queue_count--;
 	}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 9967a73d5b0f..8f58fc491b28 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1103,7 +1103,7 @@ struct kfd_criu_queue_priv_data {
 	uint32_t priority;
 	uint32_t q_percent;
 	uint32_t doorbell_id;
-	uint32_t is_gws;
+	uint32_t gws;
 	uint32_t sdma_id;
 	uint32_t eop_ring_buffer_size;
 	uint32_t ctx_save_restore_area_size;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 6eca9509f2e3..4f58e671d39b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -636,6 +636,8 @@ static int criu_checkpoint_queue(struct kfd_process_device *pdd,
 	q_data->ctx_save_restore_area_size =
 		q->properties.ctx_save_restore_area_size;
 
+	q_data->gws = !!q->gws;
+
 	ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
 	if (ret) {
 		pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
@@ -743,7 +745,6 @@ static void set_queue_properties_from_criu(struct queue_properties *qp,
 				      struct kfd_criu_queue_priv_data *q_data)
 {
 	qp->is_interop = false;
-	qp->is_gws = q_data->is_gws;
 	qp->queue_percent = q_data->q_percent;
 	qp->priority = q_data->priority;
 	qp->queue_address = q_data->q_address;
@@ -826,12 +827,15 @@ int kfd_criu_restore_queue(struct kfd_process *p,
 				NULL);
 	if (ret) {
 		pr_err("Failed to create new queue err:%d\n", ret);
-		ret = -EINVAL;
+		goto exit;
 	}
 
+	if (q_data->gws)
+		ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);
+
 exit:
 	if (ret)
-		pr_err("Failed to create queue (%d)\n", ret);
+		pr_err("Failed to restore queue (%d)\n", ret);
 	else
 		pr_debug("Queue id %d was restored successfully\n", queue_id);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 3fe4bfbb98a0..faab59508d82 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -997,6 +997,7 @@ static struct clock_source *dcn21_clock_source_create(
 		return &clk_src->base;
 	}
 
+	kfree(clk_src);
 	BREAK_TO_DEBUGGER();
 	return NULL;
 }
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 5504d81c77b7..72e7b5d40af6 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -427,6 +427,7 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	int i;
 
 	if (!adev->pm.dpm_enabled)
 		return;
@@ -434,6 +435,15 @@ void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
 	if (!pp_funcs->pm_compute_clocks)
 		return;
 
+	if (adev->mode_info.num_crtc)
+		amdgpu_display_bandwidth_update(adev);
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (ring && ring->sched.ready)
+			amdgpu_fence_wait_empty(ring);
+	}
+
 	mutex_lock(&adev->pm.mutex);
 	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
 	mutex_unlock(&adev->pm.mutex);
@@ -443,6 +453,20 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
 	int ret = 0;
 
+	if (adev->family == AMDGPU_FAMILY_SI) {
+		mutex_lock(&adev->pm.mutex);
+		if (enable) {
+			adev->pm.dpm.uvd_active = true;
+			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+		} else {
+			adev->pm.dpm.uvd_active = false;
+		}
+		mutex_unlock(&adev->pm.mutex);
+
+		amdgpu_dpm_compute_clocks(adev);
+		return;
+	}
+
 	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
 	if (ret)
 		DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
@@ -453,6 +477,21 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
 	int ret = 0;
 
+	if (adev->family == AMDGPU_FAMILY_SI) {
+		mutex_lock(&adev->pm.mutex);
+		if (enable) {
+			adev->pm.dpm.vce_active = true;
+			/* XXX select vce level based on ring/task */
+			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
+		} else {
+			adev->pm.dpm.vce_active = false;
+		}
+		mutex_unlock(&adev->pm.mutex);
+
+		amdgpu_dpm_compute_clocks(adev);
+		return;
+	}
+
 	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
 	if (ret)
 		DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index 9613c6181c17..d3fe149d8476 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -1028,16 +1028,6 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 void amdgpu_legacy_dpm_compute_clocks(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int i = 0;
-
-	if (adev->mode_info.num_crtc)
-		amdgpu_display_bandwidth_update(adev);
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		if (ring && ring->sched.ready)
-			amdgpu_fence_wait_empty(ring);
-	}
 
 	amdgpu_dpm_get_active_displays(adev);
 
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index caae54487f9c..633dab14f51c 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -3892,40 +3892,6 @@ static int si_set_boot_state(struct amdgpu_device *adev)
 }
 #endif
 
-static int si_set_powergating_by_smu(void *handle,
-				     uint32_t block_type,
-				     bool gate)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	switch (block_type) {
-	case AMD_IP_BLOCK_TYPE_UVD:
-		if (!gate) {
-			adev->pm.dpm.uvd_active = true;
-			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
-		} else {
-			adev->pm.dpm.uvd_active = false;
-		}
-
-		amdgpu_legacy_dpm_compute_clocks(handle);
-		break;
-	case AMD_IP_BLOCK_TYPE_VCE:
-		if (!gate) {
-			adev->pm.dpm.vce_active = true;
-			/* XXX select vce level based on ring/task */
-			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
-		} else {
-			adev->pm.dpm.vce_active = false;
-		}
-
-		amdgpu_legacy_dpm_compute_clocks(handle);
-		break;
-	default:
-		break;
-	}
-	return 0;
-}
-
 static int si_set_sw_state(struct amdgpu_device *adev)
 {
 	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
@@ -8125,7 +8091,6 @@ static const struct amd_pm_funcs si_dpm_funcs = {
 	.print_power_state = &si_dpm_print_power_state,
 	.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
 	.force_performance_level = &si_dpm_force_performance_level,
-	.set_powergating_by_smu = &si_set_powergating_by_smu,
 	.vblank_too_short = &si_dpm_vblank_too_short,
 	.set_fan_control_mode = &si_dpm_set_fan_control_mode,
 	.get_fan_control_mode = &si_dpm_get_fan_control_mode,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index a2da46bf3985..71e9c6ce6b1a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1487,16 +1487,6 @@ static void pp_pm_compute_clocks(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 	struct amdgpu_device *adev = hwmgr->adev;
-	int i = 0;
-
-	if (adev->mode_info.num_crtc)
-		amdgpu_display_bandwidth_update(adev);
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		if (ring && ring->sched.ready)
-			amdgpu_fence_wait_empty(ring);
-	}
 
 	if (!amdgpu_device_has_dc_support(adev)) {
 		amdgpu_dpm_get_active_displays(adev);
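
For reference, the kfd_device_queue_manager.c refactor above reduces to the following runnable model: once increment_queue_count()/decrement_queue_count() own the GWS bookkeeping, every activate/deactivate path updates dqm->gws_queue_count and qpd->mapped_gws_queue in lockstep, which is what fixes the counts drifting apart on the eviction and destroy paths. The types and names here are simplified stand-ins for the kernel's, not the real KFD structures:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the KFD device/process/queue state. */
struct dqm   { int active_queue_count; int gws_queue_count; };
struct qpd   { bool mapped_gws_queue; };
struct queue { bool is_active; bool is_gws; };

/* Both counters change together, so no caller can update one
 * without the other -- the invariant the real fix restores. */
static void increment_queue_count(struct dqm *dqm, struct qpd *qpd, struct queue *q)
{
        dqm->active_queue_count++;
        if (q->is_gws) {
                dqm->gws_queue_count++;
                qpd->mapped_gws_queue = true;
        }
}

static void decrement_queue_count(struct dqm *dqm, struct qpd *qpd, struct queue *q)
{
        dqm->active_queue_count--;
        if (q->is_gws) {
                dqm->gws_queue_count--;
                qpd->mapped_gws_queue = false;
        }
}

int main(void)
{
        struct dqm dqm = {0};
        struct qpd qpd = {0};
        struct queue q = { .is_active = true, .is_gws = true };

        increment_queue_count(&dqm, &qpd, &q);  /* queue becomes active */
        decrement_queue_count(&dqm, &qpd, &q);  /* evict/destroy path */
        printf("gws_queue_count=%d mapped=%d\n",
               dqm.gws_queue_count, (int)qpd.mapped_gws_queue); /* 0 0 */
        return 0;
}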