author     Maxime Ripard <maxime@cerno.tech>   2022-04-05 12:06:58 +0300
committer  Maxime Ripard <maxime@cerno.tech>   2022-04-05 12:06:58 +0300
commit     9cbbd694a58bdf24def2462276514c90cab7cf80 (patch)
tree       98a504890134d34631a6a0ecbce94d3f1ecc21fc /drivers/gpu/drm/amd/amdgpu
parent     71d637823cac7748079a912e0373476c7cf6f985 (diff)
parent     3123109284176b1532874591f7c81f3837bbdc17 (diff)
Merge drm/drm-next into drm-misc-next
Let's start the 5.19 development cycle.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aldebaran.c | 68
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 156
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 244
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h | 80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 140
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 154
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 212
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 683
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 163
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 73
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 166
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c | 68
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 112
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c | 68
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 131
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 548
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 250
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 409
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 114
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 117
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 142
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 145
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 116
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v1_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v2_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v2_1.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 353
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 230
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 90
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mca_v3_0.c | 108
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 129
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 75
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c | 81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 71
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15_common.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_ras_if.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_1.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 225
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_7.h | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_7.c | 166
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_7.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 2
154 files changed, 4601 insertions, 3234 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 7fedbb725e17..40e2c6e2df79 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -46,18 +46,18 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
amdgpu_gem.o amdgpu_ring.o \
- amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
+ amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o \
atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_dma_buf.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \
amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \
- amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o amdgpu_mmhub.o \
+ amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o \
amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
- amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o \
+ amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index bcfdb63b1d42..c6cc493a5486 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -31,6 +31,17 @@
#include "amdgpu_psp.h"
#include "amdgpu_xgmi.h"
+static bool aldebaran_is_mode2_default(struct amdgpu_reset_control *reset_ctl)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
+ adev->gmc.xgmi.connected_to_cpu))
+ return true;
+
+ return false;
+}
+
static struct amdgpu_reset_handler *
aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
@@ -48,7 +59,7 @@ aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
}
}
- if (adev->gmc.xgmi.connected_to_cpu) {
+ if (aldebaran_is_mode2_default(reset_ctl)) {
list_for_each_entry(handler, &reset_ctl->reset_handlers,
handler_list) {
if (handler->reset_method == AMD_RESET_METHOD_MODE2) {
@@ -136,18 +147,31 @@ static int
aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
- struct amdgpu_device *tmp_adev = NULL;
struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ struct amdgpu_device *tmp_adev = NULL;
+ struct list_head reset_device_list;
int r = 0;
dev_dbg(adev->dev, "aldebaran perform hw reset\n");
- if (reset_context->hive == NULL) {
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
+ reset_context->hive == NULL) {
/* Wrong context, return error */
return -EINVAL;
}
- list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
- gmc.xgmi.head) {
+ INIT_LIST_HEAD(&reset_device_list);
+ if (reset_context->hive) {
+ list_for_each_entry (tmp_adev,
+ &reset_context->hive->device_list,
+ gmc.xgmi.head)
+ list_add_tail(&tmp_adev->reset_list,
+ &reset_device_list);
+ } else {
+ list_add_tail(&reset_context->reset_req_dev->reset_list,
+ &reset_device_list);
+ }
+
+ list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
mutex_lock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
}
@@ -155,8 +179,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
* Mode2 reset doesn't need any sync between nodes in XGMI hive, instead launch
* them together so that they can be completed asynchronously on multiple nodes
*/
- list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
- gmc.xgmi.head) {
+ list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
/* For XGMI run all resets in parallel to speed up the process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
if (!queue_work(system_unbound_wq,
@@ -174,9 +197,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
/* For XGMI wait for all resets to complete before proceed */
if (!r) {
- list_for_each_entry(tmp_adev,
- &reset_context->hive->device_list,
- gmc.xgmi.head) {
+ list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
flush_work(&tmp_adev->reset_cntl->reset_work);
r = tmp_adev->asic_reset_res;
@@ -186,8 +207,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
}
}
- list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
- gmc.xgmi.head) {
+ list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
}
@@ -260,7 +280,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
adev->gfx.rlc.funcs->resume(adev);
/* Wait for FW reset event complete */
- r = smu_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0);
+ r = amdgpu_dpm_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0);
if (r) {
dev_err(adev->dev,
"Failed to get response from firmware after reset\n");
@@ -319,16 +339,30 @@ static int
aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
- int r;
struct amdgpu_device *tmp_adev = NULL;
+ struct list_head reset_device_list;
+ int r;
- if (reset_context->hive == NULL) {
+ if (reset_context->reset_req_dev->ip_versions[MP1_HWIP][0] ==
+ IP_VERSION(13, 0, 2) &&
+ reset_context->hive == NULL) {
/* Wrong context, return error */
return -EINVAL;
}
- list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
- gmc.xgmi.head) {
+ INIT_LIST_HEAD(&reset_device_list);
+ if (reset_context->hive) {
+ list_for_each_entry (tmp_adev,
+ &reset_context->hive->device_list,
+ gmc.xgmi.head)
+ list_add_tail(&tmp_adev->reset_list,
+ &reset_device_list);
+ } else {
+ list_add_tail(&reset_context->reset_req_dev->reset_list,
+ &reset_device_list);
+ }
+
+ list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
dev_info(tmp_adev->dev,
"GPU reset succeeded, trying to resume\n");
r = aldebaran_mode2_restore_ip(tmp_adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b89406b01694..cdf0818088b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -60,7 +60,6 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
-#include <drm/gpu_scheduler.h>
#include <kgd_kfd_interface.h>
#include "dm_pp_interface.h"
@@ -99,7 +98,6 @@
#include "amdgpu_gem.h"
#include "amdgpu_doorbell.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_smu.h"
#include "amdgpu_discovery.h"
#include "amdgpu_mes.h"
#include "amdgpu_umc.h"
@@ -109,6 +107,7 @@
#include "amdgpu_smuio.h"
#include "amdgpu_fdinfo.h"
#include "amdgpu_mca.h"
+#include "amdgpu_ras.h"
#define MAX_GPU_INSTANCE 16
@@ -155,8 +154,6 @@ extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
-extern int amdgpu_benchmarking;
-extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
@@ -197,7 +194,6 @@ extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
-extern uint amdgpu_freesync_vid_mode;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dm_abm_level;
extern int amdgpu_backlight;
@@ -214,6 +210,7 @@ extern int amdgpu_mes;
extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;
extern int amdgpu_smartshift_bias;
+extern int amdgpu_use_xgmi_p2p;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
extern bool debug_evictions;
@@ -235,6 +232,9 @@ extern int amdgpu_cik_support;
#endif
extern int amdgpu_num_kcq;
+#define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
+extern int amdgpu_vcnfw_log;
+
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
@@ -276,9 +276,6 @@ extern int amdgpu_num_kcq;
#define AMDGPU_SMARTSHIFT_MIN_BIAS (-100)
struct amdgpu_device;
-struct amdgpu_ib;
-struct amdgpu_cs_parser;
-struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
@@ -373,7 +370,8 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
*/
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);
-
+bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
+ u8 *bios, u32 length_bytes);
/*
* Clocks
*/
@@ -466,20 +464,6 @@ struct amdgpu_flip_work {
/*
- * CP & rings.
- */
-
-struct amdgpu_ib {
- struct amdgpu_sa_bo *sa_bo;
- uint32_t length_dw;
- uint64_t gpu_addr;
- uint32_t *ptr;
- uint32_t flags;
-};
-
-extern const struct drm_sched_backend_ops amdgpu_sched_ops;
-
-/*
* file private structure
*/
@@ -494,79 +478,6 @@ struct amdgpu_fpriv {
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
-int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- unsigned size,
- enum amdgpu_ib_pool_type pool,
- struct amdgpu_ib *ib);
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct dma_fence *f);
-int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ibs, struct amdgpu_job *job,
- struct dma_fence **f);
-int amdgpu_ib_pool_init(struct amdgpu_device *adev);
-void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
-int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-
-/*
- * CS.
- */
-struct amdgpu_cs_chunk {
- uint32_t chunk_id;
- uint32_t length_dw;
- void *kdata;
-};
-
-struct amdgpu_cs_post_dep {
- struct drm_syncobj *syncobj;
- struct dma_fence_chain *chain;
- u64 point;
-};
-
-struct amdgpu_cs_parser {
- struct amdgpu_device *adev;
- struct drm_file *filp;
- struct amdgpu_ctx *ctx;
-
- /* chunks */
- unsigned nchunks;
- struct amdgpu_cs_chunk *chunks;
-
- /* scheduler job object */
- struct amdgpu_job *job;
- struct drm_sched_entity *entity;
-
- /* buffer objects */
- struct ww_acquire_ctx ticket;
- struct amdgpu_bo_list *bo_list;
- struct amdgpu_mn *mn;
- struct amdgpu_bo_list_entry vm_pd;
- struct list_head validated;
- struct dma_fence *fence;
- uint64_t bytes_moved_threshold;
- uint64_t bytes_moved_vis_threshold;
- uint64_t bytes_moved;
- uint64_t bytes_moved_vis;
-
- /* user fence */
- struct amdgpu_bo_list_entry uf_entry;
-
- unsigned num_post_deps;
- struct amdgpu_cs_post_dep *post_deps;
-};
-
-static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
- uint32_t ib_idx, int idx)
-{
- return p->job->ibs[ib_idx].ptr[idx];
-}
-
-static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
- uint32_t ib_idx, int idx,
- uint32_t value)
-{
- p->job->ibs[ib_idx].ptr[idx] = value;
-}
-
/*
* Writeback
*/
@@ -586,13 +497,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
/*
* Benchmarking
*/
-void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
-
-
-/*
- * Testing
- */
-void amdgpu_test_moves(struct amdgpu_device *adev);
+int amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
/*
* ASIC specific register table accessible by UMD
@@ -771,6 +676,8 @@ struct amd_powerplay {
const struct amd_pm_funcs *pp_funcs;
};
+struct ip_discovery_top;
+
/* polaris10 kickers */
#define ASICID_IS_P20(did, rid) (((did == 0x67DF) && \
((rid == 0xE3) || \
@@ -952,12 +859,6 @@ struct amdgpu_device {
/* powerplay */
struct amd_powerplay powerplay;
- bool pp_force_state_enabled;
-
- /* smu */
- struct smu_context smu;
-
- /* dpm */
struct amdgpu_pm pm;
u32 cg_flags;
u32 pg_flags;
@@ -1100,7 +1001,18 @@ struct amdgpu_device {
uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
bool ram_is_direct_mapped;
+
+ struct list_head ras_list;
+
+ struct ip_discovery_top *ip_top;
+
struct amdgpu_reset_domain *reset_domain;
+
+ struct mutex benchmark_mutex;
+
+ /* reset dump register */
+ uint32_t *reset_dump_reg_list;
+ int num_regs;
};
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@@ -1299,6 +1211,7 @@ int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
+bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
@@ -1324,6 +1237,10 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
void amdgpu_device_halt(struct amdgpu_device *adev);
+u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
+ u32 reg);
+void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
+ u32 reg, u32 v);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
@@ -1411,12 +1328,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
-bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
-static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
@@ -1425,9 +1340,13 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
enum amdgpu_ss ss_state) { return 0; }
#endif
-int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
- uint64_t addr, struct amdgpu_bo **bo,
- struct amdgpu_bo_va_mapping **mapping);
+#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+#else
+static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+#endif
#if defined(CONFIG_DRM_AMD_DC)
int amdgpu_dm_display_resume(struct amdgpu_device *adev );
@@ -1455,6 +1374,15 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
enum amd_powergating_state state);
+static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev)
+{
+ return amdgpu_gpu_recovery != 0 &&
+ adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT &&
+ adev->compute_timeout != MAX_SCHEDULE_TIMEOUT &&
+ adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT &&
+ adev->video_timeout != MAX_SCHEDULE_TIMEOUT;
+}
+
#include "amdgpu_object.h"
static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 4811b0faafd9..0e12315fa0cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1031,6 +1031,20 @@ void amdgpu_acpi_detect(void)
}
}
+#if IS_ENABLED(CONFIG_SUSPEND)
+/**
+ * amdgpu_acpi_is_s3_active
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * returns true if supported, false if not.
+ */
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
+{
+ return !(adev->flags & AMD_IS_APU) ||
+ (pm_suspend_target_state == PM_SUSPEND_MEM);
+}
+
/**
* amdgpu_acpi_is_s0ix_active
*
@@ -1040,11 +1054,24 @@ void amdgpu_acpi_detect(void)
*/
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
{
-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
- if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
- if (adev->flags & AMD_IS_APU)
- return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
+ if (!(adev->flags & AMD_IS_APU) ||
+ (pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
+ return false;
+
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+ dev_warn_once(adev->dev,
+ "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
+ "To use suspend-to-idle change the sleep mode in BIOS setup.\n");
+ return false;
}
-#endif
+
+#if !IS_ENABLED(CONFIG_AMD_PMC)
+ dev_warn_once(adev->dev,
+ "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
return false;
+#else
+ return true;
+#endif /* CONFIG_AMD_PMC */
}
+
+#endif /* CONFIG_SUSPEND */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index ac841ae8f5cc..4cb14c2fe53f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -131,6 +131,7 @@ struct amdkfd_process_info {
atomic_t evicted_bos;
struct delayed_work restore_userptr_work;
struct pid *pid;
+ bool block_mmu_notifications;
};
int amdgpu_amdkfd_init(void);
@@ -268,7 +269,7 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct amdgpu_device *adev, uint64_t va, uint64_t size,
void *drm_priv, struct kgd_mem **mem,
- uint64_t *offset, uint32_t flags);
+ uint64_t *offset, uint32_t flags, bool criu_resume);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
uint64_t *size);
@@ -297,6 +298,10 @@ int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
bool reset);
+bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
+void amdgpu_amdkfd_block_mmu_notifications(void *p);
+int amdgpu_amdkfd_criu_resume(void *p);
+
#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
index 46cd4ee6bafb..c8935d718207 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -37,10 +37,7 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied,
.hqd_destroy = kgd_gfx_v9_hqd_destroy,
.hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy,
- .address_watch_disable = kgd_gfx_v9_address_watch_disable,
- .address_watch_execute = kgd_gfx_v9_address_watch_execute,
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
- .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index abe93b3ff765..4191af5a3f13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -289,10 +289,7 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied,
.hqd_destroy = kgd_gfx_v9_hqd_destroy,
.hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy,
- .address_watch_disable = kgd_gfx_v9_address_watch_disable,
- .address_watch_execute = kgd_gfx_v9_address_watch_execute,
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
- .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base =
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 7b7f4b2764c1..9378fc79e9ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -671,20 +671,6 @@ static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-static int kgd_address_watch_disable(struct amdgpu_device *adev)
-{
- return 0;
-}
-
-static int kgd_address_watch_execute(struct amdgpu_device *adev,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo)
-{
- return 0;
-}
-
static int kgd_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd)
@@ -709,13 +695,6 @@ static int kgd_wave_control_execute(struct amdgpu_device *adev,
return 0;
}
-static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev,
- unsigned int watch_point_id,
- unsigned int reg_offset)
-{
- return 0;
-}
-
static void set_vm_context_page_table_base(struct amdgpu_device *adev,
uint32_t vmid, uint64_t page_table_base)
{
@@ -767,10 +746,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
.wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 1f37d3574001..ba21ec6b35e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -26,6 +26,8 @@
#include "gc/gc_10_3_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"
+#include "athub/athub_2_1_0_offset.h"
+#include "athub/athub_2_1_0_sh_mask.h"
#include "soc15_common.h"
#include "v10_structs.h"
#include "nv.h"
@@ -582,21 +584,6 @@ static int hqd_sdma_destroy_v10_3(struct amdgpu_device *adev, void *mqd,
return 0;
}
-
-static int address_watch_disable_v10_3(struct amdgpu_device *adev)
-{
- return 0;
-}
-
-static int address_watch_execute_v10_3(struct amdgpu_device *adev,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo)
-{
- return 0;
-}
-
static int wave_control_execute_v10_3(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd)
@@ -621,11 +608,16 @@ static int wave_control_execute_v10_3(struct amdgpu_device *adev,
return 0;
}
-static uint32_t address_watch_get_offset_v10_3(struct amdgpu_device *adev,
- unsigned int watch_point_id,
- unsigned int reg_offset)
+static bool get_atc_vmid_pasid_mapping_info_v10_3(struct amdgpu_device *adev,
+ uint8_t vmid, uint16_t *p_pasid)
{
- return 0;
+ uint32_t value;
+
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static void set_vm_context_page_table_base_v10_3(struct amdgpu_device *adev,
@@ -809,11 +801,8 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.hqd_sdma_is_occupied = hqd_sdma_is_occupied_v10_3,
.hqd_destroy = hqd_destroy_v10_3,
.hqd_sdma_destroy = hqd_sdma_destroy_v10_3,
- .address_watch_disable = address_watch_disable_v10_3,
- .address_watch_execute = address_watch_execute_v10_3,
.wave_control_execute = wave_control_execute_v10_3,
- .address_watch_get_offset = address_watch_get_offset_v10_3,
- .get_atc_vmid_pasid_mapping_info = NULL,
+ .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info_v10_3,
.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
.program_trap_handler_settings = program_trap_handler_settings_v10_3,
#if 0
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 36528dad7684..65552bb7d2f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -45,43 +45,6 @@ enum {
MAX_WATCH_ADDRESSES = 4
};
-enum {
- ADDRESS_WATCH_REG_ADDR_HI = 0,
- ADDRESS_WATCH_REG_ADDR_LO,
- ADDRESS_WATCH_REG_CNTL,
- ADDRESS_WATCH_REG_MAX
-};
-
-/* not defined in the CI/KV reg file */
-enum {
- ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
- ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
- ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
- /* extend the mask to 26 bits to match the low address field */
- ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
- ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
-};
-
-static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
- mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
- mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
- mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
- mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
-};
-
-union TCP_WATCH_CNTL_BITS {
- struct {
- uint32_t mask:24;
- uint32_t vmid:4;
- uint32_t atc:1;
- uint32_t mode:2;
- uint32_t valid:1;
- } bitfields, bits;
- uint32_t u32All;
- signed int i32All;
- float f32All;
-};
-
static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
@@ -529,55 +492,6 @@ static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
return 0;
}
-static int kgd_address_watch_disable(struct amdgpu_device *adev)
-{
- union TCP_WATCH_CNTL_BITS cntl;
- unsigned int i;
-
- cntl.u32All = 0;
-
- cntl.bitfields.valid = 0;
- cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
- cntl.bitfields.atc = 1;
-
- /* Turning off this address until we set all the registers */
- for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
- WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_CNTL], cntl.u32All);
-
- return 0;
-}
-
-static int kgd_address_watch_execute(struct amdgpu_device *adev,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo)
-{
- union TCP_WATCH_CNTL_BITS cntl;
-
- cntl.u32All = cntl_val;
-
- /* Turning off this watch point until we set all the registers */
- cntl.bitfields.valid = 0;
- WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_CNTL], cntl.u32All);
-
- WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_ADDR_HI], addr_hi);
-
- WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_ADDR_LO], addr_lo);
-
- /* Enable the watch point */
- cntl.bitfields.valid = 1;
-
- WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_CNTL], cntl.u32All);
-
- return 0;
-}
-
static int kgd_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd)
@@ -602,13 +516,6 @@ static int kgd_wave_control_execute(struct amdgpu_device *adev,
return 0;
}
-static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev,
- unsigned int watch_point_id,
- unsigned int reg_offset)
-{
- return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
-}
-
static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid)
{
@@ -665,10 +572,7 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
.wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 52832cd69a93..9dc5f2a0cc07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -538,20 +538,6 @@ static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-static int kgd_address_watch_disable(struct amdgpu_device *adev)
-{
- return 0;
-}
-
-static int kgd_address_watch_execute(struct amdgpu_device *adev,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo)
-{
- return 0;
-}
-
static int kgd_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd)
@@ -576,13 +562,6 @@ static int kgd_wave_control_execute(struct amdgpu_device *adev,
return 0;
}
-static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev,
- unsigned int watch_point_id,
- unsigned int reg_offset)
-{
- return 0;
-}
-
static void set_scratch_backing_va(struct amdgpu_device *adev,
uint64_t va, uint32_t vmid)
{
@@ -614,10 +593,7 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
.wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 1abf662a0e91..81e3b528bbc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -622,20 +622,6 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev)
-{
- return 0;
-}
-
-int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo)
-{
- return 0;
-}
-
int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd)
@@ -660,13 +646,6 @@ int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
return 0;
}
-uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev,
- unsigned int watch_point_id,
- unsigned int reg_offset)
-{
- return 0;
-}
-
void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
uint32_t vmid, uint64_t page_table_base)
{
@@ -736,7 +715,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
* process whose pasid is provided as a parameter. The process could have ZERO
* or more queues running and submitting waves to compute units.
*
- * @kgd: Handle of device from which to get number of waves in flight
+ * @adev: Handle of device from which to get number of waves in flight
* @pasid: Identifies the process for which this query call is invoked
* @pasid_wave_cnt: Output parameter updated with number of waves in flight that
* belong to process with given pasid
@@ -745,7 +724,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
*
* Note: It's possible that the device has too many queues (oversubscription)
* in which case a VMID could be remapped to a different PASID. This could lead
- * to an iaccurate wave count. Following is a high-level sequence:
+ * to an inaccurate wave count. Following is a high-level sequence:
* Time T1: vmid = getVmid(); vmid is associated with Pasid P1
* Time T2: passId = getPasId(vmid); vmid is associated with Pasid P2
* In the sequence above wave count obtained from time T1 will be incorrectly
@@ -888,10 +867,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
.hqd_destroy = kgd_gfx_v9_hqd_destroy,
.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_gfx_v9_address_watch_disable,
- .address_watch_execute = kgd_gfx_v9_address_watch_execute,
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
- .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index 24be49df26fd..c7ed3bc9053c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -46,19 +46,9 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id);
-int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev);
-int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd);
-uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index b6f266f612ea..900ed2a7483b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -121,7 +121,7 @@ static size_t amdgpu_amdkfd_acc_size(uint64_t size)
}
/**
- * @amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
+ * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
* of buffer including any reserved for control structures
*
* @adev: Device to which allocated BO belongs to
@@ -743,7 +743,7 @@ unwind:
continue;
if (attachment[i]->bo_va) {
amdgpu_bo_reserve(bo[i], true);
- amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
+ amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
amdgpu_bo_unreserve(bo[i]);
list_del(&attachment[i]->list);
}
@@ -760,7 +760,7 @@ static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
pr_debug("\t remove VA 0x%llx in entry %p\n",
attachment->va, attachment);
- amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
+ amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
drm_gem_object_put(&bo->tbo.base);
list_del(&attachment->list);
kfree(attachment);
@@ -807,7 +807,8 @@ static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
*
* Returns 0 for success, negative errno for errors.
*/
-static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
+static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
+ bool criu_resume)
{
struct amdkfd_process_info *process_info = mem->process_info;
struct amdgpu_bo *bo = mem->bo;
@@ -829,6 +830,18 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
goto out;
}
+ if (criu_resume) {
+ /*
+ * During a CRIU restore operation, the userptr buffer objects
+ * will be validated in the restore_userptr_work worker at a
+ * later stage when it is scheduled by another ioctl called by
+ * CRIU master process for the target pid for restore.
+ */
+ atomic_inc(&mem->invalid);
+ mutex_unlock(&process_info->lock);
+ return 0;
+ }
+
ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
if (ret) {
pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
@@ -1417,10 +1430,39 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
return avm->pd_phys_addr;
}
+void amdgpu_amdkfd_block_mmu_notifications(void *p)
+{
+ struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
+
+ mutex_lock(&pinfo->lock);
+ WRITE_ONCE(pinfo->block_mmu_notifications, true);
+ mutex_unlock(&pinfo->lock);
+}
+
+int amdgpu_amdkfd_criu_resume(void *p)
+{
+ int ret = 0;
+ struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
+
+ mutex_lock(&pinfo->lock);
+ pr_debug("scheduling work\n");
+ atomic_inc(&pinfo->evicted_bos);
+ if (!READ_ONCE(pinfo->block_mmu_notifications)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ WRITE_ONCE(pinfo->block_mmu_notifications, false);
+ schedule_delayed_work(&pinfo->restore_userptr_work, 0);
+
+out_unlock:
+ mutex_unlock(&pinfo->lock);
+ return ret;
+}
+
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct amdgpu_device *adev, uint64_t va, uint64_t size,
void *drm_priv, struct kgd_mem **mem,
- uint64_t *offset, uint32_t flags)
+ uint64_t *offset, uint32_t flags, bool criu_resume)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
enum ttm_bo_type bo_type = ttm_bo_type_device;
@@ -1523,7 +1565,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
if (user_addr) {
- ret = init_user_pages(*mem, user_addr);
+ pr_debug("creating userptr BO for user_addr = %llu\n", user_addr);
+ ret = init_user_pages(*mem, user_addr, criu_resume);
if (ret)
goto allocate_init_user_pages_failed;
} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
@@ -1778,12 +1821,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
true);
ret = unreserve_bo_and_vms(&ctx, false, false);
- /* Only apply no TLB flush on Aldebaran to
- * workaround regressions on other Asics.
- */
- if (table_freed && (adev->asic_type != CHIP_ALDEBARAN))
- *table_freed = true;
-
goto out;
out_unreserve:
@@ -2033,6 +2070,10 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
int evicted_bos;
int r = 0;
+ /* Do not process MMU notifications until stage-4 IOCTL is received */
+ if (READ_ONCE(process_info->block_mmu_notifications))
+ return 0;
+
atomic_inc(&mem->invalid);
evicted_bos = atomic_inc_return(&process_info->evicted_bos);
if (evicted_bos == 1) {
@@ -2600,3 +2641,14 @@ int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
return 0;
}
+
+bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem)
+{
+ struct kfd_mem_attachment *entry;
+
+ list_for_each_entry(entry, &mem->attachments, list) {
+ if (entry->is_mapped && entry->adev == adev)
+ return true;
+ }
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 12a6b1c99c93..9ba4817a9148 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1083,6 +1083,7 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
return 0;
}
+#ifdef CONFIG_DRM_AMDGPU_SI
int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
u32 clock,
bool strobe_mode,
@@ -1503,6 +1504,7 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
}
return -EINVAL;
}
+#endif
bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 27e74b1fc260..4153d520e2a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -160,6 +160,7 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
bool strobe_mode,
struct atom_clock_dividers *dividers);
+#ifdef CONFIG_DRM_AMDGPU_SI
int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
u32 clock,
bool strobe_mode,
@@ -179,6 +180,17 @@ int amdgpu_atombios_get_voltage_table(struct amdgpu_device *adev,
int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
u8 module_index,
struct atom_mc_reg_table *reg_table);
+int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
+ u16 voltage_id, u16 *voltage);
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
+ u16 *voltage,
+ u16 leakage_idx);
+void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
+ u16 *vddc, u16 *vddci, u16 *mvdd);
+int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
+ u8 voltage_type,
+ u8 *svd_gpio_id, u8 *svc_gpio_id);
+#endif
bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
@@ -190,21 +202,11 @@ void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev
bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
-int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
- u16 voltage_id, u16 *voltage);
-int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
- u16 *voltage,
- u16 leakage_idx);
-void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
- u16 *vddc, u16 *vddci, u16 *mvdd);
int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
u8 clock_type,
u32 clock,
bool strobe_mode,
struct atom_clock_dividers *dividers);
-int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
- u8 voltage_type,
- u8 *svd_gpio_id, u8 *svc_gpio_id);
int amdgpu_atombios_get_data_table(struct amdgpu_device *adev,
uint32_t table,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 313517f7cf10..edc6377ec5ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -29,14 +29,13 @@
#define AMDGPU_BENCHMARK_COMMON_MODES_N 17
static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
- uint64_t saddr, uint64_t daddr, int n)
+ uint64_t saddr, uint64_t daddr, int n, s64 *time_ms)
{
- unsigned long start_jiffies;
- unsigned long end_jiffies;
+ ktime_t stime, etime;
struct dma_fence *fence;
int i, r;
- start_jiffies = jiffies;
+ stime = ktime_get();
for (i = 0; i < n; i++) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
@@ -48,120 +47,81 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
if (r)
goto exit_do_move;
}
- end_jiffies = jiffies;
- r = jiffies_to_msecs(end_jiffies - start_jiffies);
exit_do_move:
+ etime = ktime_get();
+ *time_ms = ktime_ms_delta(etime, stime);
+
return r;
}
-static void amdgpu_benchmark_log_results(int n, unsigned size,
- unsigned int time,
+static void amdgpu_benchmark_log_results(struct amdgpu_device *adev,
+ int n, unsigned size,
+ s64 time_ms,
unsigned sdomain, unsigned ddomain,
char *kind)
{
- unsigned int throughput = (n * (size >> 10)) / time;
- DRM_INFO("amdgpu: %s %u bo moves of %u kB from"
- " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
- kind, n, size >> 10, sdomain, ddomain, time,
+ s64 throughput = (n * (size >> 10));
+
+ throughput = div64_s64(throughput, time_ms);
+
+ dev_info(adev->dev, "amdgpu: %s %u bo moves of %u kB from"
+ " %d to %d in %lld ms, throughput: %lld Mb/s or %lld MB/s\n",
+ kind, n, size >> 10, sdomain, ddomain, time_ms,
throughput * 8, throughput);
}
-static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
- unsigned sdomain, unsigned ddomain)
+static int amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
+ unsigned sdomain, unsigned ddomain)
{
struct amdgpu_bo *dobj = NULL;
struct amdgpu_bo *sobj = NULL;
- struct amdgpu_bo_param bp;
uint64_t saddr, daddr;
+ s64 time_ms;
int r, n;
- int time;
-
- memset(&bp, 0, sizeof(bp));
- bp.size = size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = sdomain;
- bp.flags = 0;
- bp.type = ttm_bo_type_kernel;
- bp.resv = NULL;
- bp.bo_ptr_size = sizeof(struct amdgpu_bo);
n = AMDGPU_BENCHMARK_ITERATIONS;
- r = amdgpu_bo_create(adev, &bp, &sobj);
- if (r) {
- goto out_cleanup;
- }
- r = amdgpu_bo_reserve(sobj, false);
- if (unlikely(r != 0))
- goto out_cleanup;
- r = amdgpu_bo_pin(sobj, sdomain);
- if (r) {
- amdgpu_bo_unreserve(sobj);
- goto out_cleanup;
- }
- r = amdgpu_ttm_alloc_gart(&sobj->tbo);
- amdgpu_bo_unreserve(sobj);
- if (r) {
- goto out_cleanup;
- }
- saddr = amdgpu_bo_gpu_offset(sobj);
- bp.domain = ddomain;
- r = amdgpu_bo_create(adev, &bp, &dobj);
- if (r) {
- goto out_cleanup;
- }
- r = amdgpu_bo_reserve(dobj, false);
- if (unlikely(r != 0))
- goto out_cleanup;
- r = amdgpu_bo_pin(dobj, ddomain);
- if (r) {
- amdgpu_bo_unreserve(sobj);
+
+ r = amdgpu_bo_create_kernel(adev, size,
+ PAGE_SIZE, sdomain,
+ &sobj,
+ &saddr,
+ NULL);
+ if (r)
goto out_cleanup;
- }
- r = amdgpu_ttm_alloc_gart(&dobj->tbo);
- amdgpu_bo_unreserve(dobj);
- if (r) {
+ r = amdgpu_bo_create_kernel(adev, size,
+ PAGE_SIZE, ddomain,
+ &dobj,
+ &daddr,
+ NULL);
+ if (r)
goto out_cleanup;
- }
- daddr = amdgpu_bo_gpu_offset(dobj);
if (adev->mman.buffer_funcs) {
- time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
- if (time < 0)
+ r = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n, &time_ms);
+ if (r)
goto out_cleanup;
- if (time > 0)
- amdgpu_benchmark_log_results(n, size, time,
+ else
+ amdgpu_benchmark_log_results(adev, n, size, time_ms,
sdomain, ddomain, "dma");
}
out_cleanup:
/* Check error value now. The value can be overwritten when clean up.*/
- if (r) {
- DRM_ERROR("Error while benchmarking BO move.\n");
- }
+ if (r < 0)
+ dev_info(adev->dev, "Error while benchmarking BO move.\n");
- if (sobj) {
- r = amdgpu_bo_reserve(sobj, true);
- if (likely(r == 0)) {
- amdgpu_bo_unpin(sobj);
- amdgpu_bo_unreserve(sobj);
- }
- amdgpu_bo_unref(&sobj);
- }
- if (dobj) {
- r = amdgpu_bo_reserve(dobj, true);
- if (likely(r == 0)) {
- amdgpu_bo_unpin(dobj);
- amdgpu_bo_unreserve(dobj);
- }
- amdgpu_bo_unref(&dobj);
- }
+ if (sobj)
+ amdgpu_bo_free_kernel(&sobj, &saddr, NULL);
+ if (dobj)
+ amdgpu_bo_free_kernel(&dobj, &daddr, NULL);
+ return r;
}
-void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
+int amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
{
- int i;
+ int i, r;
static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
640 * 480 * 4,
720 * 480 * 4,
@@ -182,63 +142,119 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
1920 * 1200 * 4
};
+ mutex_lock(&adev->benchmark_mutex);
switch (test_number) {
case 1:
+ dev_info(adev->dev,
+ "benchmark test: %d (simple test, VRAM to GTT and GTT to VRAM)\n",
+ test_number);
/* simple test, VRAM to GTT and GTT to VRAM */
- amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_DOMAIN_VRAM);
- amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_DOMAIN_GTT);
+ r = amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto done;
+ r = amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ goto done;
break;
case 2:
+ dev_info(adev->dev,
+ "benchmark test: %d (simple test, VRAM to VRAM)\n",
+ test_number);
/* simple test, VRAM to VRAM */
- amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_DOMAIN_VRAM);
+ r = amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto done;
break;
case 3:
+ dev_info(adev->dev,
+ "benchmark test: %d (GTT to VRAM, buffer size sweep, powers of 2)\n",
+ test_number);
/* GTT to VRAM, buffer size sweep, powers of 2 */
- for (i = 1; i <= 16384; i <<= 1)
- amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_DOMAIN_VRAM);
+ for (i = 1; i <= 16384; i <<= 1) {
+ r = amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto done;
+ }
break;
case 4:
+ dev_info(adev->dev,
+ "benchmark test: %d (VRAM to GTT, buffer size sweep, powers of 2)\n",
+ test_number);
/* VRAM to GTT, buffer size sweep, powers of 2 */
- for (i = 1; i <= 16384; i <<= 1)
- amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_DOMAIN_GTT);
+ for (i = 1; i <= 16384; i <<= 1) {
+ r = amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ goto done;
+ }
break;
case 5:
+ dev_info(adev->dev,
+ "benchmark test: %d (VRAM to VRAM, buffer size sweep, powers of 2)\n",
+ test_number);
/* VRAM to VRAM, buffer size sweep, powers of 2 */
- for (i = 1; i <= 16384; i <<= 1)
- amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_DOMAIN_VRAM);
+ for (i = 1; i <= 16384; i <<= 1) {
+ r = amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto done;
+ }
break;
case 6:
+ dev_info(adev->dev,
+ "benchmark test: %d (GTT to VRAM, buffer size sweep, common modes)\n",
+ test_number);
/* GTT to VRAM, buffer size sweep, common modes */
- for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
- amdgpu_benchmark_move(adev, common_modes[i],
- AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_DOMAIN_VRAM);
+ for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++) {
+ r = amdgpu_benchmark_move(adev, common_modes[i],
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto done;
+ }
break;
case 7:
+ dev_info(adev->dev,
+ "benchmark test: %d (VRAM to GTT, buffer size sweep, common modes)\n",
+ test_number);
/* VRAM to GTT, buffer size sweep, common modes */
- for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
- amdgpu_benchmark_move(adev, common_modes[i],
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_DOMAIN_GTT);
+ for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++) {
+ r = amdgpu_benchmark_move(adev, common_modes[i],
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ goto done;
+ }
break;
case 8:
+ dev_info(adev->dev,
+ "benchmark test: %d (VRAM to VRAM, buffer size sweep, common modes)\n",
+ test_number);
/* VRAM to VRAM, buffer size sweep, common modes */
- for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
- amdgpu_benchmark_move(adev, common_modes[i],
+ for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++) {
+ r = amdgpu_benchmark_move(adev, common_modes[i],
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto done;
+ }
break;
default:
- DRM_ERROR("Unknown benchmark\n");
+ dev_info(adev->dev, "Unknown benchmark %d\n", test_number);
+ r = -EINVAL;
+ break;
}
+
+done:
+ mutex_unlock(&adev->benchmark_mutex);
+
+ return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 27b19503773b..0eddca795e96 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -464,3 +464,41 @@ success:
adev->is_atom_fw = (adev->asic_type >= CHIP_VEGA10) ? true : false;
return true;
}
+
+/* helper function for soc15 and onwards to read bios from rom */
+bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
+ u8 *bios, u32 length_bytes)
+{
+ u32 *dw_ptr;
+ u32 i, length_dw;
+ u32 rom_index_offset;
+ u32 rom_data_offset;
+
+ if (bios == NULL)
+ return false;
+ if (length_bytes == 0)
+ return false;
+ /* APU vbios image is part of sbios image */
+ if (adev->flags & AMD_IS_APU)
+ return false;
+ if (!adev->smuio.funcs ||
+ !adev->smuio.funcs->get_rom_index_offset ||
+ !adev->smuio.funcs->get_rom_data_offset)
+ return false;
+
+ dw_ptr = (u32 *)bios;
+ length_dw = ALIGN(length_bytes, 4) / 4;
+
+ rom_index_offset =
+ adev->smuio.funcs->get_rom_index_offset(adev);
+ rom_data_offset =
+ adev->smuio.funcs->get_rom_data_offset(adev);
+
+ /* set rom index to 0 */
+ WREG32(rom_index_offset, 0);
+ /* read out the rom data */
+ for (i = 0; i < length_dw; i++)
+ dw_ptr[i] = RREG32(rom_data_offset);
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index fa20261aa928..673078faa27a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -626,7 +626,7 @@ amdgpu_connector_fixup_lcd_native_mode(struct drm_encoder *encoder,
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
if (mode->hdisplay != native_mode->hdisplay ||
mode->vdisplay != native_mode->vdisplay)
- memcpy(native_mode, mode, sizeof(*mode));
+ drm_mode_copy(native_mode, mode);
}
}
@@ -635,7 +635,7 @@ amdgpu_connector_fixup_lcd_native_mode(struct drm_encoder *encoder,
list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
if (mode->hdisplay == native_mode->hdisplay &&
mode->vdisplay == native_mode->vdisplay) {
- *native_mode = *mode;
+ drm_mode_copy(native_mode, mode);
drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index ee59b6928b11..e85e347eb670 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -32,6 +32,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
+#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
@@ -127,8 +128,6 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
goto free_chunk;
}
- mutex_lock(&p->ctx->lock);
-
/* skip guilty context job */
if (atomic_read(&p->ctx->guilty) == 1) {
ret = -ECANCELED;
@@ -341,7 +340,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
s64 min_us;
- /* Be more aggresive on dGPUs. Try to fill a portion of free
+ /* Be more aggressive on dGPUs. Try to fill a portion of free
* VRAM now.
*/
if (!(adev->flags & AMD_IS_APU))
@@ -585,6 +584,16 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
}
+ /* Move fence waiting after getting reservation lock of
+ * PD root. Then there is no need for a ctx mutex lock.
+ */
+ r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entity);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+ goto error_validate;
+ }
+
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
&p->bytes_moved_vis_threshold);
p->bytes_moved = 0;
@@ -700,7 +709,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
dma_fence_put(parser->fence);
if (parser->ctx) {
- mutex_unlock(&parser->ctx->lock);
amdgpu_ctx_put(parser->ctx);
}
if (parser->bo_list)
@@ -775,12 +783,12 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
amdgpu_bo_kunmap(aobj);
- r = amdgpu_ring_parse_cs(ring, p, j);
+ r = amdgpu_ring_parse_cs(ring, p, p->job, ib);
if (r)
return r;
} else {
ib->ptr = (uint32_t *)kptr;
- r = amdgpu_ring_patch_cs_in_place(ring, p, j);
+ r = amdgpu_ring_patch_cs_in_place(ring, p, p->job, ib);
amdgpu_bo_kunmap(aobj);
if (r)
return r;
@@ -944,7 +952,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
if (parser->job->uf_addr && ring->funcs->no_user_fence)
return -EINVAL;
- return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
+ return 0;
}
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1365,7 +1373,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
r = amdgpu_cs_submit(&parser, cs);
-
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
@@ -1511,6 +1518,7 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
return 0;
default:
+ dma_fence_put(fence);
return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
new file mode 100644
index 000000000000..30ecc4917f81
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_CS_H__
+#define __AMDGPU_CS_H__
+
+#include "amdgpu_job.h"
+#include "amdgpu_bo_list.h"
+#include "amdgpu_ring.h"
+
+struct amdgpu_bo_va_mapping;
+
+struct amdgpu_cs_chunk {
+ uint32_t chunk_id;
+ uint32_t length_dw;
+ void *kdata;
+};
+
+struct amdgpu_cs_post_dep {
+ struct drm_syncobj *syncobj;
+ struct dma_fence_chain *chain;
+ u64 point;
+};
+
+struct amdgpu_cs_parser {
+ struct amdgpu_device *adev;
+ struct drm_file *filp;
+ struct amdgpu_ctx *ctx;
+
+ /* chunks */
+ unsigned nchunks;
+ struct amdgpu_cs_chunk *chunks;
+
+ /* scheduler job object */
+ struct amdgpu_job *job;
+ struct drm_sched_entity *entity;
+
+ /* buffer objects */
+ struct ww_acquire_ctx ticket;
+ struct amdgpu_bo_list *bo_list;
+ struct amdgpu_mn *mn;
+ struct amdgpu_bo_list_entry vm_pd;
+ struct list_head validated;
+ struct dma_fence *fence;
+ uint64_t bytes_moved_threshold;
+ uint64_t bytes_moved_vis_threshold;
+ uint64_t bytes_moved;
+ uint64_t bytes_moved_vis;
+
+ /* user fence */
+ struct amdgpu_bo_list_entry uf_entry;
+
+ unsigned num_post_deps;
+ struct amdgpu_cs_post_dep *post_deps;
+};
+
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ uint64_t addr, struct amdgpu_bo **bo,
+ struct amdgpu_bo_va_mapping **mapping);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index da21e60bb827..c6d4d41c4393 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -98,7 +98,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r) {
DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, *bo_va);
+ amdgpu_vm_bo_del(adev, *bo_va);
ttm_eu_backoff_reservation(&ticket, &list);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 468003583b2a..5981c7d9bd48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -23,6 +23,7 @@
*/
#include <drm/drm_auth.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
@@ -204,9 +205,15 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
if (r)
goto error_free_entity;
- ctx->entities[hw_ip][ring] = entity;
+ /* It's not an error if we fail to install the new entity */
+ if (cmpxchg(&ctx->entities[hw_ip][ring], NULL, entity))
+ goto cleanup_entity;
+
return 0;
+cleanup_entity:
+ drm_sched_entity_fini(&entity->entity);
+
error_free_entity:
kfree(entity);
@@ -230,13 +237,13 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
- mutex_init(&ctx->lock);
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
ctx->init_priority = priority;
ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
+ ctx->stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
return 0;
}
@@ -255,11 +262,85 @@ static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
kfree(entity);
}
+static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
+ u32 *stable_pstate)
+{
+ struct amdgpu_device *adev = ctx->adev;
+ enum amd_dpm_forced_level current_level;
+
+ current_level = amdgpu_dpm_get_performance_level(adev);
+
+ switch (current_level) {
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_STANDARD;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_PEAK;
+ break;
+ default:
+ *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
+ break;
+ }
+ return 0;
+}
+
+static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
+ u32 stable_pstate)
+{
+ struct amdgpu_device *adev = ctx->adev;
+ enum amd_dpm_forced_level level;
+ int r;
+
+ mutex_lock(&adev->pm.stable_pstate_ctx_lock);
+ if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) {
+ r = -EBUSY;
+ goto done;
+ }
+
+ switch (stable_pstate) {
+ case AMDGPU_CTX_STABLE_PSTATE_NONE:
+ level = AMD_DPM_FORCED_LEVEL_AUTO;
+ break;
+ case AMDGPU_CTX_STABLE_PSTATE_STANDARD:
+ level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
+ break;
+ case AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK:
+ level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
+ break;
+ case AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK:
+ level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
+ break;
+ case AMDGPU_CTX_STABLE_PSTATE_PEAK:
+ level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+ break;
+ default:
+ r = -EINVAL;
+ goto done;
+ }
+
+ r = amdgpu_dpm_force_performance_level(adev, level);
+
+ if (level == AMD_DPM_FORCED_LEVEL_AUTO)
+ adev->pm.stable_pstate_ctx = NULL;
+ else
+ adev->pm.stable_pstate_ctx = ctx;
+done:
+ mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
+
+ return r;
+}
+
static void amdgpu_ctx_fini(struct kref *ref)
{
struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
struct amdgpu_device *adev = ctx->adev;
- unsigned i, j;
+ unsigned i, j, idx;
if (!adev)
return;
@@ -271,7 +352,11 @@ static void amdgpu_ctx_fini(struct kref *ref)
}
}
- mutex_destroy(&ctx->lock);
+ if (drm_dev_enter(&adev->ddev, &idx)) {
+ amdgpu_ctx_set_stable_pstate(ctx, AMDGPU_CTX_STABLE_PSTATE_NONE);
+ drm_dev_exit(idx);
+ }
+
kfree(ctx);
}
@@ -467,11 +552,41 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
return 0;
}
+
+
+static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
+ struct amdgpu_fpriv *fpriv, uint32_t id,
+ bool set, u32 *stable_pstate)
+{
+ struct amdgpu_ctx *ctx;
+ struct amdgpu_ctx_mgr *mgr;
+ int r;
+
+ if (!fpriv)
+ return -EINVAL;
+
+ mgr = &fpriv->ctx_mgr;
+ mutex_lock(&mgr->lock);
+ ctx = idr_find(&mgr->ctx_handles, id);
+ if (!ctx) {
+ mutex_unlock(&mgr->lock);
+ return -EINVAL;
+ }
+
+ if (set)
+ r = amdgpu_ctx_set_stable_pstate(ctx, *stable_pstate);
+ else
+ r = amdgpu_ctx_get_stable_pstate(ctx, stable_pstate);
+
+ mutex_unlock(&mgr->lock);
+ return r;
+}
+
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
int r;
- uint32_t id;
+ uint32_t id, stable_pstate;
int32_t priority;
union drm_amdgpu_ctx *args = data;
@@ -500,6 +615,21 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
case AMDGPU_CTX_OP_QUERY_STATE2:
r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
break;
+ case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
+ if (args->in.flags)
+ return -EINVAL;
+ r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate);
+ if (!r)
+ args->out.pstate.flags = stable_pstate;
+ break;
+ case AMDGPU_CTX_OP_SET_STABLE_PSTATE:
+ if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK)
+ return -EINVAL;
+ stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK;
+ if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK)
+ return -EINVAL;
+ r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate);
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index a44b8b8ed39c..d0cbfcea90f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -49,10 +49,10 @@ struct amdgpu_ctx {
bool preamble_presented;
int32_t init_priority;
int32_t override_priority;
- struct mutex lock;
atomic_t guilty;
unsigned long ras_counter_ce;
unsigned long ras_counter_ue;
+ uint32_t stable_pstate;
};
struct amdgpu_ctx_mgr {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index c3728061d65a..5d04d24a0d5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -730,7 +730,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
return -ENOMEM;
/* version, increment each time something is added */
- config[no_regs++] = 3;
+ config[no_regs++] = 4;
config[no_regs++] = adev->gfx.config.max_shader_engines;
config[no_regs++] = adev->gfx.config.max_tile_pipes;
config[no_regs++] = adev->gfx.config.max_cu_per_sh;
@@ -770,6 +770,9 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
config[no_regs++] = adev->pdev->subsystem_device;
config[no_regs++] = adev->pdev->subsystem_vendor;
+ /* rev==4 APU flag */
+ config[no_regs++] = adev->flags & AMD_IS_APU ? 1 : 0;
+
while (size && (*pos < no_regs * 4)) {
uint32_t value;
@@ -1122,8 +1125,10 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
return -EINVAL;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
+ }
while (size) {
uint32_t value;
@@ -1359,6 +1364,25 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
return 0;
}
+static int amdgpu_debugfs_benchmark(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ struct drm_device *dev = adev_to_drm(adev);
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
+ return r;
+ }
+
+ r = amdgpu_benchmark(adev, val);
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return r;
+}
static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
{
@@ -1395,6 +1419,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_vram_fops, amdgpu_debugfs_evict_vram,
NULL, "%lld\n");
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_gtt_fops, amdgpu_debugfs_evict_gtt,
NULL, "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_benchmark_fops, NULL, amdgpu_debugfs_benchmark,
+ "%lld\n");
static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
struct dma_fence **fences)
@@ -1587,22 +1613,25 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
return ret;
}
- if (is_support_sw_smu(adev)) {
- ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq);
- if (ret || val > max_freq || val < min_freq)
- return -EINVAL;
- ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val);
- } else {
- return 0;
+ ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_freq, &max_freq);
+ if (ret == -EOPNOTSUPP) {
+ ret = 0;
+ goto out;
}
+ if (ret || val > max_freq || val < min_freq) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, (uint32_t)val, (uint32_t)val);
+ if (ret)
+ ret = -EINVAL;
+out:
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- if (ret)
- return -EINVAL;
-
- return 0;
+ return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
@@ -1611,6 +1640,91 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
+static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
+ char __user *buf, size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ char reg_offset[12];
+ int i, ret, len = 0;
+
+ if (*pos)
+ return 0;
+
+ memset(reg_offset, 0, 12);
+ ret = down_read_killable(&adev->reset_domain->sem);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < adev->num_regs; i++) {
+ sprintf(reg_offset, "0x%x\n", adev->reset_dump_reg_list[i]);
+ up_read(&adev->reset_domain->sem);
+ if (copy_to_user(buf + len, reg_offset, strlen(reg_offset)))
+ return -EFAULT;
+
+ len += strlen(reg_offset);
+ ret = down_read_killable(&adev->reset_domain->sem);
+ if (ret)
+ return ret;
+ }
+
+ up_read(&adev->reset_domain->sem);
+ *pos += len;
+
+ return len;
+}
+
+static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+ const char __user *buf, size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ char reg_offset[11];
+ uint32_t *new, *tmp = NULL;
+ int ret, i = 0, len = 0;
+
+ do {
+ memset(reg_offset, 0, 11);
+ if (copy_from_user(reg_offset, buf + len,
+ min(10, ((int)size-len)))) {
+ ret = -EFAULT;
+ goto error_free;
+ }
+
+ new = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL);
+ if (!new) {
+ ret = -ENOMEM;
+ goto error_free;
+ }
+ tmp = new;
+ if (sscanf(reg_offset, "%X %n", &tmp[i], &ret) != 1) {
+ ret = -EINVAL;
+ goto error_free;
+ }
+
+ len += ret;
+ i++;
+ } while (len < size);
+
+ ret = down_write_killable(&adev->reset_domain->sem);
+ if (ret)
+ goto error_free;
+
+ swap(adev->reset_dump_reg_list, tmp);
+ adev->num_regs = i;
+ up_write(&adev->reset_domain->sem);
+ ret = size;
+
+error_free:
+ kfree(tmp);
+ return ret;
+}
+
+static const struct file_operations amdgpu_reset_dump_register_list = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_reset_dump_register_list_read,
+ .write = amdgpu_reset_dump_register_list_write,
+ .llseek = default_llseek
+};
+
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -1664,6 +1778,16 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_debugfs_ring_init(adev, ring);
}
+ for ( i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (!amdgpu_vcnfw_log)
+ break;
+
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ amdgpu_debugfs_vcn_fwlog_init(adev, i, &adev->vcn.inst[i]);
+ }
+
amdgpu_ras_debugfs_create_all(adev);
amdgpu_rap_debugfs_init(adev);
amdgpu_securedisplay_debugfs_init(adev);
@@ -1677,6 +1801,10 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
&amdgpu_debugfs_test_ib_fops);
debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
&amdgpu_debugfs_vm_info_fops);
+ debugfs_create_file("amdgpu_benchmark", 0200, root, adev,
+ &amdgpu_benchmark_fops);
+ debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
+ &amdgpu_reset_dump_register_list);
adev->debugfs_vbios_blob.data = adev->bios;
adev->debugfs_vbios_blob.size = adev->bios_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cd1f9140a878..3987ecb24ef4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -31,6 +31,7 @@
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
+#include <linux/pci.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
@@ -55,7 +56,6 @@
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
-#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"
@@ -80,14 +80,11 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
+#define AMDGPU_MAX_RETRY_LIMIT 2
+#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
const char *amdgpu_asic_name[] = {
"TAHITI",
@@ -554,7 +551,11 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
/**
* amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
*
- * this function is invoked only the debugfs register access
+ * @adev: amdgpu_device pointer
+ * @reg: mmio/rlc register
+ * @v: value to write
+ *
+ * this function is invoked only for the debugfs register access
*/
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
uint32_t reg, uint32_t v)
@@ -566,7 +567,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
adev->gfx.rlc.funcs &&
adev->gfx.rlc.funcs->is_rlcg_access_range) {
if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
- return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
+ return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
} else if ((reg * 4) >= adev->rmmio_size) {
adev->pcie_wreg(adev, reg * 4, v);
} else {
@@ -1312,6 +1313,31 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
return true;
}
+/**
+ * amdgpu_device_should_use_aspm - check if the device should program ASPM
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Confirm whether the module parameter and pcie bridge agree that ASPM should
+ * be set for this device.
+ *
+ * Returns true if it should be used or false if not.
+ */
+bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
+{
+ switch (amdgpu_aspm) {
+ case -1:
+ break;
+ case 0:
+ return false;
+ case 1:
+ return true;
+ default:
+ return false;
+ }
+ return pcie_aspm_enabled(adev->pdev);
+}
+
/* if we get transitioned to only one device, take VGA back */
/**
* amdgpu_device_vga_set_decode - enable/disable vga decode
@@ -1446,7 +1472,8 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
case CHIP_YELLOW_CARP:
break;
case CHIP_CYAN_SKILLFISH:
- if (adev->pdev->device == 0x13FE)
+ if ((adev->pdev->device == 0x13FE) ||
+ (adev->pdev->device == 0x143F))
adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
break;
default:
@@ -1507,6 +1534,11 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
}
+ if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
+ dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
+ amdgpu_reset_method = -1;
+ }
+
amdgpu_device_check_smu_prv_buffer_size(adev);
amdgpu_device_check_vm_size(adev);
@@ -1517,7 +1549,6 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
amdgpu_gmc_tmz_set(adev);
- amdgpu_gmc_noretry_set(adev);
return 0;
}
@@ -1955,27 +1986,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
chip_name = "arcturus";
break;
- case CHIP_RENOIR:
- if (adev->apu_flags & AMD_APU_IS_RENOIR)
- chip_name = "renoir";
- else
- chip_name = "green_sardine";
- break;
- case CHIP_NAVI10:
- chip_name = "navi10";
- break;
- case CHIP_NAVI14:
- chip_name = "navi14";
- break;
case CHIP_NAVI12:
chip_name = "navi12";
break;
- case CHIP_VANGOGH:
- chip_name = "vangogh";
- break;
- case CHIP_YELLOW_CARP:
- chip_name = "yellow_carp";
- break;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
@@ -2073,6 +2086,8 @@ out:
*/
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct pci_dev *parent;
int i, r;
amdgpu_device_enable_virtual_display(adev);
@@ -2137,6 +2152,18 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
break;
}
+ if (amdgpu_has_atpx() &&
+ (amdgpu_is_atpx_hybrid() ||
+ amdgpu_has_atpx_dgpu_power_cntl()) &&
+ ((adev->flags & AMD_IS_APU) == 0) &&
+ !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
+ adev->flags |= AMD_IS_PX;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ parent = pci_upstream_bridge(adev->pdev);
+ adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
+ }
+
amdgpu_amdkfd_device_probe(adev);
adev->pm.pp_feature = amdgpu_pp_feature_mask;
@@ -2673,6 +2700,12 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = true;
}
+ r = amdgpu_ras_late_init(adev);
+ if (r) {
+ DRM_ERROR("amdgpu_ras_late_init failed %d", r);
+ return r;
+ }
+
amdgpu_ras_set_error_query_ready(adev, true);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
@@ -2687,7 +2720,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 /* For passthrough configuration on arcturus and aldebaran, enable special handling for SBR */
if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1)||
adev->asic_type == CHIP_ALDEBARAN ))
- smu_handle_passthrough_sbr(&adev->smu, true);
+ amdgpu_dpm_handle_passthrough_sbr(adev, true);
if (adev->gmc.xgmi.num_physical_nodes > 1) {
mutex_lock(&mgpu_info.mutex);
@@ -2771,11 +2804,11 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
}
}
- amdgpu_amdkfd_suspend(adev, false);
-
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+ amdgpu_amdkfd_suspend(adev, false);
+
 /* Workaround for ASICs that need to disable SMC first */
amdgpu_device_smu_fini_early(adev);
@@ -2944,7 +2977,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
int i, r;
if (adev->in_s0ix)
- amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+ amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
@@ -3370,9 +3403,9 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
if (adev->asic_reset_res)
goto fail;
- if (adev->mmhub.ras_funcs &&
- adev->mmhub.ras_funcs->reset_ras_error_count)
- adev->mmhub.ras_funcs->reset_ras_error_count(adev);
+ if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
+ adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
+ adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
} else {
task_barrier_full(&hive->tb);
@@ -3558,8 +3591,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
hash_init(adev->mn_hash);
mutex_init(&adev->psp.mutex);
mutex_init(&adev->notifier_lock);
+ mutex_init(&adev->pm.stable_pstate_ctx_lock);
+ mutex_init(&adev->benchmark_mutex);
- amdgpu_device_init_apu_flags(adev);
+ amdgpu_device_init_apu_flags(adev);
r = amdgpu_device_check_arguments(adev);
if (r)
@@ -3580,6 +3615,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->reset_list);
+ INIT_LIST_HEAD(&adev->ras_list);
+
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
@@ -3629,6 +3666,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
adev->enable_mes = true;
+ /*
+ * Reset domain needs to be present early, before the XGMI hive is
+ * discovered (if any) and initialized, to use the reset sem and in_gpu
+ * reset flag early on during init and before calling RREG32.
+ */
+ adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
+ if (!adev->reset_domain)
+ return -ENOMEM;
+
/* detect hw virtualization here */
amdgpu_detect_virtualization(adev);
@@ -3638,20 +3684,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return r;
}
- /*
- * Reset domain needs to be present early, before XGMI hive discovered
- * (if any) and intitialized to use reset sem and in_gpu reset flag
- * early on during init.
- */
- adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE ,"amdgpu-reset-dev");
- if (!adev->reset_domain)
- return -ENOMEM;
-
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)
return r;
+ amdgpu_gmc_noretry_set(adev);
/* Need to get xgmi info early to decide the reset behavior*/
if (adev->gmc.xgmi.supported) {
r = adev->gfxhub.funcs->get_xgmi_info(adev);
@@ -3819,19 +3857,6 @@ fence_driver_init:
} else
adev->ucode_sysfs_en = true;
- if ((amdgpu_testing & 1)) {
- if (adev->accel_working)
- amdgpu_test_moves(adev);
- else
- DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
- }
- if (amdgpu_benchmarking) {
- if (adev->accel_working)
- amdgpu_benchmark(adev, amdgpu_benchmarking);
- else
- DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
- }
-
/*
* Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
* Otherwise the mgpu fan boost feature will be skipped due to the
@@ -4117,7 +4142,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
return 0;
if (adev->in_s0ix)
- amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
+ amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
/* post card */
if (amdgpu_device_need_post(adev)) {
@@ -4420,7 +4445,9 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
{
int r;
struct amdgpu_hive_info *hive = NULL;
+ int retry_limit = 0;
+retry:
amdgpu_amdkfd_pre_reset(adev);
@@ -4469,6 +4496,14 @@ error:
}
amdgpu_virt_release_full_gpu(adev, true);
+ if (AMDGPU_RETRY_SRIOV_RESET(r)) {
+ if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
+ retry_limit++;
+ goto retry;
+ } else
+ DRM_ERROR("GPU reset retry is beyond the retry limit\n");
+ }
+
return r;
}
@@ -4660,6 +4695,22 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
return r;
}
+static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
+{
+ uint32_t reg_value;
+ int i;
+
+ lockdep_assert_held(&adev->reset_domain->sem);
+ dump_stack();
+
+ for (i = 0; i < adev->num_regs; i++) {
+ reg_value = RREG32(adev->reset_dump_reg_list[i]);
+ trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], reg_value);
+ }
+
+ return 0;
+}
+
int amdgpu_do_asic_reset(struct list_head *device_list_handle,
struct amdgpu_reset_context *reset_context)
{
@@ -4670,6 +4721,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
+ amdgpu_reset_reg_dumps(tmp_adev);
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
if (r == -ENOSYS)
@@ -4718,9 +4770,9 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
if (!r && amdgpu_ras_intr_triggered()) {
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- if (tmp_adev->mmhub.ras_funcs &&
- tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
- tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
+ if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
+ tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
+ tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
}
amdgpu_ras_intr_cleared();
@@ -5183,6 +5235,9 @@ skip_hw_reset:
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
}
+ if (tmp_adev->asic_reset_res)
+ r = tmp_adev->asic_reset_res;
+
tmp_adev->asic_reset_res = 0;
if (r) {
@@ -5746,3 +5801,36 @@ void amdgpu_device_halt(struct amdgpu_device *adev)
pci_disable_device(pdev);
pci_wait_for_pending_transaction(pdev);
}
+
+u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
+ u32 reg)
+{
+ unsigned long flags, address, data;
+ u32 r;
+
+ address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, reg * 4);
+ (void)RREG32(address);
+ r = RREG32(data);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ return r;
+}
+
+void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
+ u32 reg, u32 v)
+{
+ unsigned long flags, address, data;
+
+ address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, reg * 4);
+ (void)RREG32(address);
+ WREG32(data, v);
+ (void)RREG32(data);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 81bfee978b74..e4fcbb385a62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -360,8 +360,11 @@ out:
return r;
}
+static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
+
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
+ amdgpu_discovery_sysfs_fini(adev);
kfree(adev->mman.discovery_bin);
adev->mman.discovery_bin = NULL;
}
@@ -382,6 +385,578 @@ static int amdgpu_discovery_validate_ip(const struct ip *ip)
return 0;
}
+static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
+ uint32_t *vcn_harvest_count)
+{
+ struct binary_header *bhdr;
+ struct ip_discovery_header *ihdr;
+ struct die_header *dhdr;
+ struct ip *ip;
+ uint16_t die_offset, ip_offset, num_dies, num_ips;
+ int i, j;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ num_dies = le16_to_cpu(ihdr->num_dies);
+
+ /* scan harvest bit of all IP data structures */
+ for (i = 0; i < num_dies; i++) {
+ die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ num_ips = le16_to_cpu(dhdr->num_ips);
+ ip_offset = die_offset + sizeof(*dhdr);
+
+ for (j = 0; j < num_ips; j++) {
+ ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
+
+ if (amdgpu_discovery_validate_ip(ip))
+ goto next_ip;
+
+ if (le16_to_cpu(ip->harvest) == 1) {
+ switch (le16_to_cpu(ip->hw_id)) {
+ case VCN_HWID:
+ (*vcn_harvest_count)++;
+ if (ip->number_instance == 0)
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
+ else
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ break;
+ case DMU_HWID:
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ break;
+ default:
+ break;
+ }
+ }
+next_ip:
+ ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
+ }
+ }
+}
+
+static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
+ uint32_t *vcn_harvest_count)
+{
+ struct binary_header *bhdr;
+ struct harvest_table *harvest_info;
+ int i;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset));
+ for (i = 0; i < 32; i++) {
+ if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
+ break;
+
+ switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
+ case VCN_HWID:
+ (*vcn_harvest_count)++;
+ if (harvest_info->list[i].number_instance == 0)
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
+ else
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ break;
+ case DMU_HWID:
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/* ================================================== */
+
+struct ip_hw_instance {
+ struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
+
+ int hw_id;
+ u8 num_instance;
+ u8 major, minor, revision;
+ u8 harvest;
+
+ int num_base_addresses;
+ u32 base_addr[];
+};
+
+struct ip_hw_id {
+ struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
+ int hw_id;
+};
+
+struct ip_die_entry {
+ struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */
+ u16 num_ips;
+};
+
+/* -------------------------------------------------- */
+
+struct ip_hw_instance_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
+};
+
+static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
+}
+
+static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
+}
+
+static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
+}
+
+static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
+}
+
+static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
+}
+
+static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
+}
+
+static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
+}
+
+static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ ssize_t res, at;
+ int ii;
+
+ for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
+ /* Here we satisfy the condition that at + size <= PAGE_SIZE.
+ */
+ if (at + 12 > PAGE_SIZE)
+ break;
+ res = sysfs_emit_at(buf, at, "0x%08X\n",
+ ip_hw_instance->base_addr[ii]);
+ if (res <= 0)
+ break;
+ at += res;
+ }
+
+ return res < 0 ? res : at;
+}
+
+static struct ip_hw_instance_attr ip_hw_attr[] = {
+ __ATTR_RO(hw_id),
+ __ATTR_RO(num_instance),
+ __ATTR_RO(major),
+ __ATTR_RO(minor),
+ __ATTR_RO(revision),
+ __ATTR_RO(harvest),
+ __ATTR_RO(num_base_addresses),
+ __ATTR_RO(base_addr),
+};
+
+static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
+ATTRIBUTE_GROUPS(ip_hw_instance);
+
+#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
+#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
+
+static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
+ struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
+
+ if (!ip_hw_attr->show)
+ return -EIO;
+
+ return ip_hw_attr->show(ip_hw_instance, buf);
+}
+
+static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
+ .show = ip_hw_instance_attr_show,
+};
+
+static void ip_hw_instance_release(struct kobject *kobj)
+{
+ struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
+
+ kfree(ip_hw_instance);
+}
+
+static struct kobj_type ip_hw_instance_ktype = {
+ .release = ip_hw_instance_release,
+ .sysfs_ops = &ip_hw_instance_sysfs_ops,
+ .default_groups = ip_hw_instance_groups,
+};
+
+/* -------------------------------------------------- */
+
+#define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
+
+static void ip_hw_id_release(struct kobject *kobj)
+{
+ struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
+
+ if (!list_empty(&ip_hw_id->hw_id_kset.list))
+ DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
+ kfree(ip_hw_id);
+}
+
+static struct kobj_type ip_hw_id_ktype = {
+ .release = ip_hw_id_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/* -------------------------------------------------- */
+
+static void die_kobj_release(struct kobject *kobj);
+static void ip_disc_release(struct kobject *kobj);
+
+struct ip_die_entry_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
+};
+
+#define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)
+
+static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
+}
+
+/* If there are more ip_die_entry attrs, other than the number of IPs,
+ * we can make this into an array of attrs, and then initialize
+ * ip_die_entry_attrs in a loop.
+ */
+static struct ip_die_entry_attribute num_ips_attr =
+ __ATTR_RO(num_ips);
+
+static struct attribute *ip_die_entry_attrs[] = {
+ &num_ips_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
+
+#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
+
+static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
+ struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
+
+ if (!ip_die_entry_attr->show)
+ return -EIO;
+
+ return ip_die_entry_attr->show(ip_die_entry, buf);
+}
+
+static void ip_die_entry_release(struct kobject *kobj)
+{
+ struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
+
+ if (!list_empty(&ip_die_entry->ip_kset.list))
+ DRM_ERROR("ip_die_entry->ip_kset is not empty");
+ kfree(ip_die_entry);
+}
+
+static const struct sysfs_ops ip_die_entry_sysfs_ops = {
+ .show = ip_die_entry_attr_show,
+};
+
+static struct kobj_type ip_die_entry_ktype = {
+ .release = ip_die_entry_release,
+ .sysfs_ops = &ip_die_entry_sysfs_ops,
+ .default_groups = ip_die_entry_groups,
+};
+
+static struct kobj_type die_kobj_ktype = {
+ .release = die_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static struct kobj_type ip_discovery_ktype = {
+ .release = ip_disc_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+struct ip_discovery_top {
+ struct kobject kobj; /* ip_discovery/ */
+ struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */
+ struct amdgpu_device *adev;
+};
+
+static void die_kobj_release(struct kobject *kobj)
+{
+ struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
+ struct ip_discovery_top,
+ die_kset);
+ if (!list_empty(&ip_top->die_kset.list))
+ DRM_ERROR("ip_top->die_kset is not empty");
+}
+
+static void ip_disc_release(struct kobject *kobj)
+{
+ struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
+ kobj);
+ struct amdgpu_device *adev = ip_top->adev;
+
+ adev->ip_top = NULL;
+ kfree(ip_top);
+}
+
+static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
+ struct ip_die_entry *ip_die_entry,
+ const size_t _ip_offset, const int num_ips)
+{
+ int ii, jj, kk, res;
+
+ DRM_DEBUG("num_ips:%d", num_ips);
+
+ /* Find all IPs of a given HW ID, and add their instance to
+ * #die/#hw_id/#instance/<attributes>
+ */
+ for (ii = 0; ii < HW_ID_MAX; ii++) {
+ struct ip_hw_id *ip_hw_id = NULL;
+ size_t ip_offset = _ip_offset;
+
+ for (jj = 0; jj < num_ips; jj++) {
+ struct ip *ip;
+ struct ip_hw_instance *ip_hw_instance;
+
+ ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
+ if (amdgpu_discovery_validate_ip(ip) ||
+ le16_to_cpu(ip->hw_id) != ii)
+ goto next_ip;
+
+ DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
+
+ /* We have a hw_id match; register the hw
+ * block if not yet registered.
+ */
+ if (!ip_hw_id) {
+ ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
+ if (!ip_hw_id)
+ return -ENOMEM;
+ ip_hw_id->hw_id = ii;
+
+ kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
+ ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
+ ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
+ res = kset_register(&ip_hw_id->hw_id_kset);
+ if (res) {
+ DRM_ERROR("Couldn't register ip_hw_id kset");
+ kfree(ip_hw_id);
+ return res;
+ }
+ if (hw_id_names[ii]) {
+ res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
+ &ip_hw_id->hw_id_kset.kobj,
+ hw_id_names[ii]);
+ if (res) {
+ DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
+ hw_id_names[ii],
+ kobject_name(&ip_die_entry->ip_kset.kobj));
+ }
+ }
+ }
+
+ /* Now register its instance.
+ */
+ ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
+ base_addr,
+ ip->num_base_address),
+ GFP_KERNEL);
+ if (!ip_hw_instance) {
+ DRM_ERROR("no memory for ip_hw_instance");
+ return -ENOMEM;
+ }
+ ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
+ ip_hw_instance->num_instance = ip->number_instance;
+ ip_hw_instance->major = ip->major;
+ ip_hw_instance->minor = ip->minor;
+ ip_hw_instance->revision = ip->revision;
+ ip_hw_instance->harvest = ip->harvest;
+ ip_hw_instance->num_base_addresses = ip->num_base_address;
+
+ for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++)
+ ip_hw_instance->base_addr[kk] = ip->base_address[kk];
+
+ kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
+ ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
+ res = kobject_add(&ip_hw_instance->kobj, NULL,
+ "%d", ip_hw_instance->num_instance);
+next_ip:
+ ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
+ }
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ struct ip_discovery_header *ihdr;
+ struct die_header *dhdr;
+ struct kset *die_kset = &adev->ip_top->die_kset;
+ u16 num_dies, die_offset, num_ips;
+ size_t ip_offset;
+ int ii, res;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ num_dies = le16_to_cpu(ihdr->num_dies);
+
+ DRM_DEBUG("number of dies: %d\n", num_dies);
+
+ for (ii = 0; ii < num_dies; ii++) {
+ struct ip_die_entry *ip_die_entry;
+
+ die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ num_ips = le16_to_cpu(dhdr->num_ips);
+ ip_offset = die_offset + sizeof(*dhdr);
+
+ /* Add the die to the kset.
+ *
+ * dhdr->die_id == ii, which was checked in
+ * amdgpu_discovery_reg_base_init().
+ */
+
+ ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
+ if (!ip_die_entry)
+ return -ENOMEM;
+
+ ip_die_entry->num_ips = num_ips;
+
+ kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
+ ip_die_entry->ip_kset.kobj.kset = die_kset;
+ ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
+ res = kset_register(&ip_die_entry->ip_kset);
+ if (res) {
+ DRM_ERROR("Couldn't register ip_die_entry kset");
+ kfree(ip_die_entry);
+ return res;
+ }
+
+ amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips);
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
+{
+ struct kset *die_kset;
+ int res, ii;
+
+ adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
+ if (!adev->ip_top)
+ return -ENOMEM;
+
+ adev->ip_top->adev = adev;
+
+ res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
+ &adev->dev->kobj, "ip_discovery");
+ if (res) {
+ DRM_ERROR("Couldn't init and add ip_discovery/");
+ goto Err;
+ }
+
+ die_kset = &adev->ip_top->die_kset;
+ kobject_set_name(&die_kset->kobj, "%s", "die");
+ die_kset->kobj.parent = &adev->ip_top->kobj;
+ die_kset->kobj.ktype = &die_kobj_ktype;
+ res = kset_register(&adev->ip_top->die_kset);
+ if (res) {
+ DRM_ERROR("Couldn't register die_kset");
+ goto Err;
+ }
+
+ for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
+ ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
+ ip_hw_instance_attrs[ii] = NULL;
+
+ res = amdgpu_discovery_sysfs_recurse(adev);
+
+ return res;
+Err:
+ kobject_put(&adev->ip_top->kobj);
+ return res;
+}
+
+/* -------------------------------------------------- */
+
+#define list_to_kobj(el) container_of(el, struct kobject, entry)
+
+static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
+{
+ struct list_head *el, *tmp;
+ struct kset *hw_id_kset;
+
+ hw_id_kset = &ip_hw_id->hw_id_kset;
+ spin_lock(&hw_id_kset->list_lock);
+ list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
+ list_del_init(el);
+ spin_unlock(&hw_id_kset->list_lock);
+ /* kobject is embedded in ip_hw_instance */
+ kobject_put(list_to_kobj(el));
+ spin_lock(&hw_id_kset->list_lock);
+ }
+ spin_unlock(&hw_id_kset->list_lock);
+ kobject_put(&ip_hw_id->hw_id_kset.kobj);
+}
+
+static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
+{
+ struct list_head *el, *tmp;
+ struct kset *ip_kset;
+
+ ip_kset = &ip_die_entry->ip_kset;
+ spin_lock(&ip_kset->list_lock);
+ list_for_each_prev_safe(el, tmp, &ip_kset->list) {
+ list_del_init(el);
+ spin_unlock(&ip_kset->list_lock);
+ amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
+ spin_lock(&ip_kset->list_lock);
+ }
+ spin_unlock(&ip_kset->list_lock);
+ kobject_put(&ip_die_entry->ip_kset.kobj);
+}
+
+static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
+{
+ struct list_head *el, *tmp;
+ struct kset *die_kset;
+
+ die_kset = &adev->ip_top->die_kset;
+ spin_lock(&die_kset->list_lock);
+ list_for_each_prev_safe(el, tmp, &die_kset->list) {
+ list_del_init(el);
+ spin_unlock(&die_kset->list_lock);
+ amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
+ spin_lock(&die_kset->list_lock);
+ }
+ spin_unlock(&die_kset->list_lock);
+ kobject_put(&adev->ip_top->die_kset.kobj);
+ kobject_put(&adev->ip_top->kobj);
+}
+
+/* ================================================== */
+
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
@@ -492,6 +1067,8 @@ next_ip:
}
}
+ amdgpu_discovery_sysfs_init(adev);
+
return 0;
}
@@ -545,32 +1122,26 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int n
void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
- struct binary_header *bhdr;
- struct harvest_table *harvest_info;
- int i, vcn_harvest_count = 0;
-
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset));
-
- for (i = 0; i < 32; i++) {
- if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
- break;
+ int vcn_harvest_count = 0;
- switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
- case VCN_HWID:
- vcn_harvest_count++;
- if (harvest_info->list[i].number_instance == 0)
- adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
- else
- adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
- break;
- case DMU_HWID:
- adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
- break;
- default:
- break;
- }
+ /*
+ * The harvest table does not apply to Navi1x and legacy GPUs,
+ * so read the harvest bit per IP data structure to set the
+ * harvest configuration.
+ */
+ if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0)) {
+ if ((adev->pdev->device == 0x731E &&
+ (adev->pdev->revision == 0xC6 ||
+ adev->pdev->revision == 0xC7)) ||
+ (adev->pdev->device == 0x7340 &&
+ adev->pdev->revision == 0xC9) ||
+ (adev->pdev->device == 0x7360 &&
+ adev->pdev->revision == 0xC7))
+ amdgpu_discovery_read_harvest_bit_per_ip(adev,
+ &vcn_harvest_count);
+ } else {
+ amdgpu_discovery_read_from_harvest_table(adev,
+ &vcn_harvest_count);
}
amdgpu_discovery_harvest_config_quirk(adev);
@@ -674,12 +1245,15 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
break;
default:
@@ -709,12 +1283,15 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
break;
default:
@@ -790,6 +1367,8 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 8):
amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
break;
default:
@@ -831,6 +1410,8 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 8):
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
default:
@@ -846,8 +1427,14 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) {
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+ return 0;
+ }
+
+ if (!amdgpu_device_has_dc_support(adev))
+ return 0;
+
#if defined(CONFIG_DRM_AMD_DC)
- } else if (adev->ip_versions[DCE_HWIP][0]) {
+ if (adev->ip_versions[DCE_HWIP][0]) {
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(1, 0, 0):
case IP_VERSION(1, 0, 1):
@@ -861,6 +1448,8 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
amdgpu_device_ip_block_add(adev, &dm_ip_block);
break;
default:
@@ -882,8 +1471,8 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0]);
return -EINVAL;
}
-#endif
}
+#endif
return 0;
}
@@ -904,12 +1493,15 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
break;
default:
@@ -944,8 +1536,10 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(5, 2, 2):
case IP_VERSION(5, 2, 4):
case IP_VERSION(5, 2, 5):
+ case IP_VERSION(5, 2, 6):
case IP_VERSION(5, 2, 3):
case IP_VERSION(5, 2, 1):
+ case IP_VERSION(5, 2, 7):
amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
break;
default:
@@ -1012,6 +1606,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 16):
case IP_VERSION(3, 1, 1):
+ case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 0, 2):
case IP_VERSION(3, 0, 192):
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
@@ -1038,12 +1633,14 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
break;
default:
@@ -1217,11 +1814,6 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
return -EINVAL;
amdgpu_discovery_harvest_ip(adev);
-
- if (!adev->mman.discovery_bin) {
- DRM_ERROR("ip discovery uninitialized\n");
- return -EINVAL;
- }
break;
}
@@ -1242,6 +1834,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 4):
@@ -1254,10 +1847,32 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 3):
adev->family = AMDGPU_FAMILY_YC;
break;
+ case IP_VERSION(10, 3, 6):
+ adev->family = AMDGPU_FAMILY_GC_10_3_6;
+ break;
+ case IP_VERSION(10, 3, 7):
+ adev->family = AMDGPU_FAMILY_GC_10_3_7;
+ break;
default:
return -EINVAL;
}
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
+ adev->flags |= AMD_IS_APU;
+ break;
+ default:
+ break;
+ }
+
if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
adev->gmc.xgmi.supported = true;
@@ -1285,7 +1900,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(7, 2, 0):
case IP_VERSION(7, 2, 1):
+ case IP_VERSION(7, 3, 0):
case IP_VERSION(7, 5, 0):
+ case IP_VERSION(7, 5, 1):
adev->nbio.funcs = &nbio_v7_2_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
break;
@@ -1368,6 +1985,8 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 9):
+ case IP_VERSION(13, 0, 10):
adev->smuio.funcs = &smuio_v11_0_6_funcs;
break;
case IP_VERSION(13, 0, 2):
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index e76b96d55551..fae5c1debfad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -506,28 +506,9 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
*/
if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
amdgpu_bo_support_uswc(bo_flags) &&
- amdgpu_device_asic_has_dc_support(adev->asic_type)) {
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- domain |= AMDGPU_GEM_DOMAIN_GTT;
- break;
- case CHIP_RAVEN:
- /* enable S/G on PCO and RV2 */
- if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
- (adev->apu_flags & AMD_APU_IS_PICASSO))
- domain |= AMDGPU_GEM_DOMAIN_GTT;
- break;
- case CHIP_RENOIR:
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
- domain |= AMDGPU_GEM_DOMAIN_GTT;
- break;
-
- default:
- break;
- }
- }
+ amdgpu_device_asic_has_dc_support(adev->asic_type) &&
+ adev->mode_info.gpu_vm_support)
+ domain |= AMDGPU_GEM_DOMAIN_GTT;
#endif
return domain;
@@ -710,9 +691,9 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
return -EINVAL;
}
- if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
- else if (adev->family == AMDGPU_FAMILY_NV)
+ else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
version = AMD_FMT_MOD_TILE_VER_GFX10;
else
version = AMD_FMT_MOD_TILE_VER_GFX9;
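
The version comparisons above rely on the discovery IP version being packed into a single integer with the major number in the most significant bits, so a plain numeric compare follows version order. A minimal standalone sketch of that idea, assuming the usual major/minor/revision packing of the IP_VERSION macro (not quoted from the driver headers):

#include <stdio.h>
#include <stdint.h>

/* Assumed packing: major in bits 23:16, minor in bits 15:8, revision in
 * bits 7:0, so integer comparison matches version ordering.
 */
#define IP_VERSION(mj, mn, rv) (((uint32_t)(mj) << 16) | ((mn) << 8) | (rv))

int main(void)
{
	uint32_t gc = IP_VERSION(10, 3, 2);	/* example GC version */

	if (gc >= IP_VERSION(10, 3, 0))
		puts("GFX10 RB+ tile version");
	else if (gc >= IP_VERSION(10, 0, 0))
		puts("GFX10 tile version");
	else
		puts("GFX9 tile version");
	return 0;
}
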
@@ -806,7 +787,7 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
if (adev->family >= AMDGPU_FAMILY_NV) {
int extra_pipe = 0;
- if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
+ if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) &&
pipes == packers && pipes > 1)
extra_pipe = 1;
@@ -1143,7 +1124,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
if (ret)
return ret;
- if (dev->mode_config.fb_modifiers_not_supported) {
+ if (dev->mode_config.fb_modifiers_not_supported && !adev->enable_virtual_display) {
drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
"GFX9+ requires FB check based on format modifier\n");
ret = check_tiling_flags_gfx6(rfb);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 4c83f1db8a24..bb1c025d9001 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -99,9 +99,11 @@
* - 3.42.0 - Add 16bpc fixed point display support
* - 3.43.0 - Add device hot plug/unplug support
* - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B
+ * - 3.45.0 - Add context ioctl stable pstate interface
+ * - 3.46.0 - To enable hot plug amdgpu tests in libdrm
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 44
+#define KMS_DRIVER_MINOR 46
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit;
@@ -109,8 +111,6 @@ int amdgpu_vis_vram_limit;
int amdgpu_gart_size = -1; /* auto */
int amdgpu_gtt_size = -1; /* auto */
int amdgpu_moverate = -1; /* auto */
-int amdgpu_benchmarking;
-int amdgpu_testing;
int amdgpu_audio = -1;
int amdgpu_disp_priority;
int amdgpu_hw_i2c;
@@ -174,10 +174,11 @@ int amdgpu_mes;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
int amdgpu_tmz = -1; /* auto */
-uint amdgpu_freesync_vid_mode;
int amdgpu_reset_method = -1; /* auto */
int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;
+int amdgpu_use_xgmi_p2p = 1;
+int amdgpu_vcnfw_log;
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -232,20 +233,6 @@ MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc.
module_param_named(moverate, amdgpu_moverate, int, 0600);
/**
- * DOC: benchmark (int)
- * Run benchmarks. The default is 0 (Skip benchmarks).
- */
-MODULE_PARM_DESC(benchmark, "Run benchmark");
-module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
-
-/**
- * DOC: test (int)
- * Test BO GTT->VRAM and VRAM->GTT GPU copies. The default is 0 (Skip test, only set 1 to run test).
- */
-MODULE_PARM_DESC(test, "Run tests");
-module_param_named(test, amdgpu_testing, int, 0444);
-
-/**
* DOC: audio (int)
* Set HDMI/DP audio. Only affects non-DC display handling. The default is -1 (Enabled), set 0 to disable it.
*/
@@ -667,6 +654,13 @@ MODULE_PARM_DESC(force_asic_type,
"A non negative value used to specify the asic type for all supported GPUs");
module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
+/**
+ * DOC: use_xgmi_p2p (int)
+ * Enables/disables XGMI P2P interface (0 = disable, 1 = enable).
+ */
+MODULE_PARM_DESC(use_xgmi_p2p,
+ "Enable XGMI P2P interface (0 = disable; 1 = enable (default))");
+module_param_named(use_xgmi_p2p, amdgpu_use_xgmi_p2p, int, 0444);
#ifdef CONFIG_HSA_AMD
@@ -740,7 +734,7 @@ MODULE_PARM_DESC(debug_largebar,
* systems with a broken CRAT table.
*
* Default is auto (according to asic type, iommu_v2, and crat table, to decide
- * whehter use CRAT)
+ * whether use CRAT)
*/
int ignore_crat;
module_param(ignore_crat, int, 0444);
@@ -844,36 +838,10 @@ MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)
module_param_named(tmz, amdgpu_tmz, int, 0444);
/**
- * DOC: freesync_video (uint)
- * Enable the optimization to adjust front porch timing to achieve seamless
- * mode change experience when setting a freesync supported mode for which full
- * modeset is not needed.
- *
- * The Display Core will add a set of modes derived from the base FreeSync
- * video mode into the corresponding connector's mode list based on commonly
- * used refresh rates and VRR range of the connected display, when users enable
- * this feature. From the userspace perspective, they can see a seamless mode
- * change experience when the change between different refresh rates under the
- * same resolution. Additionally, userspace applications such as Video playback
- * can read this modeset list and change the refresh rate based on the video
- * frame rate. Finally, the userspace can also derive an appropriate mode for a
- * particular refresh rate based on the FreeSync Mode and add it to the
- * connector's mode list.
- *
- * Note: This is an experimental feature.
- *
- * The default value: 0 (off).
- */
-MODULE_PARM_DESC(
- freesync_video,
- "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
-module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
-
-/**
* DOC: reset_method (int)
- * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco, 5 = pci)
+ * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
*/
-MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco, 5 = pci)");
+MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
module_param_named(reset_method, amdgpu_reset_method, int, 0444);
/**
@@ -889,6 +857,13 @@ MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8
module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
/**
+ * DOC: vcnfw_log (int)
+ * Enable vcnfw log output for debugging; the default is disabled.
+ */
+MODULE_PARM_DESC(vcnfw_log, "Enable vcnfw log (0 = disable (default value), 1 = enable)");
+module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444);
+
+/**
* DOC: smu_pptable_id (int)
* Used to override pptable id. id = 0 use VBIOS pptable.
* id > 0 use the soft pptable with specified id.
@@ -1942,13 +1917,14 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
/* Aldebaran */
- {0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
+ {0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
+ {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
+ {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
/* CYAN_SKILLFISH */
{0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x143F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
/* BEIGE_GOBY */
{0x1002, 0x7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
@@ -1994,6 +1970,28 @@ static bool amdgpu_is_fw_framebuffer(resource_size_t base,
return found;
}
+static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
+{
+ struct pci_dev *p = NULL;
+ int i;
+
+ /* 0 - GPU
+ * 1 - audio
+ * 2 - USB
+ * 3 - UCSI
+ */
+ for (i = 1; i < 4; i++) {
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+ adev->pdev->bus->number, i);
+ if (p) {
+ pm_runtime_get_sync(&p->dev);
+ pm_runtime_mark_last_busy(&p->dev);
+ pm_runtime_put_autosuspend(&p->dev);
+ pci_dev_put(p);
+ }
+ }
+}
+
static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -2011,6 +2009,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
+ if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev))
+ amdgpu_aspm = 0;
+
if (amdgpu_virtual_display ||
amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
supports_atomic = true;
@@ -2126,6 +2127,48 @@ retry_init:
if (ret)
DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
+ if (adev->runpm) {
+ /* only need to skip on ATPX */
+ if (amdgpu_device_supports_px(ddev))
+ dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+ /* we want direct complete for BOCO */
+ if (amdgpu_device_supports_boco(ddev))
+ dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE |
+ DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_MAY_SKIP_RESUME);
+ pm_runtime_use_autosuspend(ddev->dev);
+ pm_runtime_set_autosuspend_delay(ddev->dev, 5000);
+
+ pm_runtime_allow(ddev->dev);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ /*
+ * For runpm implemented via BACO, PMFW will handle the
+ * timing for BACO in and out:
+ * - put ASIC into BACO state only when both video and
+ * audio functions are in D3 state.
+ * - pull ASIC out of BACO state when either video or
+ * audio function is in D0 state.
+ * Also, at startup, PMFW assumes both functions are in
+ * D0 state.
+ *
+ * So if snd driver was loaded prior to amdgpu driver
+ * and audio function was put into D3 state, there will
+ * be no PMFW-aware D-state transition(D0->D3) on runpm
+ * suspend. Thus the BACO will be not correctly kicked in.
+ *
+ * Via amdgpu_get_secondary_funcs(), the audio dev is put
+ * into D0 state. Then there will be a PMFW-aware D-state
+ * transition(D0->D3) on runpm suspend.
+ */
+ if (amdgpu_device_supports_baco(ddev) &&
+ !(adev->flags & AMD_IS_APU) &&
+ (adev->asic_type >= CHIP_NAVI10))
+ amdgpu_get_secondary_funcs(adev);
+ }
+
return 0;
err_pci:
@@ -2137,8 +2180,15 @@ static void
amdgpu_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
drm_dev_unplug(dev);
+
+ if (adev->runpm) {
+ pm_runtime_get_sync(dev->dev);
+ pm_runtime_forbid(dev->dev);
+ }
+
amdgpu_driver_unload_kms(dev);
/*
@@ -2246,13 +2296,20 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
static int amdgpu_pmops_prepare(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
/* Return a positive number here so
* DPM_FLAG_SMART_SUSPEND works properly
*/
if (amdgpu_device_supports_boco(drm_dev))
- return pm_runtime_suspended(dev) &&
- pm_suspend_via_firmware();
+ return pm_runtime_suspended(dev);
+
+ /* if we will not support s3 or s2i for the device
+ * then skip suspend
+ */
+ if (!amdgpu_acpi_is_s0ix_active(adev) &&
+ !amdgpu_acpi_is_s3_active(adev))
+ return 1;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 2a786e788627..ecada5eadfe3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -30,7 +30,6 @@
#include "amdgpu_eeprom.h"
#define FRU_EEPROM_MADDR 0x60000
-#define I2C_PRODUCT_INFO_OFFSET 0xC0
static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
{
@@ -40,7 +39,13 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
*/
struct atom_context *atom_ctx = adev->mode_info.atom_context;
- /* VBIOS is of the format ###-DXXXYY-##. For SKU identification,
+ /* The i2c access is blocked on VF
+ * TODO: Need other way to get the info
+ */
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ /* VBIOS is of the format ###-DXXXYYYY-##. For SKU identification,
* we can use just the "DXXX" portion. If there were more models, we
* could convert the 3 characters to a hex integer and use a switch
* for ease/speed/readability. For now, 2 string comparisons are
@@ -59,17 +64,24 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
case CHIP_ALDEBARAN:
/* All Aldebaran SKUs have the FRU */
return true;
+ case CHIP_SIENNA_CICHLID:
+ if (strnstr(atom_ctx->vbios_version, "D603",
+ sizeof(atom_ctx->vbios_version)))
+ return true;
+ else
+ return false;
default:
return false;
}
}
static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
- unsigned char *buff)
+ unsigned char *buf, size_t buf_size)
{
- int ret, size;
+ int ret;
+ u8 size;
- ret = amdgpu_eeprom_read(&adev->pm.smu_i2c, addrptr, buff, 1);
+ ret = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addrptr, buf, 1);
if (ret < 1) {
DRM_WARN("FRU: Failed to get size field");
return ret;
@@ -78,9 +90,11 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
/* The size returned by the i2c requires subtraction of 0xC0 since the
* size apparently always reports as 0xC0+actual size.
*/
- size = buff[0] - I2C_PRODUCT_INFO_OFFSET;
+ size = buf[0] & 0x3F;
+ size = min_t(size_t, size, buf_size);
- ret = amdgpu_eeprom_read(&adev->pm.smu_i2c, addrptr + 1, buff, size);
+ ret = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addrptr + 1,
+ buf, size);
if (ret < 1) {
DRM_WARN("FRU: Failed to get data field");
return ret;
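
The size byte handled above is a FRU-style type/length byte: the upper bits carry the field type (0xC0 for ASCII data) and the low six bits the payload length, so the new buf[0] & 0x3F and the old subtraction of 0xC0 agree for lengths below 0x40. A small standalone sketch with a made-up value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical type/length byte: ASCII type bits (0xC0) | length 16 */
	uint8_t type_len = 0xC0 | 0x10;
	uint8_t len_masked = type_len & 0x3F;	/* approach used by the patch */
	uint8_t len_subbed = type_len - 0xC0;	/* approach it replaces */

	/* Both print 16 for any length below 0x40 */
	printf("masked: %u, subtracted: %u\n", len_masked, len_subbed);
	return 0;
}
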
@@ -91,19 +105,15 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
{
- unsigned char buff[AMDGPU_PRODUCT_NAME_LEN+2];
+ unsigned char buf[AMDGPU_PRODUCT_NAME_LEN];
u32 addrptr;
int size, len;
- int offset = 2;
if (!is_fru_eeprom_supported(adev))
return 0;
- if (adev->asic_type == CHIP_ALDEBARAN)
- offset = 0;
-
/* If algo exists, it means that the i2c_adapter's initialized */
- if (!adev->pm.smu_i2c.algo) {
+ if (!adev->pm.fru_eeprom_i2c_bus || !adev->pm.fru_eeprom_i2c_bus->algo) {
DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
return -ENODEV;
}
@@ -121,7 +131,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
* and the language field, so just start from 0xb, manufacturer size
*/
addrptr = FRU_EEPROM_MADDR + 0xb;
- size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
if (size < 1) {
DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
return -EINVAL;
@@ -131,7 +141,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
* size field being 1 byte. This pattern continues below.
*/
addrptr += size + 1;
- size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
if (size < 1) {
DRM_ERROR("Failed to read FRU product name, ret:%d", size);
return -EINVAL;
@@ -143,12 +153,11 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
AMDGPU_PRODUCT_NAME_LEN);
len = AMDGPU_PRODUCT_NAME_LEN - 1;
}
- /* Start at 2 due to buff using fields 0 and 1 for the address */
- memcpy(adev->product_name, &buff[offset], len);
+ memcpy(adev->product_name, buf, len);
adev->product_name[len] = '\0';
addrptr += size + 1;
- size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
if (size < 1) {
DRM_ERROR("Failed to read FRU product number, ret:%d", size);
return -EINVAL;
@@ -162,11 +171,11 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
len = sizeof(adev->product_number) - 1;
}
- memcpy(adev->product_number, &buff[offset], len);
+ memcpy(adev->product_number, buf, len);
adev->product_number[len] = '\0';
addrptr += size + 1;
- size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
if (size < 1) {
DRM_ERROR("Failed to read FRU product version, ret:%d", size);
@@ -174,7 +183,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
}
addrptr += size + 1;
- size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
if (size < 1) {
DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
@@ -189,7 +198,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
len = sizeof(adev->serial) - 1;
}
- memcpy(adev->serial, &buff[offset], len);
+ memcpy(adev->serial, buf, len);
adev->serial[len] = '\0';
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 645950a653a0..01cb89ffbd56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -150,7 +150,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
* replaces them with the dummy page (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages)
{
unsigned t;
@@ -161,13 +161,11 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
uint64_t flags = 0;
int idx;
- if (!adev->gart.ready) {
- WARN(1, "trying to unbind memory from uninitialized GART !\n");
- return -EINVAL;
- }
+ if (!adev->gart.ptr)
+ return;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
- return 0;
+ return;
t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
@@ -188,7 +186,6 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
drm_dev_exit(idx);
- return 0;
}
/**
@@ -204,7 +201,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
* Map the dma_addresses into GART entries (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
-int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
int pages, dma_addr_t *dma_addr, uint64_t flags,
void *dst)
{
@@ -212,13 +209,8 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
unsigned i, j, t;
int idx;
- if (!adev->gart.ready) {
- WARN(1, "trying to bind memory to uninitialized GART !\n");
- return -EINVAL;
- }
-
if (!drm_dev_enter(adev_to_drm(adev), &idx))
- return 0;
+ return;
t = offset / AMDGPU_GPU_PAGE_SIZE;
@@ -230,7 +222,6 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
}
}
drm_dev_exit(idx);
- return 0;
}
/**
@@ -246,20 +237,14 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, dma_addr_t *dma_addr,
uint64_t flags)
{
- if (!adev->gart.ready) {
- WARN(1, "trying to bind memory to uninitialized GART !\n");
- return -EINVAL;
- }
-
if (!adev->gart.ptr)
- return 0;
+ return;
- return amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
- adev->gart.ptr);
+ amdgpu_gart_map(adev, offset, pages, dma_addr, flags, adev->gart.ptr);
}
/**
@@ -274,6 +259,9 @@ void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
{
int i;
+ if (!adev->gart.ptr)
+ return;
+
mb();
amdgpu_device_flush_hdp(adev, NULL);
for (i = 0; i < adev->num_vmhubs; i++)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 78895413cf9f..8fea3e04e411 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -46,7 +46,6 @@ struct amdgpu_gart {
unsigned num_gpu_pages;
unsigned num_cpu_pages;
unsigned table_size;
- bool ready;
/* Asic default pte flags */
uint64_t gart_pte_flags;
@@ -58,12 +57,12 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev);
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
- int pages);
-int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
- int pages, dma_addr_t *dma_addr, uint64_t flags,
- void *dst);
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
- int pages, dma_addr_t *dma_addr, uint64_t flags);
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+ int pages);
+void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+ int pages, dma_addr_t *dma_addr, uint64_t flags,
+ void *dst);
+void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+ int pages, dma_addr_t *dma_addr, uint64_t flags);
void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 9b12cab5e606..57b74d35052f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -222,7 +222,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
if (!bo_va || --bo_va->ref_count)
goto out_unlock;
- amdgpu_vm_bo_rmv(adev, bo_va);
+ amdgpu_vm_bo_del(adev, bo_va);
if (!amdgpu_vm_ready(vm))
goto out_unlock;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 1916ec84dd71..8fe939976224 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -615,74 +615,35 @@ int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
mutex_lock(&adev->gfx.gfx_off_mutex);
- r = smu_get_status_gfxoff(adev, value);
+ r = amdgpu_dpm_get_status_gfxoff(adev, value);
mutex_unlock(&adev->gfx.gfx_off_mutex);
return r;
}
-int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
- struct ras_fs_if fs_info = {
- .sysfs_name = "gfx_err_count",
- };
- struct ras_ih_if ih_info = {
- .cb = amdgpu_gfx_process_ras_data_cb,
- };
-
- if (!adev->gfx.ras_if) {
- adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
- if (!adev->gfx.ras_if)
- return -ENOMEM;
- adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
- adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->gfx.ras_if->sub_block_index = 0;
- }
- fs_info.head = ih_info.head = *adev->gfx.ras_if;
- r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
- &fs_info, &ih_info);
+ r = amdgpu_ras_block_late_init(adev, ras_block);
if (r)
- goto free;
+ return r;
- if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
+ if (amdgpu_ras_is_supported(adev, ras_block->block)) {
if (!amdgpu_persistent_edc_harvesting_supported(adev))
amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
if (r)
goto late_fini;
- } else {
- /* free gfx ras_if if ras is not supported */
- r = 0;
- goto free;
}
return 0;
late_fini:
- amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info);
-free:
- kfree(adev->gfx.ras_if);
- adev->gfx.ras_if = NULL;
+ amdgpu_ras_block_late_fini(adev, ras_block);
return r;
}
-void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
-{
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
- adev->gfx.ras_if) {
- struct ras_common_if *ras_if = adev->gfx.ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- .cb = amdgpu_gfx_process_ras_data_cb,
- };
-
- amdgpu_ras_late_fini(adev, ras_if, &ih_info);
- kfree(ras_if);
- }
-}
-
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
void *err_data,
struct amdgpu_iv_entry *entry)
@@ -695,9 +656,9 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
*/
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->gfx.ras_funcs &&
- adev->gfx.ras_funcs->query_ras_error_count)
- adev->gfx.ras_funcs->query_ras_error_count(adev, err_data);
+ if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
+ adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
+ adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
amdgpu_ras_reset_gpu(adev);
}
return AMDGPU_RAS_SUCCESS;
@@ -852,19 +813,3 @@ int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
}
return amdgpu_num_kcq;
}
-
-/* amdgpu_gfx_state_change_set - Handle gfx power state change set
- * @adev: amdgpu_device pointer
- * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry)
- *
- */
-
-void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state)
-{
- mutex_lock(&adev->pm.mutex);
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->gfx_state_change_set)
- ((adev)->powerplay.pp_funcs->gfx_state_change_set(
- (adev)->powerplay.pp_handle, state));
- mutex_unlock(&adev->pm.mutex);
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index f851196c83a5..dcb3c7871c73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -31,6 +31,7 @@
#include "amdgpu_ring.h"
#include "amdgpu_rlc.h"
#include "soc15.h"
+#include "amdgpu_ras.h"
/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
@@ -47,12 +48,6 @@ enum amdgpu_gfx_pipe_priority {
AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2
};
-/* Argument for PPSMC_MSG_GpuChangeState */
-enum gfx_change_state {
- sGpuChangeState_D0Entry = 1,
- sGpuChangeState_D3Entry,
-};
-
#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0
#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15
@@ -204,16 +199,8 @@ struct amdgpu_cu_info {
uint32_t bitmap[4][4];
};
-struct amdgpu_gfx_ras_funcs {
- int (*ras_late_init)(struct amdgpu_device *adev);
- void (*ras_fini)(struct amdgpu_device *adev);
- int (*ras_error_inject)(struct amdgpu_device *adev,
- void *inject_if);
- int (*query_ras_error_count)(struct amdgpu_device *adev,
- void *ras_error_status);
- void (*reset_ras_error_count)(struct amdgpu_device *adev);
- void (*query_ras_error_status)(struct amdgpu_device *adev);
- void (*reset_ras_error_status)(struct amdgpu_device *adev);
+struct amdgpu_gfx_ras {
+ struct amdgpu_ras_block_object ras_block;
void (*enable_watchdog_timer)(struct amdgpu_device *adev);
};
@@ -337,7 +324,7 @@ struct amdgpu_gfx {
/*ras */
struct ras_common_if *ras_if;
- const struct amdgpu_gfx_ras_funcs *ras_funcs;
+ struct amdgpu_gfx_ras *ras;
};
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
@@ -399,8 +386,7 @@ bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
-int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev);
-void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
void *err_data,
struct amdgpu_iv_entry *entry);
@@ -410,5 +396,4 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
-void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state);
#endif
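
The amdgpu_gfx_ras change above is part of a wider rework in this merge: per-IP ras_funcs tables are replaced by a wrapper struct that embeds a common amdgpu_ras_block_object, which the RAS core registers and later maps back to the wrapper. A self-contained sketch of that embedded-base-object pattern, with illustrative names rather than the driver's real types:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Common object that a core layer would keep in its list */
struct ras_base {
	const char *name;
	void (*query_error_count)(struct ras_base *base);
};

/* Per-IP wrapper embedding the common object */
struct gfx_ras {
	struct ras_base base;
	int gfx_error_count;
};

static void gfx_query_error_count(struct ras_base *base)
{
	/* Recover the wrapper from the embedded base object */
	struct gfx_ras *gfx = container_of(base, struct gfx_ras, base);

	printf("%s errors: %d\n", base->name, gfx->gfx_error_count);
}

int main(void)
{
	struct gfx_ras gfx = {
		.base = { .name = "gfx", .query_error_count = gfx_query_error_count },
		.gfx_error_count = 3,
	};
	struct ras_base *registered = &gfx.base;	/* what a core list would hold */

	registered->query_error_count(registered);
	return 0;
}
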
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 2430d6223c2d..ca2cfb65f976 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -436,82 +436,25 @@ void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
} while (fault->timestamp < tmp);
}
-int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
+int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->ras_late_init) {
- r = adev->umc.ras_funcs->ras_late_init(adev);
- if (r)
- return r;
- }
-
- if (adev->mmhub.ras_funcs &&
- adev->mmhub.ras_funcs->ras_late_init) {
- r = adev->mmhub.ras_funcs->ras_late_init(adev);
- if (r)
- return r;
- }
-
- if (!adev->gmc.xgmi.connected_to_cpu)
- adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;
-
- if (adev->gmc.xgmi.ras_funcs &&
- adev->gmc.xgmi.ras_funcs->ras_late_init) {
- r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev);
- if (r)
- return r;
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ adev->gmc.xgmi.ras = &xgmi_ras;
+ amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block);
+ adev->gmc.xgmi.ras_if = &adev->gmc.xgmi.ras->ras_block.ras_comm;
}
- if (adev->hdp.ras_funcs &&
- adev->hdp.ras_funcs->ras_late_init) {
- r = adev->hdp.ras_funcs->ras_late_init(adev);
- if (r)
- return r;
- }
-
- if (adev->mca.mp0.ras_funcs &&
- adev->mca.mp0.ras_funcs->ras_late_init) {
- r = adev->mca.mp0.ras_funcs->ras_late_init(adev);
- if (r)
- return r;
- }
-
- if (adev->mca.mp1.ras_funcs &&
- adev->mca.mp1.ras_funcs->ras_late_init) {
- r = adev->mca.mp1.ras_funcs->ras_late_init(adev);
- if (r)
- return r;
- }
-
- if (adev->mca.mpio.ras_funcs &&
- adev->mca.mpio.ras_funcs->ras_late_init) {
- r = adev->mca.mpio.ras_funcs->ras_late_init(adev);
- if (r)
- return r;
- }
+ return 0;
+}
+int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
+{
return 0;
}
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
{
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->ras_fini)
- adev->umc.ras_funcs->ras_fini(adev);
- if (adev->mmhub.ras_funcs &&
- adev->mmhub.ras_funcs->ras_fini)
- adev->mmhub.ras_funcs->ras_fini(adev);
-
- if (adev->gmc.xgmi.ras_funcs &&
- adev->gmc.xgmi.ras_funcs->ras_fini)
- adev->gmc.xgmi.ras_funcs->ras_fini(adev);
-
- if (adev->hdp.ras_funcs &&
- adev->hdp.ras_funcs->ras_fini)
- adev->hdp.ras_funcs->ras_fini(adev);
}
/*
@@ -584,6 +527,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case CHIP_NAVI12:
case CHIP_VANGOGH:
case CHIP_YELLOW_CARP:
+ case CHIP_IP_DISCOVERY:
/* Don't enable it by default yet.
*/
if (amdgpu_tmz < 1) {
@@ -615,11 +559,11 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
{
struct amdgpu_gmc *gmc = &adev->gmc;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
/*
* noretry = 0 will cause kfd page fault tests fail
* for some ASICs, so set default to 1 for these ASICs.
@@ -629,7 +573,6 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
else
gmc->noretry = amdgpu_noretry;
break;
- case CHIP_RAVEN:
default:
/* Raven currently has issues with noretry
* regardless of what we decide for other
@@ -680,6 +623,13 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
unsigned size;
/*
+ * Some ASICs need to reserve a region of video memory to avoid access
+ * from driver
+ */
+ adev->mman.stolen_reserved_offset = 0;
+ adev->mman.stolen_reserved_size = 0;
+
+ /*
* TODO:
* Currently there is a bug where some memory client outside
* of the driver writes to first 8M of VRAM on S3 resume,
@@ -689,10 +639,25 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
*/
switch (adev->asic_type) {
case CHIP_VEGA10:
+ adev->mman.keep_stolen_vga_memory = true;
+ /*
+ * VEGA10 SRIOV VF needs some firmware reserved area.
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ adev->mman.stolen_reserved_offset = 0x100000;
+ adev->mman.stolen_reserved_size = 0x600000;
+ }
+ break;
case CHIP_RAVEN:
case CHIP_RENOIR:
adev->mman.keep_stolen_vga_memory = true;
break;
+ case CHIP_YELLOW_CARP:
+ if (amdgpu_discovery == 0) {
+ adev->mman.stolen_reserved_offset = 0x1ffb0000;
+ adev->mman.stolen_reserved_size = 64 * PAGE_SIZE;
+ }
+ break;
default:
adev->mman.keep_stolen_vga_memory = false;
break;
@@ -813,21 +778,48 @@ uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo
return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
}
-void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev)
+int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
{
- /* Some ASICs need to reserve a region of video memory to avoid access
- * from driver */
- adev->mman.stolen_reserved_offset = 0;
- adev->mman.stolen_reserved_size = 0;
+ struct amdgpu_bo *vram_bo = NULL;
+ uint64_t vram_gpu = 0;
+ void *vram_ptr = NULL;
+
+ int ret, size = 0x100000;
+ uint8_t cptr[10];
+
+ ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &vram_bo,
+ &vram_gpu,
+ &vram_ptr);
+ if (ret)
+ return ret;
+
+ memset(vram_ptr, 0x86, size);
+ memset(cptr, 0x86, 10);
+
+ /*
+ * Check the start, the middle, and the end of the buffer and verify
+ * that each byte still holds the 0x86 pattern. If all three regions
+ * match, assume the VRAM BO is usable.
+ *
+ * Note: checking every byte of the whole 1M BO would take too long,
+ * so only three regions are sampled.
+ */
+ ret = memcmp(vram_ptr, cptr, 10);
+ if (ret)
+ return ret;
- switch (adev->asic_type) {
- case CHIP_YELLOW_CARP:
- if (amdgpu_discovery == 0) {
- adev->mman.stolen_reserved_offset = 0x1ffb0000;
- adev->mman.stolen_reserved_size = 64 * PAGE_SIZE;
- }
- break;
- default:
- break;
- }
+ ret = memcmp(vram_ptr + (size / 2), cptr, 10);
+ if (ret)
+ return ret;
+
+ ret = memcmp(vram_ptr + size - 10, cptr, 10);
+ if (ret)
+ return ret;
+
+ amdgpu_bo_free_kernel(&vram_bo, &vram_gpu,
+ &vram_ptr);
+
+ return 0;
}
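
The new amdgpu_gmc_vram_checking() above fills a 1 MiB buffer with 0x86 and then compares ten bytes at the start, middle, and end rather than scanning every byte. A minimal host-side sketch of the same spot-check strategy:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const size_t size = 0x100000;
	unsigned char pattern[10];
	unsigned char *buf = malloc(size);

	if (!buf)
		return 1;

	/* Pattern-fill, then sample three small windows */
	memset(buf, 0x86, size);
	memset(pattern, 0x86, sizeof(pattern));

	if (memcmp(buf, pattern, sizeof(pattern)) ||
	    memcmp(buf + size / 2, pattern, sizeof(pattern)) ||
	    memcmp(buf + size - sizeof(pattern), pattern, sizeof(pattern))) {
		free(buf);
		return 1;
	}

	puts("spot-check passed");
	free(buf);
	return 0;
}
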
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 8458cebc6d5b..032b0313f277 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include "amdgpu_irq.h"
+#include "amdgpu_ras.h"
/* VA hole for 48bit addresses on Vega10 */
#define AMDGPU_GMC_HOLE_START 0x0000800000000000ULL
@@ -135,12 +136,8 @@ struct amdgpu_gmc_funcs {
unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev);
};
-struct amdgpu_xgmi_ras_funcs {
- int (*ras_late_init)(struct amdgpu_device *adev);
- void (*ras_fini)(struct amdgpu_device *adev);
- int (*query_ras_error_count)(struct amdgpu_device *adev,
- void *ras_error_status);
- void (*reset_ras_error_count)(struct amdgpu_device *adev);
+struct amdgpu_xgmi_ras {
+ struct amdgpu_ras_block_object ras_block;
};
struct amdgpu_xgmi {
@@ -159,7 +156,7 @@ struct amdgpu_xgmi {
struct ras_common_if *ras_if;
bool connected_to_cpu;
bool pending_reset;
- const struct amdgpu_xgmi_ras_funcs *ras_funcs;
+ struct amdgpu_xgmi_ras *ras;
};
struct amdgpu_gmc {
@@ -321,6 +318,7 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
uint16_t pasid, uint64_t timestamp);
void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid);
+int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev);
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
@@ -333,10 +331,10 @@ amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
bool enable);
void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);
-void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev);
void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev);
uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr);
uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+int amdgpu_gmc_vram_checking(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 68494b959116..8c6b2284cf56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -187,26 +187,21 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
*
* Re-init the gart for each known BO in the GTT.
*/
-int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
+void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
{
struct ttm_range_mgr_node *node;
struct drm_mm_node *mm_node;
struct amdgpu_device *adev;
- int r = 0;
adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
spin_lock(&mgr->lock);
drm_mm_for_each_node(mm_node, &mgr->mm) {
node = container_of(mm_node, typeof(*node), mm_nodes[0]);
- r = amdgpu_ttm_recover_gart(node->base.bo);
- if (r)
- break;
+ amdgpu_ttm_recover_gart(node->base.bo);
}
spin_unlock(&mgr->lock);
amdgpu_gart_invalidate_tlb(adev);
-
- return r;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
deleted file mode 100644
index a766e1aad2b9..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright 2021 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "amdgpu.h"
-#include "amdgpu_ras.h"
-
-int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev)
-{
- int r;
- struct ras_ih_if ih_info = {
- .cb = NULL,
- };
- struct ras_fs_if fs_info = {
- .sysfs_name = "hdp_err_count",
- };
-
- if (!adev->hdp.ras_if) {
- adev->hdp.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
- if (!adev->hdp.ras_if)
- return -ENOMEM;
- adev->hdp.ras_if->block = AMDGPU_RAS_BLOCK__HDP;
- adev->hdp.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->hdp.ras_if->sub_block_index = 0;
- }
- ih_info.head = fs_info.head = *adev->hdp.ras_if;
- r = amdgpu_ras_late_init(adev, adev->hdp.ras_if,
- &fs_info, &ih_info);
- if (r || !amdgpu_ras_is_supported(adev, adev->hdp.ras_if->block)) {
- kfree(adev->hdp.ras_if);
- adev->hdp.ras_if = NULL;
- }
-
- return r;
-}
-
-void amdgpu_hdp_ras_fini(struct amdgpu_device *adev)
-{
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP) &&
- adev->hdp.ras_if) {
- struct ras_common_if *ras_if = adev->hdp.ras_if;
- struct ras_ih_if ih_info = {
- .cb = NULL,
- };
-
- amdgpu_ras_late_fini(adev, ras_if, &ih_info);
- kfree(ras_if);
- }
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
index 7ec99d591584..9181c7bef7c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -22,13 +22,10 @@
*/
#ifndef __AMDGPU_HDP_H__
#define __AMDGPU_HDP_H__
+#include "amdgpu_ras.h"
-struct amdgpu_hdp_ras_funcs {
- int (*ras_late_init)(struct amdgpu_device *adev);
- void (*ras_fini)(struct amdgpu_device *adev);
- void (*query_ras_error_count)(struct amdgpu_device *adev,
- void *ras_error_status);
- void (*reset_ras_error_count)(struct amdgpu_device *adev);
+struct amdgpu_hdp_ras {
+ struct amdgpu_ras_block_object ras_block;
};
struct amdgpu_hdp_funcs {
@@ -43,9 +40,8 @@ struct amdgpu_hdp_funcs {
struct amdgpu_hdp {
struct ras_common_if *ras_if;
const struct amdgpu_hdp_funcs *funcs;
- const struct amdgpu_hdp_ras_funcs *ras_funcs;
+ struct amdgpu_hdp_ras *ras;
};
-int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev);
-void amdgpu_hdp_ras_fini(struct amdgpu_device *adev);
+int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index bc1297dcdf97..d583766ea392 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -166,8 +166,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
- (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)) {
- dev_err(adev->dev, "secure submissions not supported on compute rings\n");
+ (!ring->funcs->secure_submission_supported)) {
+ dev_err(adev->dev, "secure submissions not supported on ring <%s>\n", ring->name);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 888d97143177..81207737c716 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -187,7 +187,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
unsigned i;
int r;
- if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
+ if (!dma_fence_is_signaled(ring->vmid_wait))
return amdgpu_sync_fence(sync, ring->vmid_wait);
fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index f5cbc2747ac6..ea3e8c66211f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -199,13 +199,13 @@ static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
* ack the interrupt if it is there
*/
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
- if (adev->nbio.ras_funcs &&
- adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring)
- adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring(adev);
+ if (adev->nbio.ras &&
+ adev->nbio.ras->handle_ras_controller_intr_no_bifring)
+ adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
- if (adev->nbio.ras_funcs &&
- adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring)
- adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
+ if (adev->nbio.ras &&
+ adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
+ adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 38c9fd7b7ad4..67f66f2f1809 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -37,6 +37,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
struct amdgpu_task_info ti;
struct amdgpu_device *adev = ring->adev;
int idx;
+ int r;
if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s",
@@ -63,7 +64,9 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
ti.process_name, ti.tgid, ti.task_name, ti.pid);
if (amdgpu_device_should_recover_gpu(ring->adev)) {
- amdgpu_device_gpu_recover_imp(ring->adev, job);
+ r = amdgpu_device_gpu_recover_imp(ring->adev, job);
+ if (r)
+ DRM_ERROR("GPU Recovery Failed: %d\n", r);
} else {
drm_sched_suspend_timeout(&ring->sched);
if (amdgpu_sriov_vf(adev))
@@ -78,14 +81,10 @@ exit:
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job, struct amdgpu_vm *vm)
{
- size_t size = sizeof(struct amdgpu_job);
-
if (num_ibs == 0)
return -EINVAL;
- size += sizeof(struct amdgpu_ib) * num_ibs;
-
- *job = kzalloc(size, GFP_KERNEL);
+ *job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
if (!*job)
return -ENOMEM;
@@ -95,7 +94,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
*/
(*job)->base.sched = &adev->rings[0]->sched;
(*job)->vm = vm;
- (*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
amdgpu_sync_create(&(*job)->sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 9e65730193b8..d599c0540b46 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -23,6 +23,10 @@
#ifndef __AMDGPU_JOB_H__
#define __AMDGPU_JOB_H__
+#include <drm/gpu_scheduler.h>
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+
/* bit set means command submit involves a preamble IB */
#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0)
/* bit set means preamble IB is first presented in belonging context */
@@ -45,12 +49,10 @@ struct amdgpu_job {
struct amdgpu_vm *vm;
struct amdgpu_sync sync;
struct amdgpu_sync sched_sync;
- struct amdgpu_ib *ibs;
struct dma_fence hw_fence;
struct dma_fence *external_hw_fence;
uint32_t preamble_status;
uint32_t preemption_status;
- uint32_t num_ibs;
bool vm_needs_flush;
uint64_t vm_pd_addr;
unsigned vmid;
@@ -66,6 +68,9 @@ struct amdgpu_job {
/* job_run_counter >= 1 means a resubmit job */
uint32_t job_run_counter;
+
+ uint32_t num_ibs;
+ struct amdgpu_ib ibs[];
};
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
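
The amdgpu_job changes above replace the hand-rolled sizeof(job) + num_ibs * sizeof(ib) arithmetic with a C99 flexible array member sized through the kernel's struct_size() helper, which also guards against multiplication overflow. A standalone sketch of the same allocation idiom, using illustrative names rather than the driver's structures:

#include <stdio.h>
#include <stdlib.h>

struct item { int payload; };

struct bag {
	unsigned int count;
	struct item items[];	/* flexible array member, must be last */
};

static struct bag *bag_alloc(unsigned int count)
{
	/* Roughly what kzalloc(struct_size(b, items, count), GFP_KERNEL)
	 * does in the kernel: one zeroed allocation for header + array.
	 */
	struct bag *b = calloc(1, sizeof(*b) + count * sizeof(b->items[0]));

	if (b)
		b->count = count;
	return b;
}

int main(void)
{
	struct bag *b = bag_alloc(4);

	if (!b)
		return 1;
	b->items[3].payload = 42;
	printf("%d\n", b->items[3].payload);
	free(b);
	return 0;
}
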
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 0beab961b18b..6b626c293e72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -87,11 +87,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
if (adev->rmmio == NULL)
return;
- if (adev->runpm) {
- pm_runtime_get_sync(dev->dev);
- pm_runtime_forbid(dev->dev);
- }
-
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
DRM_WARN("smart shift update failed\n");
@@ -124,22 +119,6 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
mutex_unlock(&mgpu_info.mutex);
}
-static void amdgpu_get_audio_func(struct amdgpu_device *adev)
-{
- struct pci_dev *p = NULL;
-
- p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
- adev->pdev->bus->number, 1);
- if (p) {
- pm_runtime_get_sync(&p->dev);
-
- pm_runtime_mark_last_busy(&p->dev);
- pm_runtime_put_autosuspend(&p->dev);
-
- pci_dev_put(p);
- }
-}
-
/**
* amdgpu_driver_load_kms - Main load function for KMS.
*
@@ -152,21 +131,10 @@ static void amdgpu_get_audio_func(struct amdgpu_device *adev)
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
struct drm_device *dev;
- struct pci_dev *parent;
int r, acpi_status;
dev = adev_to_drm(adev);
- if (amdgpu_has_atpx() &&
- (amdgpu_is_atpx_hybrid() ||
- amdgpu_has_atpx_dgpu_power_cntl()) &&
- ((flags & AMD_IS_APU) == 0) &&
- !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
- flags |= AMD_IS_PX;
-
- parent = pci_upstream_bridge(adev->pdev);
- adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
-
/* amdgpu_device_init should report only fatal error
* like memory allocation failure or iomapping failure,
* or memory manager initialization failure, it must
@@ -224,58 +192,12 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
if (acpi_status)
dev_dbg(dev->dev, "Error during ACPI methods call\n");
- if (adev->runpm) {
- /* only need to skip on ATPX */
- if (amdgpu_device_supports_px(dev))
- dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
- /* we want direct complete for BOCO */
- if (amdgpu_device_supports_boco(dev))
- dev_pm_set_driver_flags(dev->dev, DPM_FLAG_SMART_PREPARE |
- DPM_FLAG_SMART_SUSPEND |
- DPM_FLAG_MAY_SKIP_RESUME);
- pm_runtime_use_autosuspend(dev->dev);
- pm_runtime_set_autosuspend_delay(dev->dev, 5000);
-
- pm_runtime_allow(dev->dev);
-
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
-
- /*
- * For runpm implemented via BACO, PMFW will handle the
- * timing for BACO in and out:
- * - put ASIC into BACO state only when both video and
- * audio functions are in D3 state.
- * - pull ASIC out of BACO state when either video or
- * audio function is in D0 state.
- * Also, at startup, PMFW assumes both functions are in
- * D0 state.
- *
- * So if snd driver was loaded prior to amdgpu driver
- * and audio function was put into D3 state, there will
- * be no PMFW-aware D-state transition(D0->D3) on runpm
- * suspend. Thus the BACO will be not correctly kicked in.
- *
- * Via amdgpu_get_audio_func(), the audio dev is put
- * into D0 state. Then there will be a PMFW-aware D-state
- * transition(D0->D3) on runpm suspend.
- */
- if (amdgpu_device_supports_baco(dev) &&
- !(adev->flags & AMD_IS_APU) &&
- (adev->asic_type >= CHIP_NAVI10))
- amdgpu_get_audio_func(adev);
- }
-
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
DRM_WARN("smart shift update failed\n");
out:
- if (r) {
- /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
- if (adev->rmmio && adev->runpm)
- pm_runtime_put_noidle(dev->dev);
+ if (r)
amdgpu_driver_unload_kms(dev);
- }
return r;
}
@@ -406,6 +328,10 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->psp.toc.fw_version;
fw_info->feature = adev->psp.toc.feature_version;
break;
+ case AMDGPU_INFO_FW_CAP:
+ fw_info->ver = adev->psp.cap_fw_version;
+ fw_info->feature = adev->psp.cap_feature_version;
+ break;
default:
return -EINVAL;
}
@@ -1270,18 +1196,20 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
amdgpu_vce_free_handles(adev, file_priv);
- amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
-
if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
/* TODO: how to handle reserve failure */
BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
- amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
+ amdgpu_vm_bo_del(adev, fpriv->csa_va);
fpriv->csa_va = NULL;
amdgpu_bo_unreserve(adev->virt.csa_obj);
}
pasid = fpriv->vm.pasid;
pd = amdgpu_bo_ref(fpriv->vm.root.bo);
+ if (!WARN_ON(amdgpu_bo_reserve(pd, true))) {
+ amdgpu_vm_bo_del(adev, fpriv->prt_va);
+ amdgpu_bo_unreserve(pd);
+ }
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
amdgpu_vm_fini(adev, &fpriv->vm);
@@ -1429,8 +1357,7 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
struct drm_amdgpu_info_firmware fw_info;
struct drm_amdgpu_query_fw query_fw;
struct atom_context *ctx = adev->mode_info.atom_context;
- uint8_t smu_minor, smu_debug;
- uint16_t smu_major;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
int ret, i;
static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
@@ -1576,11 +1503,12 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
- smu_major = (fw_info.ver >> 16) & 0xffff;
+ smu_program = (fw_info.ver >> 24) & 0xff;
+ smu_major = (fw_info.ver >> 16) & 0xff;
smu_minor = (fw_info.ver >> 8) & 0xff;
smu_debug = (fw_info.ver >> 0) & 0xff;
- seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x (%d.%d.%d)\n",
- fw_info.feature, fw_info.ver, smu_major, smu_minor, smu_debug);
+ seq_printf(m, "SMC feature version: %u, program: %d, firmware version: 0x%08x (%d.%d.%d)\n",
+ fw_info.feature, smu_program, fw_info.ver, smu_major, smu_minor, smu_debug);
/* SDMA */
query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
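
The firmware-info change above splits the SMU version word into four byte-wide fields (program, major, minor, debug) instead of a 16-bit major. A short sketch of that decoding with a made-up version value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical packed SMU firmware version word */
	uint32_t ver = 0x00043A2A;	/* program 0, major 4, minor 58, debug 42 */
	uint8_t program = (ver >> 24) & 0xff;
	uint8_t major   = (ver >> 16) & 0xff;
	uint8_t minor   = (ver >>  8) & 0xff;
	uint8_t debug   =  ver        & 0xff;

	printf("%u.%u.%u (program %u)\n", major, minor, debug, program);
	return 0;
}
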
@@ -1625,6 +1553,16 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
seq_printf(m, "TOC feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
+ /* CAP */
+ if (adev->psp.cap_fw) {
+ query_fw.fw_type = AMDGPU_INFO_FW_CAP;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "CAP feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+ }
+
seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index ce538f4819f9..51c2a82e2fa4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -70,48 +70,3 @@ void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
amdgpu_mca_reset_error_count(adev, mc_status_addr);
}
-
-int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
- struct amdgpu_mca_ras *mca_dev)
-{
- int r;
- struct ras_ih_if ih_info = {
- .cb = NULL,
- };
- struct ras_fs_if fs_info = {
- .sysfs_name = mca_dev->ras_funcs->sysfs_name,
- };
-
- if (!mca_dev->ras_if) {
- mca_dev->ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
- if (!mca_dev->ras_if)
- return -ENOMEM;
- mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block;
- mca_dev->ras_if->sub_block_index = mca_dev->ras_funcs->ras_sub_block;
- mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- }
- ih_info.head = fs_info.head = *mca_dev->ras_if;
- r = amdgpu_ras_late_init(adev, mca_dev->ras_if,
- &fs_info, &ih_info);
- if (r || !amdgpu_ras_is_supported(adev, mca_dev->ras_if->block)) {
- kfree(mca_dev->ras_if);
- mca_dev->ras_if = NULL;
- }
-
- return r;
-}
-
-void amdgpu_mca_ras_fini(struct amdgpu_device *adev,
- struct amdgpu_mca_ras *mca_dev)
-{
- struct ras_ih_if ih_info = {
- .cb = NULL,
- };
-
- if (!mca_dev->ras_if)
- return;
-
- amdgpu_ras_late_fini(adev, mca_dev->ras_if, &ih_info);
- kfree(mca_dev->ras_if);
- mca_dev->ras_if = NULL;
-}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index c74bc7177066..7ce16d16e34b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -21,21 +21,13 @@
#ifndef __AMDGPU_MCA_H__
#define __AMDGPU_MCA_H__
-struct amdgpu_mca_ras_funcs {
- int (*ras_late_init)(struct amdgpu_device *adev);
- void (*ras_fini)(struct amdgpu_device *adev);
- void (*query_ras_error_count)(struct amdgpu_device *adev,
- void *ras_error_status);
- void (*query_ras_error_address)(struct amdgpu_device *adev,
- void *ras_error_status);
- uint32_t ras_block;
- uint32_t ras_sub_block;
- const char* sysfs_name;
+struct amdgpu_mca_ras_block {
+ struct amdgpu_ras_block_object ras_block;
};
struct amdgpu_mca_ras {
struct ras_common_if *ras_if;
- const struct amdgpu_mca_ras_funcs *ras_funcs;
+ struct amdgpu_mca_ras_block *ras;
};
struct amdgpu_mca_funcs {
@@ -64,10 +56,4 @@ void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
void *ras_error_status);
-int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
- struct amdgpu_mca_ras *mca_dev);
-
-void amdgpu_mca_ras_fini(struct amdgpu_device *adev,
- struct amdgpu_mca_ras *mca_dev);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
deleted file mode 100644
index 24297dc51434..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright 2019 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "amdgpu.h"
-#include "amdgpu_ras.h"
-
-int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
-{
- int r;
- struct ras_ih_if ih_info = {
- .cb = NULL,
- };
- struct ras_fs_if fs_info = {
- .sysfs_name = "mmhub_err_count",
- };
-
- if (!adev->mmhub.ras_if) {
- adev->mmhub.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
- if (!adev->mmhub.ras_if)
- return -ENOMEM;
- adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB;
- adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->mmhub.ras_if->sub_block_index = 0;
- }
- ih_info.head = fs_info.head = *adev->mmhub.ras_if;
- r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if,
- &fs_info, &ih_info);
- if (r || !amdgpu_ras_is_supported(adev, adev->mmhub.ras_if->block)) {
- kfree(adev->mmhub.ras_if);
- adev->mmhub.ras_if = NULL;
- }
-
- return r;
-}
-
-void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev)
-{
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
- adev->mmhub.ras_if) {
- struct ras_common_if *ras_if = adev->mmhub.ras_if;
- struct ras_ih_if ih_info = {
- .cb = NULL,
- };
-
- amdgpu_ras_late_fini(adev, ras_if, &ih_info);
- kfree(ras_if);
- }
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index b27fcbccce2b..9f1540f0ebf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -21,14 +21,8 @@
#ifndef __AMDGPU_MMHUB_H__
#define __AMDGPU_MMHUB_H__
-struct amdgpu_mmhub_ras_funcs {
- int (*ras_late_init)(struct amdgpu_device *adev);
- void (*ras_fini)(struct amdgpu_device *adev);
- void (*query_ras_error_count)(struct amdgpu_device *adev,
- void *ras_error_status);
- void (*query_ras_error_status)(struct amdgpu_device *adev);
- void (*reset_ras_error_count)(struct amdgpu_device *adev);
- void (*reset_ras_error_status)(struct amdgpu_device *adev);
+struct amdgpu_mmhub_ras {
+ struct amdgpu_ras_block_object ras_block;
};
struct amdgpu_mmhub_funcs {
@@ -50,10 +44,8 @@ struct amdgpu_mmhub_funcs {
struct amdgpu_mmhub {
struct ras_common_if *ras_if;
const struct amdgpu_mmhub_funcs *funcs;
- const struct amdgpu_mmhub_ras_funcs *ras_funcs;
+ struct amdgpu_mmhub_ras *ras;
};
-int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev);
-void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 902235fae4cd..a546cb3cfa18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -341,6 +341,7 @@ struct amdgpu_mode_info {
int num_crtc; /* number of crtcs */
int num_hpd; /* number of hpd pins */
int num_dig; /* number of dig blocks */
+ bool gpu_vm_support; /* supports display from GTT */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
const enum drm_plane_type *plane_type;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index 6afb02fef8cf..37d779b8e4a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -22,61 +22,24 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
-int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
- struct ras_ih_if ih_info = {
- .cb = NULL,
- };
- struct ras_fs_if fs_info = {
- .sysfs_name = "pcie_bif_err_count",
- };
-
- if (!adev->nbio.ras_if) {
- adev->nbio.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
- if (!adev->nbio.ras_if)
- return -ENOMEM;
- adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF;
- adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->nbio.ras_if->sub_block_index = 0;
- }
- ih_info.head = fs_info.head = *adev->nbio.ras_if;
- r = amdgpu_ras_late_init(adev, adev->nbio.ras_if,
- &fs_info, &ih_info);
+ r = amdgpu_ras_block_late_init(adev, ras_block);
if (r)
- goto free;
+ return r;
- if (amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
+ if (amdgpu_ras_is_supported(adev, ras_block->block)) {
r = amdgpu_irq_get(adev, &adev->nbio.ras_controller_irq, 0);
if (r)
goto late_fini;
r = amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
if (r)
goto late_fini;
- } else {
- r = 0;
- goto free;
}
return 0;
late_fini:
- amdgpu_ras_late_fini(adev, adev->nbio.ras_if, &ih_info);
-free:
- kfree(adev->nbio.ras_if);
- adev->nbio.ras_if = NULL;
+ amdgpu_ras_block_late_fini(adev, ras_block);
return r;
}
-
-void amdgpu_nbio_ras_fini(struct amdgpu_device *adev)
-{
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF) &&
- adev->nbio.ras_if) {
- struct ras_common_if *ras_if = adev->nbio.ras_if;
- struct ras_ih_if ih_info = {
- .cb = NULL,
- };
-
- amdgpu_ras_late_fini(adev, ras_if, &ih_info);
- kfree(ras_if);
- }
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 843052205bd5..3d13e601fc35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -47,15 +47,12 @@ struct nbio_hdp_flush_reg {
u32 ref_and_mask_sdma7;
};
-struct amdgpu_nbio_ras_funcs {
+struct amdgpu_nbio_ras {
+ struct amdgpu_ras_block_object ras_block;
void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
- void (*query_ras_error_count)(struct amdgpu_device *adev,
- void *ras_error_status);
- int (*ras_late_init)(struct amdgpu_device *adev);
- void (*ras_fini)(struct amdgpu_device *adev);
};
struct amdgpu_nbio_funcs {
@@ -104,9 +101,8 @@ struct amdgpu_nbio {
struct amdgpu_irq_src ras_err_event_athub_irq;
struct ras_common_if *ras_if;
const struct amdgpu_nbio_funcs *funcs;
- const struct amdgpu_nbio_ras_funcs *ras_funcs;
+ struct amdgpu_nbio_ras *ras;
};
-int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev);
-void amdgpu_nbio_ras_fini(struct amdgpu_device *adev);
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index ea0cde4904f0..25731719c627 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -575,6 +575,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (!amdgpu_bo_support_uswc(bo->flags))
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ if (adev->ras_enabled)
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
+
bo->tbo.bdev = &adev->mman.bdev;
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
AMDGPU_GEM_DOMAIN_GDS))
@@ -1303,7 +1306,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
return;
- dma_resv_lock(bo->base.resv, NULL);
+ if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
+ return;
r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
if (!WARN_ON(r)) {
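
The amdgpu_object.c hunk above also swaps the blocking dma_resv_lock() for dma_resv_trylock() in the release-notify path, where sleeping on a contended reservation is undesirable. A minimal sketch of that pattern, assuming a struct dma_resv *resv is already at hand:

/* Release paths should not block on the reservation: if it is
 * contended, skip the optional VRAM wipe rather than wait. */
if (!dma_resv_trylock(resv))
	return;

/* ... fill the buffer with the poison value while the lock is held ... */

dma_resv_unlock(resv);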
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index dee17a0e1187..a6acec1a6155 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -133,6 +133,8 @@ static int psp_early_init(void *handle)
break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 8):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = true;
break;
@@ -259,6 +261,32 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
return ret;
}
+static int psp_init_sriov_microcode(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret = 0;
+
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ ret = psp_init_cap_microcode(psp, "vega10");
+ break;
+ case IP_VERSION(11, 0, 9):
+ ret = psp_init_cap_microcode(psp, "navi12");
+ break;
+ case IP_VERSION(11, 0, 7):
+ ret = psp_init_cap_microcode(psp, "sienna_cichlid");
+ break;
+ case IP_VERSION(13, 0, 2):
+ ret = psp_init_cap_microcode(psp, "aldebaran");
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return ret;
+}
+
static int psp_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -273,21 +301,19 @@ static int psp_sw_init(void *handle)
ret = -ENOMEM;
}
- if (!amdgpu_sriov_vf(adev)) {
+ if (amdgpu_sriov_vf(adev))
+ ret = psp_init_sriov_microcode(psp);
+ else
ret = psp_init_microcode(psp);
- if (ret) {
- DRM_ERROR("Failed to load psp firmware!\n");
- return ret;
- }
- } else if (amdgpu_sriov_vf(adev) &&
- adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2)) {
- ret = psp_init_ta_microcode(psp, "aldebaran");
- if (ret) {
- DRM_ERROR("Failed to initialize ta microcode!\n");
- return ret;
- }
+ if (ret) {
+ DRM_ERROR("Failed to load psp firmware!\n");
+ return ret;
}
+ adev->psp.xgmi_context.supports_extended_data =
+ !adev->gmc.xgmi.connected_to_cpu &&
+ adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2);
+
memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
if (psp_get_runtime_db_entry(adev,
PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
@@ -353,6 +379,10 @@ static int psp_sw_fini(void *handle)
release_firmware(psp->ta_fw);
psp->ta_fw = NULL;
}
+ if (adev->psp.cap_fw) {
+ release_firmware(psp->cap_fw);
+ psp->cap_fw = NULL;
+ }
if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
@@ -491,7 +521,10 @@ psp_cmd_submit_buf(struct psp_context *psp,
DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
psp->cmd_buf_mem->resp.status);
- if (!timeout) {
+ /* If we load CAP FW, PSP must return 0 under SRIOV
+ * also return failure in case of timeout
+ */
+ if ((ucode && (ucode->ucode_id == AMDGPU_UCODE_ID_CAP)) || !timeout) {
ret = -EINVAL;
goto exit;
}
@@ -914,19 +947,15 @@ static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
static int psp_ta_init_shared_buf(struct psp_context *psp,
struct ta_mem_context *mem_ctx)
{
- int ret;
-
/*
* Allocate 16k memory aligned to 4k from Frame Buffer (local
* physical) for ta to host memory
*/
- ret = amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
+ return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
&mem_ctx->shared_bo,
&mem_ctx->shared_mc_addr,
&mem_ctx->shared_buf);
-
- return ret;
}
static void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
@@ -1308,6 +1337,11 @@ static void psp_ras_ta_check_status(struct psp_context *psp)
break;
case TA_RAS_STATUS__SUCCESS:
break;
+ case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
+ if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
+ dev_warn(psp->adev->dev,
+ "RAS WARNING: Inject error to critical region is not allowed\n");
+ break;
default:
dev_warn(psp->adev->dev,
"RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
@@ -1520,7 +1554,9 @@ int psp_ras_trigger_error(struct psp_context *psp,
if (amdgpu_ras_intr_triggered())
return 0;
- if (ras_cmd->ras_status)
+ if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
+ return -EACCES;
+ else if (ras_cmd->ras_status)
return -EINVAL;
return 0;
@@ -2051,6 +2087,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
enum psp_gfx_fw_type *type)
{
switch (ucode->ucode_id) {
+ case AMDGPU_UCODE_ID_CAP:
+ *type = GFX_FW_TYPE_CAP;
+ break;
case AMDGPU_UCODE_ID_SDMA0:
*type = GFX_FW_TYPE_SDMA0;
break;
@@ -2973,7 +3012,6 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
adev->psp.sos.start_addr = ucode_array_start_addr +
le32_to_cpu(sos_hdr->sos.offset_bytes);
- adev->psp.xgmi_context.supports_extended_data = false;
} else {
/* Load alternate PSP SOS FW */
sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
@@ -2988,7 +3026,6 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
adev->psp.sos.start_addr = ucode_array_start_addr +
le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
- adev->psp.xgmi_context.supports_extended_data = true;
}
if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
@@ -3217,6 +3254,58 @@ out:
return err;
}
+int psp_init_cap_microcode(struct psp_context *psp,
+ const char *chip_name)
+{
+ struct amdgpu_device *adev = psp->adev;
+ char fw_name[PSP_FW_NAME_LEN];
+ const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
+ struct amdgpu_firmware_info *info = NULL;
+ int err = 0;
+
+ if (!chip_name) {
+ dev_err(adev->dev, "invalid chip name for cap microcode\n");
+ return -EINVAL;
+ }
+
+ if (!amdgpu_sriov_vf(adev)) {
+ dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
+ return -EINVAL;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
+ err = request_firmware(&adev->psp.cap_fw, fw_name, adev->dev);
+ if (err) {
+ dev_warn(adev->dev, "cap microcode does not exist, skip\n");
+ err = 0;
+ goto out;
+ }
+
+ err = amdgpu_ucode_validate(adev->psp.cap_fw);
+ if (err) {
+ dev_err(adev->dev, "fail to initialize cap microcode\n");
+ goto out;
+ }
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
+ info->ucode_id = AMDGPU_UCODE_ID_CAP;
+ info->fw = adev->psp.cap_fw;
+ cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
+ adev->psp.cap_fw->data;
+ adev->firmware.fw_size += ALIGN(
+ le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
+ adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
+ adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
+ adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
+
+ return 0;
+
+out:
+ release_firmware(adev->psp.cap_fw);
+ adev->psp.cap_fw = NULL;
+ return err;
+}
+
static int psp_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
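
psp_init_cap_microcode() above treats a missing CAP image as non-fatal: the request_firmware() failure is downgraded to a warning and the error is cleared before returning. A condensed sketch of that optional-firmware pattern (the helper name and firmware path are placeholders, not symbols from this patch):

static int foo_init_cap_microcode(struct device *dev, const struct firmware **fw)
{
	int err = request_firmware(fw, "amdgpu/CHIP_cap.bin", dev);	/* placeholder name */

	if (err) {
		dev_warn(dev, "cap microcode does not exist, skip\n");
		return 0;	/* optional firmware: absence is not an error */
	}

	/* on validation failure: release_firmware(*fw) and report the error */
	return 0;
}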
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index f29afabbff1f..ff7d533eb746 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -306,6 +306,9 @@ struct psp_context
/* toc firmware */
const struct firmware *toc_fw;
+ /* cap firmware */
+ const struct firmware *cap_fw;
+
/* fence buffer */
struct amdgpu_bo *fence_buf_bo;
uint64_t fence_buf_mc_addr;
@@ -327,6 +330,10 @@ struct psp_context
const struct firmware *ta_fw;
uint32_t ta_fw_version;
+ uint32_t cap_fw_version;
+ uint32_t cap_feature_version;
+ uint32_t cap_ucode_size;
+
struct ta_context asd_context;
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras_context;
@@ -440,6 +447,8 @@ int psp_init_sos_microcode(struct psp_context *psp,
const char *chip_name);
int psp_init_ta_microcode(struct psp_context *psp,
const char *chip_name);
+int psp_init_cap_microcode(struct psp_context *psp,
+ const char *chip_name);
int psp_get_fw_attestation_records_addr(struct psp_context *psp,
uint64_t *output_ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 8f47c14ecbc7..424c22a841f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -75,6 +75,13 @@ const char *ras_mca_block_string[] = {
"mca_iohc",
};
+struct amdgpu_ras_block_list {
+ /* ras block link */
+ struct list_head node;
+
+ struct amdgpu_ras_block_object *ras_obj;
+};
+
const char *get_ras_block_str(struct ras_common_if *ras_block)
{
if (!ras_block)
@@ -89,6 +96,9 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
return ras_block_string[ras_block->block];
}
+#define ras_block_str(_BLOCK_) \
+ (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
+
#define ras_err_str(i) (ras_error_string[ffs(i)])
#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
@@ -155,14 +165,9 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
}
memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
-
- err_rec.address = address;
- err_rec.retired_page = address >> AMDGPU_GPU_PAGE_SHIFT;
- err_rec.ts = (uint64_t)ktime_get_real_seconds();
- err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
-
err_data.err_addr = &err_rec;
- err_data.err_addr_cnt = 1;
+ amdgpu_umc_fill_error_record(&err_data, address,
+ (address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0);
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
@@ -452,7 +457,7 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
}
if (ret)
- return -EINVAL;
+ return ret;
return size;
}
@@ -866,30 +871,47 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
}
/* feature ctl end */
+static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
+ enum amdgpu_ras_block block)
+{
+ if (!block_obj)
+ return -EINVAL;
-static void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev,
- struct ras_common_if *ras_block,
- struct ras_err_data *err_data)
+ if (block_obj->ras_comm.block == block)
+ return 0;
+
+ return -EINVAL;
+}
+
+static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint32_t sub_block_index)
{
- switch (ras_block->sub_block_index) {
- case AMDGPU_RAS_MCA_BLOCK__MP0:
- if (adev->mca.mp0.ras_funcs &&
- adev->mca.mp0.ras_funcs->query_ras_error_count)
- adev->mca.mp0.ras_funcs->query_ras_error_count(adev, &err_data);
- break;
- case AMDGPU_RAS_MCA_BLOCK__MP1:
- if (adev->mca.mp1.ras_funcs &&
- adev->mca.mp1.ras_funcs->query_ras_error_count)
- adev->mca.mp1.ras_funcs->query_ras_error_count(adev, &err_data);
- break;
- case AMDGPU_RAS_MCA_BLOCK__MPIO:
- if (adev->mca.mpio.ras_funcs &&
- adev->mca.mpio.ras_funcs->query_ras_error_count)
- adev->mca.mpio.ras_funcs->query_ras_error_count(adev, &err_data);
- break;
- default:
- break;
+ struct amdgpu_ras_block_list *node, *tmp;
+ struct amdgpu_ras_block_object *obj;
+
+ if (block >= AMDGPU_RAS_BLOCK__LAST)
+ return NULL;
+
+ if (!amdgpu_ras_is_supported(adev, block))
+ return NULL;
+
+ list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
+ if (!node->ras_obj) {
+ dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
+ continue;
+ }
+
+ obj = node->ras_obj;
+ if (obj->ras_block_match) {
+ if (obj->ras_block_match(obj, block, sub_block_index) == 0)
+ return obj;
+ } else {
+ if (amdgpu_ras_block_match_default(obj, block) == 0)
+ return obj;
+ }
}
+
+ return NULL;
}
static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
@@ -901,26 +923,26 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d
* choosing right query method according to
* whether smu support query error information
*/
- ret = smu_get_ecc_info(&adev->smu, (void *)&(ras->umc_ecc));
+ ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
if (ret == -EOPNOTSUPP) {
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->query_ras_error_count)
- adev->umc.ras_funcs->query_ras_error_count(adev, err_data);
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
/* umc query_ras_error_address is also responsible for clearing
* error status
*/
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->query_ras_error_address)
- adev->umc.ras_funcs->query_ras_error_address(adev, err_data);
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
} else if (!ret) {
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->ecc_info_query_ras_error_count)
- adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, err_data);
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_count)
+ adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->ecc_info_query_ras_error_address)
- adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, err_data);
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_address)
+ adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
}
}
@@ -928,62 +950,32 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
struct ras_query_if *info)
{
+ struct amdgpu_ras_block_object *block_obj = NULL;
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
struct ras_err_data err_data = {0, 0, 0, NULL};
- int i;
if (!obj)
return -EINVAL;
- switch (info->head.block) {
- case AMDGPU_RAS_BLOCK__UMC:
+ if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
amdgpu_ras_get_ecc_info(adev, &err_data);
- break;
- case AMDGPU_RAS_BLOCK__SDMA:
- if (adev->sdma.funcs->query_ras_error_count) {
- for (i = 0; i < adev->sdma.num_instances; i++)
- adev->sdma.funcs->query_ras_error_count(adev, i,
- &err_data);
+ } else {
+ block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
+ if (!block_obj || !block_obj->hw_ops) {
+ dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
+ get_ras_block_str(&info->head));
+ return -EINVAL;
}
- break;
- case AMDGPU_RAS_BLOCK__GFX:
- if (adev->gfx.ras_funcs &&
- adev->gfx.ras_funcs->query_ras_error_count)
- adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data);
-
- if (adev->gfx.ras_funcs &&
- adev->gfx.ras_funcs->query_ras_error_status)
- adev->gfx.ras_funcs->query_ras_error_status(adev);
- break;
- case AMDGPU_RAS_BLOCK__MMHUB:
- if (adev->mmhub.ras_funcs &&
- adev->mmhub.ras_funcs->query_ras_error_count)
- adev->mmhub.ras_funcs->query_ras_error_count(adev, &err_data);
-
- if (adev->mmhub.ras_funcs &&
- adev->mmhub.ras_funcs->query_ras_error_status)
- adev->mmhub.ras_funcs->query_ras_error_status(adev);
- break;
- case AMDGPU_RAS_BLOCK__PCIE_BIF:
- if (adev->nbio.ras_funcs &&
- adev->nbio.ras_funcs->query_ras_error_count)
- adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data);
- break;
- case AMDGPU_RAS_BLOCK__XGMI_WAFL:
- if (adev->gmc.xgmi.ras_funcs &&
- adev->gmc.xgmi.ras_funcs->query_ras_error_count)
- adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data);
- break;
- case AMDGPU_RAS_BLOCK__HDP:
- if (adev->hdp.ras_funcs &&
- adev->hdp.ras_funcs->query_ras_error_count)
- adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data);
- break;
- case AMDGPU_RAS_BLOCK__MCA:
- amdgpu_ras_mca_query_error_status(adev, &info->head, &err_data);
- break;
- default:
- break;
+
+ if (block_obj->hw_ops->query_ras_error_count)
+ block_obj->hw_ops->query_ras_error_count(adev, &err_data);
+
+ if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
+ (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
+ (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
+ if (block_obj->hw_ops->query_ras_error_status)
+ block_obj->hw_ops->query_ras_error_status(adev);
+ }
}
obj->err_data.ue_count += err_data.ue_count;
@@ -1040,68 +1032,27 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
enum amdgpu_ras_block block)
{
+ struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
+
if (!amdgpu_ras_is_supported(adev, block))
return -EINVAL;
- switch (block) {
- case AMDGPU_RAS_BLOCK__GFX:
- if (adev->gfx.ras_funcs &&
- adev->gfx.ras_funcs->reset_ras_error_count)
- adev->gfx.ras_funcs->reset_ras_error_count(adev);
-
- if (adev->gfx.ras_funcs &&
- adev->gfx.ras_funcs->reset_ras_error_status)
- adev->gfx.ras_funcs->reset_ras_error_status(adev);
- break;
- case AMDGPU_RAS_BLOCK__MMHUB:
- if (adev->mmhub.ras_funcs &&
- adev->mmhub.ras_funcs->reset_ras_error_count)
- adev->mmhub.ras_funcs->reset_ras_error_count(adev);
-
- if (adev->mmhub.ras_funcs &&
- adev->mmhub.ras_funcs->reset_ras_error_status)
- adev->mmhub.ras_funcs->reset_ras_error_status(adev);
- break;
- case AMDGPU_RAS_BLOCK__SDMA:
- if (adev->sdma.funcs->reset_ras_error_count)
- adev->sdma.funcs->reset_ras_error_count(adev);
- break;
- case AMDGPU_RAS_BLOCK__HDP:
- if (adev->hdp.ras_funcs &&
- adev->hdp.ras_funcs->reset_ras_error_count)
- adev->hdp.ras_funcs->reset_ras_error_count(adev);
- break;
- default:
- break;
+ if (!block_obj || !block_obj->hw_ops) {
+ dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
+ ras_block_str(block));
+ return -EINVAL;
}
- return 0;
-}
-
-/* Trigger XGMI/WAFL error */
-static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
- struct ta_ras_trigger_error_input *block_info)
-{
- int ret;
-
- if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
- dev_warn(adev->dev, "Failed to disallow df cstate");
-
- if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
- dev_warn(adev->dev, "Failed to disallow XGMI power down");
-
- ret = psp_ras_trigger_error(&adev->psp, block_info);
+ if (block_obj->hw_ops->reset_ras_error_count)
+ block_obj->hw_ops->reset_ras_error_count(adev);
- if (amdgpu_ras_intr_triggered())
- return ret;
-
- if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
- dev_warn(adev->dev, "Failed to allow XGMI power down");
-
- if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
- dev_warn(adev->dev, "Failed to allow df cstate");
+ if ((block == AMDGPU_RAS_BLOCK__GFX) ||
+ (block == AMDGPU_RAS_BLOCK__MMHUB)) {
+ if (block_obj->hw_ops->reset_ras_error_status)
+ block_obj->hw_ops->reset_ras_error_status(adev);
+ }
- return ret;
+ return 0;
}
/* wrapper of psp_ras_trigger_error */
@@ -1116,11 +1067,20 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
.address = info->address,
.value = info->value,
};
- int ret = 0;
+ int ret = -EINVAL;
+ struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
+ info->head.block,
+ info->head.sub_block_index);
if (!obj)
return -EINVAL;
+ if (!block_obj || !block_obj->hw_ops) {
+ dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
+ get_ras_block_str(&info->head));
+ return -EINVAL;
+ }
+
/* Calculate XGMI relative offset */
if (adev->gmc.xgmi.num_physical_nodes > 1) {
block_info.address =
@@ -1128,28 +1088,15 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
block_info.address);
}
- switch (info->head.block) {
- case AMDGPU_RAS_BLOCK__GFX:
- if (adev->gfx.ras_funcs &&
- adev->gfx.ras_funcs->ras_error_inject)
- ret = adev->gfx.ras_funcs->ras_error_inject(adev, info);
- else
- ret = -EINVAL;
- break;
- case AMDGPU_RAS_BLOCK__UMC:
- case AMDGPU_RAS_BLOCK__SDMA:
- case AMDGPU_RAS_BLOCK__MMHUB:
- case AMDGPU_RAS_BLOCK__PCIE_BIF:
- case AMDGPU_RAS_BLOCK__MCA:
- ret = psp_ras_trigger_error(&adev->psp, &block_info);
- break;
- case AMDGPU_RAS_BLOCK__XGMI_WAFL:
- ret = amdgpu_ras_error_inject_xgmi(adev, &block_info);
- break;
- default:
- dev_info(adev->dev, "%s error injection is not supported yet\n",
- get_ras_block_str(&info->head));
- ret = -EINVAL;
+ if (info->head.block == AMDGPU_RAS_BLOCK__GFX) {
+ if (block_obj->hw_ops->ras_error_inject)
+ ret = block_obj->hw_ops->ras_error_inject(adev, info);
+ } else {
+ /* If defined special ras_error_inject(e.g: xgmi), implement special ras_error_inject */
+ if (block_obj->hw_ops->ras_error_inject)
+ ret = block_obj->hw_ops->ras_error_inject(adev, &block_info);
+ else /*If not defined .ras_error_inject, use default ras_error_inject*/
+ ret = psp_ras_trigger_error(&adev->psp, &block_info);
}
if (ret)
@@ -1329,18 +1276,17 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
}
int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
- struct ras_fs_if *head)
+ struct ras_common_if *head)
{
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
if (!obj || obj->attr_inuse)
return -EINVAL;
get_obj(obj);
- memcpy(obj->fs_data.sysfs_name,
- head->sysfs_name,
- sizeof(obj->fs_data.sysfs_name));
+ snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
+ "%s_err_count", head->name);
obj->sysfs_attr = (struct device_attribute){
.attr = {
@@ -1647,9 +1593,9 @@ int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
}
int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
- struct ras_ih_if *info)
+ struct ras_common_if *head)
{
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
struct ras_ih_data *data;
if (!obj)
@@ -1669,24 +1615,27 @@ int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
}
int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
- struct ras_ih_if *info)
+ struct ras_common_if *head)
{
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
struct ras_ih_data *data;
+ struct amdgpu_ras_block_object *ras_obj;
if (!obj) {
/* in case we register the IH before enabling the ras feature */
- obj = amdgpu_ras_create_obj(adev, &info->head);
+ obj = amdgpu_ras_create_obj(adev, head);
if (!obj)
return -EINVAL;
} else
get_obj(obj);
+ ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
+
data = &obj->ih_data;
/* add the callback.etc */
*data = (struct ras_ih_data) {
.inuse = 0,
- .cb = info->cb,
+ .cb = ras_obj->ras_cb,
.element_size = sizeof(struct amdgpu_iv_entry),
.rptr = 0,
.wptr = 0,
@@ -1715,10 +1664,7 @@ static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
struct ras_manager *obj, *tmp;
list_for_each_entry_safe(obj, tmp, &con->head, node) {
- struct ras_ih_if info = {
- .head = obj->head,
- };
- amdgpu_ras_interrupt_remove_handler(adev, &info);
+ amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
}
return 0;
@@ -1766,24 +1712,28 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
struct ras_query_if *info)
{
+ struct amdgpu_ras_block_object *block_obj;
/*
* Only two block need to query read/write
* RspStatus at current state
*/
- switch (info->head.block) {
- case AMDGPU_RAS_BLOCK__GFX:
- if (adev->gfx.ras_funcs &&
- adev->gfx.ras_funcs->query_ras_error_status)
- adev->gfx.ras_funcs->query_ras_error_status(adev);
- break;
- case AMDGPU_RAS_BLOCK__MMHUB:
- if (adev->mmhub.ras_funcs &&
- adev->mmhub.ras_funcs->query_ras_error_status)
- adev->mmhub.ras_funcs->query_ras_error_status(adev);
- break;
- default:
- break;
+ if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
+ (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
+ return;
+
+ block_obj = amdgpu_ras_get_ras_block(adev,
+ info->head.block,
+ info->head.sub_block_index);
+
+ if (!block_obj || !block_obj->hw_ops) {
+ dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
+ get_ras_block_str(&info->head));
+ return;
}
+
+ if (block_obj->hw_ops->query_ras_error_status)
+ block_obj->hw_ops->query_ras_error_status(adev);
+
}
static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
@@ -2118,6 +2068,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
mutex_init(&con->recovery_lock);
INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
atomic_set(&con->in_recovery, 0);
+ con->eeprom_control.bad_channel_bitmap = 0;
max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count();
amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
@@ -2141,8 +2092,12 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
if (ret)
goto free;
- if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
- adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
+ amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
+
+ if (con->update_channel_flag == true) {
+ amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
+ con->update_channel_flag = false;
+ }
}
#ifdef CONFIG_X86_MCE_AMD
@@ -2336,6 +2291,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
+ con->update_channel_flag = false;
con->features = 0;
INIT_LIST_HEAD(&con->head);
/* Might need get this flag from vbios. */
@@ -2348,24 +2304,27 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
case CHIP_VEGA20:
case CHIP_ARCTURUS:
case CHIP_ALDEBARAN:
- if (!adev->gmc.xgmi.connected_to_cpu)
- adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs;
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ adev->nbio.ras = &nbio_v7_4_ras;
+ amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block);
+ adev->nbio.ras_if = &adev->nbio.ras->ras_block.ras_comm;
+ }
break;
default:
/* nbio ras is not available */
break;
}
- if (adev->nbio.ras_funcs &&
- adev->nbio.ras_funcs->init_ras_controller_interrupt) {
- r = adev->nbio.ras_funcs->init_ras_controller_interrupt(adev);
+ if (adev->nbio.ras &&
+ adev->nbio.ras->init_ras_controller_interrupt) {
+ r = adev->nbio.ras->init_ras_controller_interrupt(adev);
if (r)
goto release_con;
}
- if (adev->nbio.ras_funcs &&
- adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) {
- r = adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt(adev);
+ if (adev->nbio.ras &&
+ adev->nbio.ras->init_ras_err_event_athub_interrupt) {
+ r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
if (r)
goto release_con;
}
@@ -2377,12 +2336,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
}
else if (adev->df.funcs &&
adev->df.funcs->query_ras_poison_mode &&
- adev->umc.ras_funcs &&
- adev->umc.ras_funcs->query_ras_poison_mode) {
+ adev->umc.ras &&
+ adev->umc.ras->query_ras_poison_mode) {
df_poison =
adev->df.funcs->query_ras_poison_mode(adev);
umc_poison =
- adev->umc.ras_funcs->query_ras_poison_mode(adev);
+ adev->umc.ras->query_ras_poison_mode(adev);
/* Only poison is set in both DF and UMC, we can support it */
if (df_poison && umc_poison)
con->poison_supported = true;
@@ -2445,11 +2404,10 @@ bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
}
/* helper function to handle common stuff in ip late init phase */
-int amdgpu_ras_late_init(struct amdgpu_device *adev,
- struct ras_common_if *ras_block,
- struct ras_fs_if *fs_info,
- struct ras_ih_if *ih_info)
+int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block)
{
+ struct amdgpu_ras_block_object *ras_obj = NULL;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
unsigned long ue_count, ce_count;
int r;
@@ -2477,15 +2435,16 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
if (adev->in_suspend || amdgpu_in_reset(adev))
return 0;
- if (ih_info->cb) {
- r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
+ ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
+ if (ras_obj->ras_cb) {
+ r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
if (r)
- goto interrupt;
+ goto cleanup;
}
- r = amdgpu_ras_sysfs_create(adev, fs_info);
+ r = amdgpu_ras_sysfs_create(adev, ras_block);
if (r)
- goto sysfs;
+ goto interrupt;
/* Those are the cached values at init.
*/
@@ -2495,27 +2454,40 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
}
return 0;
-cleanup:
- amdgpu_ras_sysfs_remove(adev, ras_block);
-sysfs:
- if (ih_info->cb)
- amdgpu_ras_interrupt_remove_handler(adev, ih_info);
+
interrupt:
+ if (ras_obj->ras_cb)
+ amdgpu_ras_interrupt_remove_handler(adev, ras_block);
+cleanup:
amdgpu_ras_feature_enable(adev, ras_block, 0);
return r;
}
+static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block)
+{
+ return amdgpu_ras_block_late_init(adev, ras_block);
+}
+
/* helper function to remove ras fs node and interrupt handler */
-void amdgpu_ras_late_fini(struct amdgpu_device *adev,
- struct ras_common_if *ras_block,
- struct ras_ih_if *ih_info)
+void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block)
{
- if (!ras_block || !ih_info)
+ struct amdgpu_ras_block_object *ras_obj;
+ if (!ras_block)
return;
amdgpu_ras_sysfs_remove(adev, ras_block);
- if (ih_info->cb)
- amdgpu_ras_interrupt_remove_handler(adev, ih_info);
+
+ ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
+ if (ras_obj->ras_cb)
+ amdgpu_ras_interrupt_remove_handler(adev, ras_block);
+}
+
+static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block)
+{
+ return amdgpu_ras_block_late_fini(adev, ras_block);
}
/* do some init work after IP late init as dependence.
@@ -2568,6 +2540,33 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev)
amdgpu_ras_disable_all_features(adev, 1);
}
+int amdgpu_ras_late_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_block_list *node, *tmp;
+ struct amdgpu_ras_block_object *obj;
+ int r;
+
+ list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
+ if (!node->ras_obj) {
+ dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
+ continue;
+ }
+
+ obj = node->ras_obj;
+ if (obj->ras_late_init) {
+ r = obj->ras_late_init(adev, &obj->ras_comm);
+ if (r) {
+ dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
+ obj->ras_comm.name, r);
+ return r;
+ }
+ } else
+ amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
+ }
+
+ return 0;
+}
+
/* do some fini work before IP fini as dependence */
int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
{
@@ -2585,11 +2584,28 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
int amdgpu_ras_fini(struct amdgpu_device *adev)
{
+ struct amdgpu_ras_block_list *ras_node, *tmp;
+ struct amdgpu_ras_block_object *obj = NULL;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
if (!adev->ras_enabled || !con)
return 0;
+ list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
+ if (ras_node->ras_obj) {
+ obj = ras_node->ras_obj;
+ if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
+ obj->ras_fini)
+ obj->ras_fini(adev, &obj->ras_comm);
+ else
+ amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
+ }
+
+ /* Clear ras blocks from ras_list and free ras block list node */
+ list_del(&ras_node->node);
+ kfree(ras_node);
+ }
+
amdgpu_ras_fs_fini(adev);
amdgpu_ras_interrupt_remove_all(adev);
@@ -2717,8 +2733,6 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
umc_inst, ch_inst);
- memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
-
/*
* Translate UMC channel address to Physical address
*/
@@ -2730,16 +2744,10 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
ADDR_OF_256B_BLOCK(channel_index) |
OFFSET_IN_256B_BLOCK(m->addr);
- err_rec.address = m->addr;
- err_rec.retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
- err_rec.ts = (uint64_t)ktime_get_real_seconds();
- err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
- err_rec.cu = 0;
- err_rec.mem_channel = channel_index;
- err_rec.mcumc_id = umc_inst;
-
+ memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
err_data.err_addr = &err_rec;
- err_data.err_addr_cnt = 1;
+ amdgpu_umc_fill_error_record(&err_data, m->addr,
+ retired_page, channel_index, umc_inst);
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
@@ -2777,3 +2785,63 @@ static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
}
}
#endif
+
+struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
+{
+ if (!adev)
+ return NULL;
+
+ return adev->psp.ras_context.ras;
+}
+
+int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
+{
+ if (!adev)
+ return -EINVAL;
+
+ adev->psp.ras_context.ras = ras_con;
+ return 0;
+}
+
+/* check if ras is supported on block, say, sdma, gfx */
+int amdgpu_ras_is_supported(struct amdgpu_device *adev,
+ unsigned int block)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (block >= AMDGPU_RAS_BLOCK_COUNT)
+ return 0;
+ return ras && (adev->ras_enabled & (1 << block));
+}
+
+int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
+ schedule_work(&ras->recovery_work);
+ return 0;
+}
+
+
+/* Register each ip ras block into amdgpu ras */
+int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
+ struct amdgpu_ras_block_object *ras_block_obj)
+{
+ struct amdgpu_ras_block_list *ras_node;
+ if (!adev || !ras_block_obj)
+ return -EINVAL;
+
+ if (!amdgpu_ras_asic_supported(adev))
+ return 0;
+
+ ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
+ if (!ras_node)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ras_node->node);
+ ras_node->ras_obj = ras_block_obj;
+ list_add_tail(&ras_node->node, &adev->ras_list);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 1c708122d492..9314fde81e68 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -26,11 +26,11 @@
#include <linux/debugfs.h>
#include <linux/list.h>
-#include "amdgpu.h"
-#include "amdgpu_psp.h"
#include "ta_ras_if.h"
#include "amdgpu_ras_eeprom.h"
+struct amdgpu_iv_entry;
+
#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0)
enum amdgpu_ras_block {
@@ -374,6 +374,9 @@ struct amdgpu_ras {
/* record umc error info queried from smu */
struct umc_ecc_info umc_ecc;
+
+ /* Indicates whether SMU needs to be told about updated bad channel info */
+ bool update_channel_flag;
};
struct ras_fs_data {
@@ -484,6 +487,27 @@ struct ras_debug_if {
};
int op;
};
+
+struct amdgpu_ras_block_object {
+ struct ras_common_if ras_comm;
+
+ int (*ras_block_match)(struct amdgpu_ras_block_object *block_obj,
+ enum amdgpu_ras_block block, uint32_t sub_block_index);
+ int (*ras_late_init)(struct amdgpu_device *adev, struct ras_common_if *ras_block);
+ void (*ras_fini)(struct amdgpu_device *adev, struct ras_common_if *ras_block);
+ ras_ih_cb ras_cb;
+ const struct amdgpu_ras_block_hw_ops *hw_ops;
+};
+
+struct amdgpu_ras_block_hw_ops {
+ int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
+ void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status);
+ void (*query_ras_error_status)(struct amdgpu_device *adev);
+ void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
+ void (*reset_ras_error_status)(struct amdgpu_device *adev);
+};
+
/* work flow
* vbios
* 1: ras feature enable (enabled by default)
@@ -498,19 +522,6 @@ struct ras_debug_if {
* 8: feature disable
*/
-#define amdgpu_ras_get_context(adev) ((adev)->psp.ras_context.ras)
-#define amdgpu_ras_set_context(adev, ras_con) ((adev)->psp.ras_context.ras = (ras_con))
-
-/* check if ras is supported on block, say, sdma, gfx */
-static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
- unsigned int block)
-{
- struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
-
- if (block >= AMDGPU_RAS_BLOCK_COUNT)
- return 0;
- return ras && (adev->ras_enabled & (1 << block));
-}
int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
@@ -527,15 +538,6 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev);
-static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
-{
- struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
-
- if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
- schedule_work(&ras->recovery_work);
- return 0;
-}
-
static inline enum ta_ras_block
amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
switch (block) {
@@ -596,15 +598,15 @@ amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) {
/* called in ip_init and ip_fini */
int amdgpu_ras_init(struct amdgpu_device *adev);
+int amdgpu_ras_late_init(struct amdgpu_device *adev);
int amdgpu_ras_fini(struct amdgpu_device *adev);
int amdgpu_ras_pre_fini(struct amdgpu_device *adev);
-int amdgpu_ras_late_init(struct amdgpu_device *adev,
- struct ras_common_if *ras_block,
- struct ras_fs_if *fs_info,
- struct ras_ih_if *ih_info);
-void amdgpu_ras_late_fini(struct amdgpu_device *adev,
- struct ras_common_if *ras_block,
- struct ras_ih_if *ih_info);
+
+int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block);
+
+void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block);
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable);
@@ -613,7 +615,7 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable);
int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
- struct ras_fs_if *head);
+ struct ras_common_if *head);
int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head);
@@ -630,10 +632,10 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info);
int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
- struct ras_ih_if *info);
+ struct ras_common_if *head);
int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
- struct ras_ih_if *info);
+ struct ras_common_if *head);
int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
struct ras_dispatch_if *info);
@@ -667,4 +669,14 @@ const char *get_ras_block_str(struct ras_common_if *ras_block);
bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev);
+int amdgpu_ras_is_supported(struct amdgpu_device *adev, unsigned int block);
+
+int amdgpu_ras_reset_gpu(struct amdgpu_device *adev);
+
+struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev);
+
+int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con);
+
+int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
+ struct amdgpu_ras_block_object *ras_block_obj);
#endif
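
The header above replaces the per-IP *_ras_funcs tables with a generic struct amdgpu_ras_block_object that each IP embeds, fills in and registers once; the common RAS core then dispatches through hw_ops instead of switching over blocks. A hypothetical IP could wire itself up roughly as below (the foo_* symbols are invented; field names come from the struct definitions above, and ras_late_init/ras_fini/ras_cb may be left NULL to fall back to the defaults in amdgpu_ras.c):

static const struct amdgpu_ras_block_hw_ops foo_ras_hw_ops = {
	.query_ras_error_count = foo_query_ras_error_count,
	.reset_ras_error_count = foo_reset_ras_error_count,
};

static struct amdgpu_ras_block_object foo_ras_block = {
	.ras_comm = {
		.name = "foo",
		.block = AMDGPU_RAS_BLOCK__GFX,	/* whichever block the IP maps to */
		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
	},
	.hw_ops = &foo_ras_hw_ops,
};

static int foo_ras_sw_init(struct amdgpu_device *adev)
{
	/* called once from the IP's init path, like the nbio hookup in
	 * amdgpu_ras_init() shown above */
	return amdgpu_ras_register_ras_block(adev, &foo_ras_block);
}

amdgpu_ras_query_error_status(), amdgpu_ras_reset_error_status() and the error-inject path then locate the object via amdgpu_ras_get_ras_block() and call into its hw_ops.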
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index d3e055314804..c4283987bb1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -196,7 +196,7 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control)
/* i2c may be unstable in gpu reset */
down_read(&adev->reset_domain->sem);
- res = amdgpu_eeprom_write(&adev->pm.smu_i2c,
+ res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus,
control->i2c_address +
control->ras_header_offset,
buf, RAS_TABLE_HEADER_SIZE);
@@ -265,7 +265,9 @@ static int amdgpu_ras_eeprom_correct_header_tag(
*/
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
u8 csum;
int res;
@@ -284,6 +286,12 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
control->ras_num_recs = 0;
control->ras_fri = 0;
+ amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_recs);
+
+ control->bad_channel_bitmap = 0;
+ amdgpu_dpm_send_hbm_bad_channel_flag(adev, control->bad_channel_bitmap);
+ con->update_channel_flag = false;
+
amdgpu_ras_debugfs_set_ret_size(control);
mutex_unlock(&control->ras_tbl_mutex);
@@ -391,7 +399,7 @@ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control,
/* i2c may be unstable in gpu reset */
down_read(&adev->reset_domain->sem);
buf_size = num * RAS_TABLE_RECORD_SIZE;
- res = amdgpu_eeprom_write(&adev->pm.smu_i2c,
+ res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus,
control->i2c_address +
RAS_INDEX_TO_OFFSET(control, fri),
buf, buf_size);
@@ -417,6 +425,7 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *record,
const u32 num)
{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(to_amdgpu_device(control));
u32 a, b, i;
u8 *buf, *pp;
int res;
@@ -428,9 +437,16 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
/* Encode all of them in one go.
*/
pp = buf;
- for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE)
+ for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) {
__encode_table_record_to_buf(control, &record[i], pp);
+ /* update bad channel bitmap */
+ if (!(control->bad_channel_bitmap & (1 << record[i].mem_channel))) {
+ control->bad_channel_bitmap |= 1 << record[i].mem_channel;
+ con->update_channel_flag = true;
+ }
+ }
+
/* a, first record index to write into.
* b, last record index to write into.
* a = first index to read (fri) + number of records in the table,
@@ -550,7 +566,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
}
down_read(&adev->reset_domain->sem);
- res = amdgpu_eeprom_read(&adev->pm.smu_i2c,
+ res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
control->i2c_address +
control->ras_record_offset,
buf, buf_size);
@@ -646,7 +662,7 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
/* i2c may be unstable in gpu reset */
down_read(&adev->reset_domain->sem);
buf_size = num * RAS_TABLE_RECORD_SIZE;
- res = amdgpu_eeprom_read(&adev->pm.smu_i2c,
+ res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
control->i2c_address +
RAS_INDEX_TO_OFFSET(control, fri),
buf, buf_size);
@@ -683,6 +699,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
const u32 num)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
int i, res;
u8 *buf, *pp;
u32 g0, g1;
@@ -750,8 +767,15 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
/* Read up everything? Then transform.
*/
pp = buf;
- for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE)
+ for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) {
__decode_table_record_from_buf(control, &record[i], pp);
+
+ /* update bad channel bitmap */
+ if (!(control->bad_channel_bitmap & (1 << record[i].mem_channel))) {
+ control->bad_channel_bitmap |= 1 << record[i].mem_channel;
+ con->update_channel_flag = true;
+ }
+ }
Out:
kfree(buf);
mutex_unlock(&control->ras_tbl_mutex);
@@ -1011,7 +1035,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
return -ENOMEM;
}
- res = amdgpu_eeprom_read(&adev->pm.smu_i2c,
+ res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
control->i2c_address +
control->ras_header_offset,
buf, buf_size);
@@ -1047,7 +1071,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
return 0;
/* Verify i2c adapter is initialized */
- if (!adev->pm.smu_i2c.algo)
+ if (!adev->pm.ras_eeprom_i2c_bus || !adev->pm.ras_eeprom_i2c_bus->algo)
return -ENOENT;
if (!__get_eeprom_i2c_addr(adev, control))
@@ -1059,7 +1083,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
mutex_init(&control->ras_tbl_mutex);
/* Read the table header from EEPROM address */
- res = amdgpu_eeprom_read(&adev->pm.smu_i2c,
+ res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
control->i2c_address + control->ras_header_offset,
buf, RAS_TABLE_HEADER_SIZE);
if (res < RAS_TABLE_HEADER_SIZE) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index 6bb00578bfbb..54d9bfe0881d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -80,6 +80,10 @@ struct amdgpu_ras_eeprom_control {
/* Protect table access via this mutex.
*/
struct mutex ras_tbl_mutex;
+
+ /* Record channel info which occurred bad pages
+ */
+ u32 bad_channel_bitmap;
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index 248d64158721..c80af0889773 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -36,8 +36,8 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
{
int ret = 0;
- switch (adev->asic_type) {
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 2):
ret = aldebaran_reset_init(adev);
break;
default:
@@ -51,8 +51,8 @@ int amdgpu_reset_fini(struct amdgpu_device *adev)
{
int ret = 0;
- switch (adev->asic_type) {
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 2):
ret = aldebaran_reset_fini(adev);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 35bcb6dc1816..7f33ae87cb41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -193,6 +193,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
adev->rings[ring->idx] = ring;
ring->num_hw_submission = sched_hw_submission;
ring->sched_score = sched_score;
+ ring->vmid_wait = dma_fence_get_stub();
r = amdgpu_fence_driver_init_ring(ring);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 48365da213dc..5320bb0883d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -28,6 +28,13 @@
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>
+struct amdgpu_device;
+struct amdgpu_ring;
+struct amdgpu_ib;
+struct amdgpu_cs_parser;
+struct amdgpu_job;
+struct amdgpu_vm;
+
/* max number of rings */
#define AMDGPU_MAX_RINGS 28
#define AMDGPU_MAX_HWIP_RINGS 8
@@ -82,11 +89,13 @@ enum amdgpu_ib_pool_type {
AMDGPU_IB_POOL_MAX
};
-struct amdgpu_device;
-struct amdgpu_ring;
-struct amdgpu_ib;
-struct amdgpu_cs_parser;
-struct amdgpu_job;
+struct amdgpu_ib {
+ struct amdgpu_sa_bo *sa_bo;
+ uint32_t length_dw;
+ uint64_t gpu_addr;
+ uint32_t *ptr;
+ uint32_t flags;
+};
struct amdgpu_sched {
u32 num_scheds;
@@ -111,6 +120,8 @@ struct amdgpu_fence_driver {
struct dma_fence **fences;
};
+extern const struct drm_sched_backend_ops amdgpu_sched_ops;
+
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
@@ -144,6 +155,7 @@ struct amdgpu_ring_funcs {
u32 nop;
bool support_64bit_ptrs;
bool no_user_fence;
+ bool secure_submission_supported;
unsigned vmhub;
unsigned extra_dw;
@@ -152,8 +164,12 @@ struct amdgpu_ring_funcs {
u64 (*get_wptr)(struct amdgpu_ring *ring);
void (*set_wptr)(struct amdgpu_ring *ring);
/* validating and patching of IBs */
- int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
- int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+ int (*parse_cs)(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
+ int (*patch_cs_in_place)(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
/* constants to calculate how many DW are needed for an emit */
unsigned emit_frame_size;
unsigned emit_ib_size;
@@ -253,8 +269,8 @@ struct amdgpu_ring {
atomic_t *sched_score;
};
-#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
-#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
+#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
+#define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
@@ -352,4 +368,29 @@ int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
+
+static inline u32 amdgpu_ib_get_value(struct amdgpu_ib *ib, int idx)
+{
+ return ib->ptr[idx];
+}
+
+static inline void amdgpu_ib_set_value(struct amdgpu_ib *ib, int idx,
+ uint32_t value)
+{
+ ib->ptr[idx] = value;
+}
+
+int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned size,
+ enum amdgpu_ib_pool_type pool,
+ struct amdgpu_ib *ib);
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
+ struct dma_fence *f);
+int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ struct amdgpu_ib *ibs, struct amdgpu_job *job,
+ struct dma_fence **f);
+int amdgpu_ib_pool_init(struct amdgpu_device *adev);
+void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
+int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
+
#endif
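
With this header change the parse_cs/patch_cs_in_place hooks receive the job and the IB directly instead of an index into the parser, and IB dwords are read and written through the new amdgpu_ib_get_value()/amdgpu_ib_set_value() helpers. A minimal sketch of a backend callback under the new signature (the function name and the opcode check are illustrative, not taken from the patch):

	static int example_ring_parse_cs(struct amdgpu_cs_parser *p,
					 struct amdgpu_job *job,
					 struct amdgpu_ib *ib)
	{
		uint32_t idx;

		for (idx = 0; idx < ib->length_dw; idx += 2) {
			uint32_t cmd = amdgpu_ib_get_value(ib, idx);

			/* patch a (hypothetical) address opcode in place */
			if (cmd == 0x1)
				amdgpu_ib_set_value(ib, idx + 1, 0);
		}
		return 0;
	}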
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index 00afd0dcae86..3f671a62b009 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -127,11 +127,19 @@ struct amdgpu_rlc_funcs {
void (*reset)(struct amdgpu_device *adev);
void (*start)(struct amdgpu_device *adev);
void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
- void (*sriov_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 acc_flags, u32 hwip);
- u32 (*sriov_rreg)(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip);
bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
};
+struct amdgpu_rlcg_reg_access_ctrl {
+ uint32_t scratch_reg0;
+ uint32_t scratch_reg1;
+ uint32_t scratch_reg2;
+ uint32_t scratch_reg3;
+ uint32_t grbm_cntl;
+ uint32_t grbm_idx;
+ uint32_t spare_int;
+};
+
struct amdgpu_rlc {
/* for power gating */
struct amdgpu_bo *save_restore_obj;
@@ -191,6 +199,10 @@ struct amdgpu_rlc {
struct amdgpu_bo *rlc_toc_bo;
uint64_t rlc_toc_gpu_addr;
void *rlc_toc_buf;
+
+ bool rlcg_reg_access_supported;
+ /* registers for rlcg indirect reg access */
+ struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl;
};
void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev);
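
RLCG indirect register access is now described by a per-ASIC amdgpu_rlcg_reg_access_ctrl block instead of the sriov_wreg/sriov_rreg callbacks, so each GFX IP only has to fill in the scratch/control register offsets once. A sketch of how a GFX init function might populate it, assuming SOC15 register offsets (the specific mm* register names are illustrative):

	struct amdgpu_rlcg_reg_access_ctrl *ctrl =
		&adev->gfx.rlc.reg_access_ctrl;

	ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
	ctrl->grbm_cntl    = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL);
	ctrl->grbm_idx     = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX);
	adev->gfx.rlc.rlcg_reg_access_supported = true;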
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 65debb65a5df..e1835fd4b237 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -87,73 +87,30 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
}
int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
- void *ras_ih_info)
+ struct ras_common_if *ras_block)
{
int r, i;
- struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
- struct ras_fs_if fs_info = {
- .sysfs_name = "sdma_err_count",
- };
-
- if (!ih_info)
- return -EINVAL;
-
- if (!adev->sdma.ras_if) {
- adev->sdma.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
- if (!adev->sdma.ras_if)
- return -ENOMEM;
- adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA;
- adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->sdma.ras_if->sub_block_index = 0;
- }
- fs_info.head = ih_info->head = *adev->sdma.ras_if;
- r = amdgpu_ras_late_init(adev, adev->sdma.ras_if,
- &fs_info, ih_info);
+ r = amdgpu_ras_block_late_init(adev, ras_block);
if (r)
- goto free;
+ return r;
- if (amdgpu_ras_is_supported(adev, adev->sdma.ras_if->block)) {
+ if (amdgpu_ras_is_supported(adev, ras_block->block)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i);
if (r)
goto late_fini;
}
- } else {
- r = 0;
- goto free;
}
return 0;
late_fini:
- amdgpu_ras_late_fini(adev, adev->sdma.ras_if, ih_info);
-free:
- kfree(adev->sdma.ras_if);
- adev->sdma.ras_if = NULL;
+ amdgpu_ras_block_late_fini(adev, ras_block);
return r;
}
-void amdgpu_sdma_ras_fini(struct amdgpu_device *adev)
-{
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
- adev->sdma.ras_if) {
- struct ras_common_if *ras_if = adev->sdma.ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- /* the cb member will not be used by
- * amdgpu_ras_interrupt_remove_handler, init it only
- * to cheat the check in ras_late_fini
- */
- .cb = amdgpu_sdma_process_ras_data_cb,
- };
-
- amdgpu_ras_late_fini(adev, ras_if, &ih_info);
- kfree(ras_if);
- }
-}
-
int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
void *err_data,
struct amdgpu_iv_entry *entry)
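
SDMA RAS setup now funnels through the common amdgpu_ras_block_late_init()/amdgpu_ras_block_late_fini() pair; the per-block kmalloc of ras_if and the ad-hoc sysfs/IH descriptors are gone. A hedged sketch of how an SDMA IP version wires this up under the new scheme (the sdma_v4_4_ras object name is illustrative):

	/* in the SDMA IP's sw init; names are illustrative */
	adev->sdma.ras = &sdma_v4_4_ras;	/* per-ASIC RAS block object */

	r = amdgpu_sdma_ras_late_init(adev, adev->sdma.ras_if);
	if (r)
		return r;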
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index f8fb755e3aa6..53ac3ebae8d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -23,6 +23,7 @@
#ifndef __AMDGPU_SDMA_H__
#define __AMDGPU_SDMA_H__
+#include "amdgpu_ras.h"
/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 8
@@ -50,13 +51,8 @@ struct amdgpu_sdma_instance {
bool burst_nop;
};
-struct amdgpu_sdma_ras_funcs {
- int (*ras_late_init)(struct amdgpu_device *adev,
- void *ras_ih_info);
- void (*ras_fini)(struct amdgpu_device *adev);
- int (*query_ras_error_count)(struct amdgpu_device *adev,
- uint32_t instance, void *ras_error_status);
- void (*reset_ras_error_count)(struct amdgpu_device *adev);
+struct amdgpu_sdma_ras {
+ struct amdgpu_ras_block_object ras_block;
};
struct amdgpu_sdma {
@@ -73,7 +69,7 @@ struct amdgpu_sdma {
uint32_t srbm_soft_reset;
bool has_page_queue;
struct ras_common_if *ras_if;
- const struct amdgpu_sdma_ras_funcs *funcs;
+ struct amdgpu_sdma_ras *ras;
};
/*
@@ -121,8 +117,7 @@ amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid);
int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
- void *ras_ih_info);
-void amdgpu_sdma_ras_fini(struct amdgpu_device *adev);
+ struct ras_common_if *ras_block);
int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
void *err_data,
struct amdgpu_iv_entry *entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
deleted file mode 100644
index 909d830b513e..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ /dev/null
@@ -1,250 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/*
- * Copyright 2009 VMware, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Michel Dänzer
- */
-
-#include <drm/amdgpu_drm.h>
-#include "amdgpu.h"
-#include "amdgpu_uvd.h"
-#include "amdgpu_vce.h"
-
-/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
-static void amdgpu_do_test_moves(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- struct amdgpu_bo *vram_obj = NULL;
- struct amdgpu_bo **gtt_obj = NULL;
- struct amdgpu_bo_param bp;
- uint64_t gart_addr, vram_addr;
- unsigned n, size;
- int i, r;
-
- size = 1024 * 1024;
-
- /* Number of tests =
- * (Total GTT - gart_pin_size - (2 transfer windows for buffer moves)) / test size
- */
- n = adev->gmc.gart_size - atomic64_read(&adev->gart_pin_size);
- n -= AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS *
- AMDGPU_GPU_PAGE_SIZE;
- n /= size;
-
- gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL);
- if (!gtt_obj) {
- DRM_ERROR("Failed to allocate %d pointers\n", n);
- r = 1;
- goto out_cleanup;
- }
- memset(&bp, 0, sizeof(bp));
- bp.size = size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
- bp.flags = 0;
- bp.type = ttm_bo_type_kernel;
- bp.resv = NULL;
- bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-
- r = amdgpu_bo_create(adev, &bp, &vram_obj);
- if (r) {
- DRM_ERROR("Failed to create VRAM object\n");
- goto out_cleanup;
- }
- r = amdgpu_bo_reserve(vram_obj, false);
- if (unlikely(r != 0))
- goto out_unref;
- r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM);
- if (r) {
- DRM_ERROR("Failed to pin VRAM object\n");
- goto out_unres;
- }
- vram_addr = amdgpu_bo_gpu_offset(vram_obj);
- for (i = 0; i < n; i++) {
- void *gtt_map, *vram_map;
- void **gart_start, **gart_end;
- void **vram_start, **vram_end;
- struct dma_fence *fence = NULL;
-
- bp.domain = AMDGPU_GEM_DOMAIN_GTT;
- r = amdgpu_bo_create(adev, &bp, gtt_obj + i);
- if (r) {
- DRM_ERROR("Failed to create GTT object %d\n", i);
- goto out_lclean;
- }
-
- r = amdgpu_bo_reserve(gtt_obj[i], false);
- if (unlikely(r != 0))
- goto out_lclean_unref;
- r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT);
- if (r) {
- DRM_ERROR("Failed to pin GTT object %d\n", i);
- goto out_lclean_unres;
- }
- r = amdgpu_ttm_alloc_gart(&gtt_obj[i]->tbo);
- if (r) {
- DRM_ERROR("%p bind failed\n", gtt_obj[i]);
- goto out_lclean_unpin;
- }
- gart_addr = amdgpu_bo_gpu_offset(gtt_obj[i]);
-
- r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
- if (r) {
- DRM_ERROR("Failed to map GTT object %d\n", i);
- goto out_lclean_unpin;
- }
-
- for (gart_start = gtt_map, gart_end = gtt_map + size;
- gart_start < gart_end;
- gart_start++)
- *gart_start = gart_start;
-
- amdgpu_bo_kunmap(gtt_obj[i]);
-
- r = amdgpu_copy_buffer(ring, gart_addr, vram_addr,
- size, NULL, &fence, false, false, false);
-
- if (r) {
- DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
- goto out_lclean_unpin;
- }
-
- r = dma_fence_wait(fence, false);
- if (r) {
- DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
- goto out_lclean_unpin;
- }
-
- dma_fence_put(fence);
- fence = NULL;
-
- r = amdgpu_bo_kmap(vram_obj, &vram_map);
- if (r) {
- DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
- goto out_lclean_unpin;
- }
-
- for (gart_start = gtt_map, gart_end = gtt_map + size,
- vram_start = vram_map, vram_end = vram_map + size;
- vram_start < vram_end;
- gart_start++, vram_start++) {
- if (*vram_start != gart_start) {
- DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
- "expected 0x%p (GTT/VRAM offset "
- "0x%16llx/0x%16llx)\n",
- i, *vram_start, gart_start,
- (unsigned long long)
- (gart_addr - adev->gmc.gart_start +
- (void *)gart_start - gtt_map),
- (unsigned long long)
- (vram_addr - adev->gmc.vram_start +
- (void *)gart_start - gtt_map));
- amdgpu_bo_kunmap(vram_obj);
- goto out_lclean_unpin;
- }
- *vram_start = vram_start;
- }
-
- amdgpu_bo_kunmap(vram_obj);
-
- r = amdgpu_copy_buffer(ring, vram_addr, gart_addr,
- size, NULL, &fence, false, false, false);
-
- if (r) {
- DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
- goto out_lclean_unpin;
- }
-
- r = dma_fence_wait(fence, false);
- if (r) {
- DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
- goto out_lclean_unpin;
- }
-
- dma_fence_put(fence);
- fence = NULL;
-
- r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
- if (r) {
- DRM_ERROR("Failed to map GTT object after copy %d\n", i);
- goto out_lclean_unpin;
- }
-
- for (gart_start = gtt_map, gart_end = gtt_map + size,
- vram_start = vram_map, vram_end = vram_map + size;
- gart_start < gart_end;
- gart_start++, vram_start++) {
- if (*gart_start != vram_start) {
- DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
- "expected 0x%p (VRAM/GTT offset "
- "0x%16llx/0x%16llx)\n",
- i, *gart_start, vram_start,
- (unsigned long long)
- (vram_addr - adev->gmc.vram_start +
- (void *)vram_start - vram_map),
- (unsigned long long)
- (gart_addr - adev->gmc.gart_start +
- (void *)vram_start - vram_map));
- amdgpu_bo_kunmap(gtt_obj[i]);
- goto out_lclean_unpin;
- }
- }
-
- amdgpu_bo_kunmap(gtt_obj[i]);
-
- DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
- gart_addr - adev->gmc.gart_start);
- continue;
-
-out_lclean_unpin:
- amdgpu_bo_unpin(gtt_obj[i]);
-out_lclean_unres:
- amdgpu_bo_unreserve(gtt_obj[i]);
-out_lclean_unref:
- amdgpu_bo_unref(&gtt_obj[i]);
-out_lclean:
- for (--i; i >= 0; --i) {
- amdgpu_bo_unpin(gtt_obj[i]);
- amdgpu_bo_unreserve(gtt_obj[i]);
- amdgpu_bo_unref(&gtt_obj[i]);
- }
- if (fence)
- dma_fence_put(fence);
- break;
- }
-
- amdgpu_bo_unpin(vram_obj);
-out_unres:
- amdgpu_bo_unreserve(vram_obj);
-out_unref:
- amdgpu_bo_unref(&vram_obj);
-out_cleanup:
- kfree(gtt_obj);
- if (r) {
- pr_warn("Error while testing BO move\n");
- }
-}
-
-void amdgpu_test_moves(struct amdgpu_device *adev)
-{
- if (adev->mman.buffer_funcs)
- amdgpu_do_test_moves(adev);
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index d855cb53c7e0..06dfcf297a8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -358,11 +358,10 @@ TRACE_EVENT(amdgpu_vm_update_ptes,
}
),
TP_printk("pid:%u vm_ctx:0x%llx start:0x%010llx end:0x%010llx,"
- " flags:0x%llx, incr:%llu, dst:\n%s%s", __entry->pid,
+ " flags:0x%llx, incr:%llu, dst:\n%s", __entry->pid,
__entry->vm_ctx, __entry->start, __entry->end,
__entry->flags, __entry->incr, __print_array(
- __get_dynamic_array(dst), min(__entry->nptes, 32u), 8),
- __entry->nptes > 32 ? "..." : "")
+ __get_dynamic_array(dst), __entry->nptes, 8))
);
TRACE_EVENT(amdgpu_vm_set_ptes,
@@ -537,6 +536,22 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
__entry->seqno)
);
+TRACE_EVENT(amdgpu_reset_reg_dumps,
+ TP_PROTO(uint32_t address, uint32_t value),
+ TP_ARGS(address, value),
+ TP_STRUCT__entry(
+ __field(uint32_t, address)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->address = address;
+ __entry->value = value;
+ ),
+ TP_printk("amdgpu register dump 0x%x: 0x%x",
+ __entry->address,
+ __entry->value)
+);
+
#undef AMDGPU_JOB_GET_TIMELINE_NAME
#endif
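
The new amdgpu_reset_reg_dumps tracepoint records register address/value pairs captured around a GPU reset. A minimal usage sketch, assuming a dump loop over a table of register offsets (the table and loop variables are illustrative):

	for (i = 0; i < num_regs; i++) {
		uint32_t value = RREG32(reset_dump_reg_list[i]);

		trace_amdgpu_reset_reg_dumps(reset_dump_reg_list[i], value);
	}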
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
index 57c6c39ba064..b96d885f6e33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
@@ -23,6 +23,7 @@
*/
#include <drm/amdgpu_drm.h>
+#include "amdgpu_cs.h"
#include "amdgpu.h"
#define CREATE_TRACE_POINTS
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 57ac118fc266..f7f149588432 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -50,6 +50,7 @@
#include <drm/ttm/ttm_range_manager.h>
#include <drm/amdgpu_drm.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_object.h"
@@ -170,10 +171,10 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
* @bo: buffer object to map
* @mem: memory object to map
* @mm_cur: range to map
- * @num_pages: number of pages to map
* @window: which GART window to use
* @ring: DMA ring to use for the copy
* @tmz: if we should setup a TMZ enabled mapping
+ * @size: in number of bytes to map, out number of bytes mapped
* @addr: resulting address inside the MC address space
*
* Setup one of the GART windows to access a specific piece of memory or return
@@ -182,15 +183,14 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
struct ttm_resource *mem,
struct amdgpu_res_cursor *mm_cur,
- unsigned num_pages, unsigned window,
- struct amdgpu_ring *ring, bool tmz,
- uint64_t *addr)
+ unsigned window, struct amdgpu_ring *ring,
+ bool tmz, uint64_t *size, uint64_t *addr)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_job *job;
- unsigned num_dw, num_bytes;
- struct dma_fence *fence;
+ unsigned offset, num_pages, num_dw, num_bytes;
uint64_t src_addr, dst_addr;
+ struct dma_fence *fence;
+ struct amdgpu_job *job;
void *cpu_addr;
uint64_t flags;
unsigned int i;
@@ -198,7 +198,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
- BUG_ON(mem->mem_type == AMDGPU_PL_PREEMPT);
+
+ if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
+ return -EINVAL;
/* Map only what can't be accessed directly */
if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
@@ -207,10 +209,22 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
return 0;
}
+
+ /*
+ * If start begins at an offset inside the page, then adjust the size
+ * and addr accordingly
+ */
+ offset = mm_cur->start & ~PAGE_MASK;
+
+ num_pages = PFN_UP(*size + offset);
+ num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);
+
+ *size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);
+
*addr = adev->gmc.gart_start;
*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE;
- *addr += mm_cur->start & ~PAGE_MASK;
+ *addr += offset;
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
@@ -241,10 +255,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
dma_addr_t *dma_addr;
dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
- r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags,
- cpu_addr);
- if (r)
- goto error_free;
+ amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
} else {
dma_addr_t dma_address;
@@ -252,11 +263,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
dma_address += adev->vm_manager.vram_base_offset;
for (i = 0; i < num_pages; ++i) {
- r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
- &dma_address, flags, cpu_addr);
- if (r)
- goto error_free;
-
+ amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
+ flags, cpu_addr);
dma_address += PAGE_SIZE;
}
}
@@ -297,9 +305,6 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct dma_resv *resv,
struct dma_fence **f)
{
- const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
- AMDGPU_GPU_PAGE_SIZE);
-
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor src_mm, dst_mm;
struct dma_fence *fence = NULL;
@@ -315,29 +320,20 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
mutex_lock(&adev->mman.gtt_window_lock);
while (src_mm.remaining) {
- uint32_t src_page_offset = src_mm.start & ~PAGE_MASK;
- uint32_t dst_page_offset = dst_mm.start & ~PAGE_MASK;
+ uint64_t from, to, cur_size;
struct dma_fence *next;
- uint32_t cur_size;
- uint64_t from, to;
- /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
- * begins at an offset, then adjust the size accordingly
- */
- cur_size = max(src_page_offset, dst_page_offset);
- cur_size = min(min3(src_mm.size, dst_mm.size, size),
- (uint64_t)(GTT_MAX_BYTES - cur_size));
+ /* Never copy more than 256MiB at once to avoid a timeout */
+ cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
/* Map src to window 0 and dst to window 1. */
r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
- PFN_UP(cur_size + src_page_offset),
- 0, ring, tmz, &from);
+ 0, ring, tmz, &cur_size, &from);
if (r)
goto error;
r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
- PFN_UP(cur_size + dst_page_offset),
- 1, ring, tmz, &to);
+ 1, ring, tmz, &cur_size, &to);
if (r)
goto error;
@@ -396,8 +392,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
struct dma_fence *wipe_fence = NULL;
- r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
- NULL, &wipe_fence);
+ r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence);
if (r) {
goto error;
} else if (wipe_fence) {
@@ -821,14 +816,13 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
#endif
}
-static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
- struct ttm_buffer_object *tbo,
- uint64_t flags)
+static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
+ struct ttm_buffer_object *tbo,
+ uint64_t flags)
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
struct ttm_tt *ttm = tbo->ttm;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- int r;
if (amdgpu_bo_encrypted(abo))
flags |= AMDGPU_PTE_TMZ;
@@ -836,10 +830,8 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
uint64_t page_idx = 1;
- r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
- gtt->ttm.dma_address, flags);
- if (r)
- goto gart_bind_fail;
+ amdgpu_gart_bind(adev, gtt->offset, page_idx,
+ gtt->ttm.dma_address, flags);
/* The memory type of the first page defaults to UC. Now
* modify the memory type to NC from the second page of
@@ -848,21 +840,13 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
- r = amdgpu_gart_bind(adev,
- gtt->offset + (page_idx << PAGE_SHIFT),
- ttm->num_pages - page_idx,
- &(gtt->ttm.dma_address[page_idx]), flags);
+ amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT),
+ ttm->num_pages - page_idx,
+ &(gtt->ttm.dma_address[page_idx]), flags);
} else {
- r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
- gtt->ttm.dma_address, flags);
+ amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+ gtt->ttm.dma_address, flags);
}
-
-gart_bind_fail:
- if (r)
- DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
- ttm->num_pages, gtt->offset);
-
- return r;
}
/*
@@ -878,7 +862,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void*)ttm;
uint64_t flags;
- int r = 0;
+ int r;
if (!bo_mem)
return -EINVAL;
@@ -925,14 +909,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
/* bind pages into GART page tables */
gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
- r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
- gtt->ttm.dma_address, flags);
-
- if (r)
- DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
- ttm->num_pages, gtt->offset);
+ amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+ gtt->ttm.dma_address, flags);
gtt->bound = true;
- return r;
+ return 0;
}
/*
@@ -982,12 +962,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
/* Bind pages */
gtt->offset = (u64)tmp->start << PAGE_SHIFT;
- r = amdgpu_ttm_gart_bind(adev, bo, flags);
- if (unlikely(r)) {
- ttm_resource_free(bo, &tmp);
- return r;
- }
-
+ amdgpu_ttm_gart_bind(adev, bo, flags);
amdgpu_gart_invalidate_tlb(adev);
ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, tmp);
@@ -1001,19 +976,16 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
* Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
* rebind GTT pages during a GPU reset.
*/
-int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
+void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
uint64_t flags;
- int r;
if (!tbo->ttm)
- return 0;
+ return;
flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
- r = amdgpu_ttm_gart_bind(adev, tbo, flags);
-
- return r;
+ amdgpu_ttm_gart_bind(adev, tbo, flags);
}
/*
@@ -1027,7 +999,6 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- int r;
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr) {
@@ -1047,10 +1018,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
return;
/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
- r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
- if (r)
- DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
- gtt->ttm.num_pages, gtt->offset);
+ amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
gtt->bound = false;
}
@@ -1169,6 +1137,26 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
}
/**
+ * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
+ * task
+ *
+ * @tbo: The ttm_buffer_object that contains the userptr
+ * @user_addr: The returned value
+ */
+int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
+ uint64_t *user_addr)
+{
+ struct amdgpu_ttm_tt *gtt;
+
+ if (!tbo->ttm)
+ return -EINVAL;
+
+ gtt = (void *)tbo->ttm;
+ *user_addr = gtt->userptr;
+ return 0;
+}
+
+/**
* amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
* task
*
@@ -1433,6 +1421,63 @@ static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
}
}
+static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
+ unsigned long offset, void *buf, int len, int write)
+{
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ struct amdgpu_res_cursor src_mm;
+ struct amdgpu_job *job;
+ struct dma_fence *fence;
+ uint64_t src_addr, dst_addr;
+ unsigned int num_dw;
+ int r, idx;
+
+ if (len != PAGE_SIZE)
+ return -EINVAL;
+
+ if (!adev->mman.sdma_access_ptr)
+ return -EACCES;
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return -ENODEV;
+
+ if (write)
+ memcpy(adev->mman.sdma_access_ptr, buf, len);
+
+ num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
+ if (r)
+ goto out;
+
+ amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
+ src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start;
+ dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
+ if (write)
+ swap(src_addr, dst_addr);
+
+ amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
+
+ amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+
+ r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+ if (r) {
+ amdgpu_job_free(job);
+ goto out;
+ }
+
+ if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
+ r = -ETIMEDOUT;
+ dma_fence_put(fence);
+
+ if (!(r || write))
+ memcpy(buf, adev->mman.sdma_access_ptr, len);
+out:
+ drm_dev_exit(idx);
+ return r;
+}
+
/**
* amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
*
@@ -1457,6 +1502,10 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
if (bo->resource->mem_type != TTM_PL_VRAM)
return -EIO;
+ if (amdgpu_device_has_timeouts_enabled(adev) &&
+ !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
+ return len;
+
amdgpu_res_first(bo->resource, offset, len, &cursor);
while (cursor.remaining) {
size_t count, size = cursor.size;
@@ -1796,6 +1845,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
+ if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mman.sdma_access_bo, NULL,
+ &adev->mman.sdma_access_ptr))
+ DRM_WARN("Debug VRAM access will use slowpath MM access\n");
+
return 0;
}
@@ -1817,6 +1872,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
if (adev->mman.stolen_reserved_size)
amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
+ &adev->mman.sdma_access_ptr);
amdgpu_ttm_fw_reserve_vram_fini(adev);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@ -1887,23 +1944,55 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
adev->mman.buffer_funcs_enabled = enable;
}
+static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
+ bool direct_submit,
+ unsigned int num_dw,
+ struct dma_resv *resv,
+ bool vm_needs_flush,
+ struct amdgpu_job **job)
+{
+ enum amdgpu_ib_pool_type pool = direct_submit ?
+ AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED;
+ int r;
+
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job);
+ if (r)
+ return r;
+
+ if (vm_needs_flush) {
+ (*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
+ adev->gmc.pdb0_bo :
+ adev->gart.bo);
+ (*job)->vm_needs_flush = true;
+ }
+ if (resv) {
+ r = amdgpu_sync_resv(adev, &(*job)->sync, resv,
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
+ if (r) {
+ DRM_ERROR("sync failed (%d).\n", r);
+ amdgpu_job_free(*job);
+ return r;
+ }
+ }
+ return 0;
+}
+
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush, bool tmz)
{
- enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
- AMDGPU_IB_POOL_DELAYED;
struct amdgpu_device *adev = ring->adev;
+ unsigned num_loops, num_dw;
struct amdgpu_job *job;
-
uint32_t max_bytes;
- unsigned num_loops, num_dw;
unsigned i;
int r;
- if (direct_submit && !ring->sched.ready) {
+ if (!direct_submit && !ring->sched.ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
@@ -1911,26 +2000,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
-
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
+ r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
+ resv, vm_needs_flush, &job);
if (r)
return r;
- if (vm_needs_flush) {
- job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
- adev->gmc.pdb0_bo : adev->gart.bo);
- job->vm_needs_flush = true;
- }
- if (resv) {
- r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_SYNC_ALWAYS,
- AMDGPU_FENCE_OWNER_UNDEFINED);
- if (r) {
- DRM_ERROR("sync failed (%d).\n", r);
- goto error_free;
- }
- }
-
for (i = 0; i < num_loops; i++) {
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
@@ -1960,77 +2034,35 @@ error_free:
return r;
}
-int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- uint32_t src_data,
- struct dma_resv *resv,
- struct dma_fence **fence)
+static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
+ uint64_t dst_addr, uint32_t byte_count,
+ struct dma_resv *resv,
+ struct dma_fence **fence,
+ bool vm_needs_flush)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-
- struct amdgpu_res_cursor cursor;
+ struct amdgpu_device *adev = ring->adev;
unsigned int num_loops, num_dw;
- uint64_t num_bytes;
-
struct amdgpu_job *job;
+ uint32_t max_bytes;
+ unsigned int i;
int r;
- if (!adev->mman.buffer_funcs_enabled) {
- DRM_ERROR("Trying to clear memory with ring turned off.\n");
- return -EINVAL;
- }
-
- if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) {
- DRM_ERROR("Trying to clear preemptible memory.\n");
- return -EINVAL;
- }
-
- if (bo->tbo.resource->mem_type == TTM_PL_TT) {
- r = amdgpu_ttm_alloc_gart(&bo->tbo);
- if (r)
- return r;
- }
-
- num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT;
- num_loops = 0;
-
- amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
- while (cursor.remaining) {
- num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
- amdgpu_res_next(&cursor, cursor.size);
- }
- num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
-
- /* for IB padding */
- num_dw += 64;
-
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
- &job);
+ max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
+ num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
+ num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
+ r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
+ &job);
if (r)
return r;
- if (resv) {
- r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_SYNC_ALWAYS,
- AMDGPU_FENCE_OWNER_UNDEFINED);
- if (r) {
- DRM_ERROR("sync failed (%d).\n", r);
- goto error_free;
- }
- }
-
- amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
- while (cursor.remaining) {
- uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
- uint64_t dst_addr = cursor.start;
+ for (i = 0; i < num_loops; i++) {
+ uint32_t cur_size = min(byte_count, max_bytes);
- dst_addr += amdgpu_ttm_domain_start(adev,
- bo->tbo.resource->mem_type);
amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
cur_size);
- amdgpu_res_next(&cursor, cur_size);
+ dst_addr += cur_size;
+ byte_count -= cur_size;
}
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
@@ -2047,6 +2079,55 @@ error_free:
return r;
}
+int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+ uint32_t src_data,
+ struct dma_resv *resv,
+ struct dma_fence **f)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct dma_fence *fence = NULL;
+ struct amdgpu_res_cursor dst;
+ int r;
+
+ if (!adev->mman.buffer_funcs_enabled) {
+ DRM_ERROR("Trying to clear memory with ring turned off.\n");
+ return -EINVAL;
+ }
+
+ amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
+
+ mutex_lock(&adev->mman.gtt_window_lock);
+ while (dst.remaining) {
+ struct dma_fence *next;
+ uint64_t cur_size, to;
+
+ /* Never fill more than 256MiB at once to avoid timeouts */
+ cur_size = min(dst.size, 256ULL << 20);
+
+ r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
+ 1, ring, false, &cur_size, &to);
+ if (r)
+ goto error;
+
+ r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
+ &next, true);
+ if (r)
+ goto error;
+
+ dma_fence_put(fence);
+ fence = next;
+
+ amdgpu_res_next(&dst, cur_size);
+ }
+error:
+ mutex_unlock(&adev->mman.gtt_window_lock);
+ if (f)
+ *f = dma_fence_get(fence);
+ dma_fence_put(fence);
+ return r;
+}
+
/**
* amdgpu_ttm_evict_resources - evict memory buffers
* @adev: amdgpu device object
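
Summary of the TTM rework above: amdgpu_ttm_map_buffer() now takes the requested size as an in/out parameter and clamps it to what fits in one GART window, so the copy and fill paths simply loop in chunks of up to 256 MiB and let the helper shrink cur_size whenever a page offset or window boundary gets in the way. A condensed caller-side sketch of that pattern, drawn from the copy hunk (error handling trimmed, cursor advancement shown explicitly for illustration):

	while (src_mm.remaining) {
		uint64_t cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);

		r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
					  0, ring, tmz, &cur_size, &from);
		if (r)
			break;
		/* ... emit the copy for cur_size bytes, then advance ... */
		amdgpu_res_next(&src_mm, cur_size);
		amdgpu_res_next(&dst_mm, cur_size);
	}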
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 58c64871c94a..9120ae80ef52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -91,6 +91,10 @@ struct amdgpu_mman {
u64 fw_vram_usage_size;
struct amdgpu_bo *fw_vram_usage_reserved_bo;
void *fw_vram_usage_va;
+
+ /* PAGE_SIZE'd BO for process memory r/w over SDMA. */
+ struct amdgpu_bo *sdma_access_bo;
+ void *sdma_access_ptr;
};
struct amdgpu_copy_mem {
@@ -107,7 +111,7 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
-int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);
+void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);
uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);
@@ -149,7 +153,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
struct dma_fence **fence);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
-int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
+void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
@@ -168,6 +172,8 @@ static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
#endif
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
+int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
+ uint64_t *user_addr);
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
uint64_t addr, uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 7c2538db3cd5..40dffbac85a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -343,7 +343,8 @@ union amdgpu_firmware_header {
* fw loading support
*/
enum AMDGPU_UCODE_ID {
- AMDGPU_UCODE_ID_SDMA0 = 0,
+ AMDGPU_UCODE_ID_CAP = 0,
+ AMDGPU_UCODE_ID_SDMA0,
AMDGPU_UCODE_ID_SDMA1,
AMDGPU_UCODE_ID_SDMA2,
AMDGPU_UCODE_ID_SDMA3,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 46264a4002f7..aad3c8b4c810 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -21,7 +21,7 @@
*
*/
-#include "amdgpu_ras.h"
+#include "amdgpu.h"
static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
void *ras_error_status,
@@ -33,14 +33,14 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
int ret = 0;
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- ret = smu_get_ecc_info(&adev->smu, (void *)&(con->umc_ecc));
+ ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
if (ret == -EOPNOTSUPP) {
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->query_ras_error_count)
- adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status);
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->query_ras_error_address &&
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
adev->umc.max_ras_err_cnt_per_query) {
err_data->err_addr =
kcalloc(adev->umc.max_ras_err_cnt_per_query,
@@ -56,15 +56,15 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
/* umc query_ras_error_address is also responsible for clearing
* error status
*/
- adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status);
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
}
} else if (!ret) {
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->ecc_info_query_ras_error_count)
- adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, ras_error_status);
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_count)
+ adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->ecc_info_query_ras_error_address &&
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_address &&
adev->umc.max_ras_err_cnt_per_query) {
err_data->err_addr =
kcalloc(adev->umc.max_ras_err_cnt_per_query,
@@ -80,7 +80,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
/* umc query_ras_error_address is also responsible for clearing
* error status
*/
- adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, ras_error_status);
+ adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status);
}
}
@@ -96,8 +96,12 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
err_data->err_addr_cnt);
amdgpu_ras_save_bad_pages(adev);
- if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
- adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
+ amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
+
+ if (con->update_channel_flag == true) {
+ amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
+ con->update_channel_flag = false;
+ }
}
if (reset)
@@ -130,78 +134,39 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
return ret;
}
-static int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
void *ras_error_status,
struct amdgpu_iv_entry *entry)
{
return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
}
-int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
- struct ras_fs_if fs_info = {
- .sysfs_name = "umc_err_count",
- };
- struct ras_ih_if ih_info = {
- .cb = amdgpu_umc_process_ras_data_cb,
- };
- if (!adev->umc.ras_if) {
- adev->umc.ras_if =
- kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
- if (!adev->umc.ras_if)
- return -ENOMEM;
- adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC;
- adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->umc.ras_if->sub_block_index = 0;
- }
- ih_info.head = fs_info.head = *adev->umc.ras_if;
-
- r = amdgpu_ras_late_init(adev, adev->umc.ras_if,
- &fs_info, &ih_info);
+ r = amdgpu_ras_block_late_init(adev, ras_block);
if (r)
- goto free;
+ return r;
- if (amdgpu_ras_is_supported(adev, adev->umc.ras_if->block)) {
+ if (amdgpu_ras_is_supported(adev, ras_block->block)) {
r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
if (r)
goto late_fini;
- } else {
- r = 0;
- goto free;
}
/* ras init of specific umc version */
- if (adev->umc.ras_funcs &&
- adev->umc.ras_funcs->err_cnt_init)
- adev->umc.ras_funcs->err_cnt_init(adev);
+ if (adev->umc.ras &&
+ adev->umc.ras->err_cnt_init)
+ adev->umc.ras->err_cnt_init(adev);
return 0;
late_fini:
- amdgpu_ras_late_fini(adev, adev->umc.ras_if, &ih_info);
-free:
- kfree(adev->umc.ras_if);
- adev->umc.ras_if = NULL;
+ amdgpu_ras_block_late_fini(adev, ras_block);
return r;
}
-void amdgpu_umc_ras_fini(struct amdgpu_device *adev)
-{
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
- adev->umc.ras_if) {
- struct ras_common_if *ras_if = adev->umc.ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- .cb = amdgpu_umc_process_ras_data_cb,
- };
-
- amdgpu_ras_late_fini(adev, ras_if, &ih_info);
- kfree(ras_if);
- }
-}
-
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -219,3 +184,24 @@ int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
+
+void amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
+ uint64_t err_addr,
+ uint64_t retired_page,
+ uint32_t channel_index,
+ uint32_t umc_inst)
+{
+ struct eeprom_table_record *err_rec =
+ &err_data->err_addr[err_data->err_addr_cnt];
+
+ err_rec->address = err_addr;
+ /* page frame address is saved */
+ err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec->ts = (uint64_t)ktime_get_real_seconds();
+ err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = channel_index;
+ err_rec->mcumc_id = umc_inst;
+
+ err_data->err_addr_cnt++;
+}
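
The new amdgpu_umc_fill_error_record() centralises construction of an eeprom_table_record for a retired page; IP-specific UMC code only supplies the raw error address, the retired page, the channel index and the UMC instance. A minimal caller sketch (the page computation is illustrative; the helper call matches the prototype added above):

	/* inside an IP-specific query_ras_error_address callback */
	uint64_t retired_page = err_addr & ~(uint64_t)(AMDGPU_GPU_PAGE_SIZE - 1);

	amdgpu_umc_fill_error_record(err_data, err_addr, retired_page,
				     channel_index, umc_inst);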
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index b72194e8bfe5..2ec6698aa1fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -20,6 +20,7 @@
*/
#ifndef __AMDGPU_UMC_H__
#define __AMDGPU_UMC_H__
+#include "amdgpu_ras.h"
/*
* (addr / 256) * 4096, the higher 26 bits in ErrorAddr
@@ -40,14 +41,9 @@
#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
-struct amdgpu_umc_ras_funcs {
+struct amdgpu_umc_ras {
+ struct amdgpu_ras_block_object ras_block;
void (*err_cnt_init)(struct amdgpu_device *adev);
- int (*ras_late_init)(struct amdgpu_device *adev);
- void (*ras_fini)(struct amdgpu_device *adev);
- void (*query_ras_error_count)(struct amdgpu_device *adev,
- void *ras_error_status);
- void (*query_ras_error_address)(struct amdgpu_device *adev,
- void *ras_error_status);
bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
@@ -73,15 +69,23 @@ struct amdgpu_umc {
struct ras_common_if *ras_if;
const struct amdgpu_umc_funcs *funcs;
- const struct amdgpu_umc_ras_funcs *ras_funcs;
+ struct amdgpu_umc_ras *ras;
};
-int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
-void amdgpu_umc_ras_fini(struct amdgpu_device *adev);
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
void *ras_error_status,
bool reset);
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
+void amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
+ uint64_t err_addr,
+ uint64_t retired_page,
+ uint32_t channel_index,
+ uint32_t umc_inst);
+
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 6f8de11a17f1..39c74d9fa7cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -37,6 +37,7 @@
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
+#include "amdgpu_cs.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"
@@ -98,7 +99,7 @@ struct amdgpu_uvd_cs_ctx {
unsigned reg, count;
unsigned data0, data1;
unsigned idx;
- unsigned ib_idx;
+ struct amdgpu_ib *ib;
/* does the IB has a msg command */
bool has_msg_cmd;
@@ -557,8 +558,8 @@ static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
uint32_t lo, hi;
uint64_t addr;
- lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
- hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
+ lo = amdgpu_ib_get_value(ctx->ib, ctx->data0);
+ hi = amdgpu_ib_get_value(ctx->ib, ctx->data1);
addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
return addr;
@@ -589,7 +590,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
if (!ctx->parser->adev->uvd.address_64_bit) {
/* check if it's a message or feedback command */
- cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
+ cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx) >> 1;
if (cmd == 0x0 || cmd == 0x3) {
/* yes, force it into VRAM */
uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
@@ -834,6 +835,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
handle = msg[2];
if (handle == 0) {
+ amdgpu_bo_kunmap(bo);
DRM_ERROR("Invalid UVD handle!\n");
return -EINVAL;
}
@@ -892,6 +894,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
}
+ amdgpu_bo_kunmap(bo);
return -EINVAL;
}
@@ -925,12 +928,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
start += addr;
- amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
- lower_32_bits(start));
- amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
- upper_32_bits(start));
+ amdgpu_ib_set_value(ctx->ib, ctx->data0, lower_32_bits(start));
+ amdgpu_ib_set_value(ctx->ib, ctx->data1, upper_32_bits(start));
- cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
+ cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx) >> 1;
if (cmd < 0x4) {
if ((end - start) < ctx->buf_sizes[cmd]) {
DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
@@ -990,14 +991,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
- struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
int i, r;
ctx->idx++;
for (i = 0; i <= ctx->count; ++i) {
unsigned reg = ctx->reg + i;
- if (ctx->idx >= ib->length_dw) {
+ if (ctx->idx >= ctx->ib->length_dw) {
DRM_ERROR("Register command after end of CS!\n");
return -EINVAL;
}
@@ -1037,11 +1037,10 @@ static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
- struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
int r;
- for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) {
- uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
+ for (ctx->idx = 0 ; ctx->idx < ctx->ib->length_dw; ) {
+ uint32_t cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx);
unsigned type = CP_PACKET_GET_TYPE(cmd);
switch (type) {
case PACKET_TYPE0:
@@ -1066,11 +1065,14 @@ static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
* amdgpu_uvd_ring_parse_cs - UVD command submission parser
*
* @parser: Command submission parser context
- * @ib_idx: Which indirect buffer to use
+ * @job: the job to parse
+ * @ib: the IB to patch
*
* Parse the command stream, patch in addresses as necessary.
*/
-int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
+int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
{
struct amdgpu_uvd_cs_ctx ctx = {};
unsigned buf_sizes[] = {
@@ -1080,10 +1082,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
[0x00000003] = 2048,
[0x00000004] = 0xFFFFFFFF,
};
- struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
int r;
- parser->job->vm = NULL;
+ job->vm = NULL;
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
if (ib->length_dw % 16) {
@@ -1094,7 +1095,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
ctx.parser = parser;
ctx.buf_sizes = buf_sizes;
- ctx.ib_idx = ib_idx;
+ ctx.ib = ib;
/* first round only required on chips without UVD 64 bit address support */
if (!parser->adev->uvd.address_64_bit) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 76ac9699885d..9f89bb7cd60b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -82,7 +82,9 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence);
void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
struct drm_file *filp);
-int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
+int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring);
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 344f711ad144..02cb3a12dd76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -34,6 +34,7 @@
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
+#include "amdgpu_cs.h"
#include "cikd.h"
/* 1 second timeout */
@@ -587,8 +588,7 @@ err:
/**
* amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
*
- * @p: parser context
- * @ib_idx: indirect buffer to use
+ * @ib: indirect buffer to use
* @lo: address of lower dword
* @hi: address of higher dword
* @size: minimum size
@@ -596,8 +596,9 @@ err:
*
* Make sure that no BO cross a 4GB boundary.
*/
-static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
- int lo, int hi, unsigned size, int32_t index)
+static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p,
+ struct amdgpu_ib *ib, int lo, int hi,
+ unsigned size, int32_t index)
{
int64_t offset = ((uint64_t)size) * ((int64_t)index);
struct ttm_operation_ctx ctx = { false, false };
@@ -607,8 +608,8 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
uint64_t addr;
int r;
- addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
- ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
+ addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
+ ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
if (index >= 0) {
addr += offset;
fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
@@ -638,7 +639,7 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
* amdgpu_vce_cs_reloc - command submission relocation
*
* @p: parser context
- * @ib_idx: indirect buffer to use
+ * @ib: indirect buffer to use
* @lo: address of lower dword
* @hi: address of higher dword
* @size: minimum size
@@ -646,7 +647,7 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
*
* Patch relocation inside command stream with real buffer address
*/
-static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
+static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib,
int lo, int hi, unsigned size, uint32_t index)
{
struct amdgpu_bo_va_mapping *mapping;
@@ -657,8 +658,8 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
if (index == 0xffffffff)
index = 0;
- addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
- ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
+ addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
+ ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
addr += ((uint64_t)size) * ((uint64_t)index);
r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
@@ -679,8 +680,8 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
addr += amdgpu_bo_gpu_offset(bo);
addr -= ((uint64_t)size) * ((uint64_t)index);
- amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
- amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));
+ amdgpu_ib_set_value(ib, lo, lower_32_bits(addr));
+ amdgpu_ib_set_value(ib, hi, upper_32_bits(addr));
return 0;
}
@@ -729,11 +730,13 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
* amdgpu_vce_ring_parse_cs - parse and validate the command stream
*
* @p: parser context
- * @ib_idx: indirect buffer to use
+ * @job: the job to parse
+ * @ib: the IB to patch
*/
-int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
+int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
{
- struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
unsigned fb_idx = 0, bs_idx = 0;
int session_idx = -1;
uint32_t destroyed = 0;
@@ -744,12 +747,12 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
unsigned idx;
int i, r = 0;
- p->job->vm = NULL;
+ job->vm = NULL;
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
for (idx = 0; idx < ib->length_dw;) {
- uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
- uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
+ uint32_t len = amdgpu_ib_get_value(ib, idx);
+ uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);
if ((len < 8) || (len & 3)) {
DRM_ERROR("invalid VCE command length (%d)!\n", len);
@@ -759,52 +762,52 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
switch (cmd) {
case 0x00000002: /* task info */
- fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
- bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
+ fb_idx = amdgpu_ib_get_value(ib, idx + 6);
+ bs_idx = amdgpu_ib_get_value(ib, idx + 7);
break;
case 0x03000001: /* encode */
- r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
- idx + 9, 0, 0);
+ r = amdgpu_vce_validate_bo(p, ib, idx + 10, idx + 9,
+ 0, 0);
if (r)
goto out;
- r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
- idx + 11, 0, 0);
+ r = amdgpu_vce_validate_bo(p, ib, idx + 12, idx + 11,
+ 0, 0);
if (r)
goto out;
break;
case 0x05000001: /* context buffer */
- r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
- idx + 2, 0, 0);
+ r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
+ 0, 0);
if (r)
goto out;
break;
case 0x05000004: /* video bitstream buffer */
- tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
- r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
+ tmp = amdgpu_ib_get_value(ib, idx + 4);
+ r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
tmp, bs_idx);
if (r)
goto out;
break;
case 0x05000005: /* feedback buffer */
- r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
+ r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
4096, fb_idx);
if (r)
goto out;
break;
case 0x0500000d: /* MV buffer */
- r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
- idx + 2, 0, 0);
+ r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
+ 0, 0);
if (r)
goto out;
- r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
- idx + 7, 0, 0);
+ r = amdgpu_vce_validate_bo(p, ib, idx + 8, idx + 7,
+ 0, 0);
if (r)
goto out;
break;
@@ -814,12 +817,12 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
}
for (idx = 0; idx < ib->length_dw;) {
- uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
- uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
+ uint32_t len = amdgpu_ib_get_value(ib, idx);
+ uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);
switch (cmd) {
case 0x00000001: /* session */
- handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+ handle = amdgpu_ib_get_value(ib, idx + 2);
session_idx = amdgpu_vce_validate_handle(p, handle,
&allocated);
if (session_idx < 0) {
@@ -830,8 +833,8 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
break;
case 0x00000002: /* task info */
- fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
- bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
+ fb_idx = amdgpu_ib_get_value(ib, idx + 6);
+ bs_idx = amdgpu_ib_get_value(ib, idx + 7);
break;
case 0x01000001: /* create */
@@ -846,8 +849,8 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
goto out;
}
- *size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
- amdgpu_get_ib_value(p, ib_idx, idx + 10) *
+ *size = amdgpu_ib_get_value(ib, idx + 8) *
+ amdgpu_ib_get_value(ib, idx + 10) *
8 * 3 / 2;
break;
@@ -876,12 +879,12 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
break;
case 0x03000001: /* encode */
- r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
+ r = amdgpu_vce_cs_reloc(p, ib, idx + 10, idx + 9,
*size, 0);
if (r)
goto out;
- r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
+ r = amdgpu_vce_cs_reloc(p, ib, idx + 12, idx + 11,
*size / 3, 0);
if (r)
goto out;
@@ -892,35 +895,35 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
break;
case 0x05000001: /* context buffer */
- r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
+ r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
*size * 2, 0);
if (r)
goto out;
break;
case 0x05000004: /* video bitstream buffer */
- tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
- r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
+ tmp = amdgpu_ib_get_value(ib, idx + 4);
+ r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
tmp, bs_idx);
if (r)
goto out;
break;
case 0x05000005: /* feedback buffer */
- r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
+ r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
4096, fb_idx);
if (r)
goto out;
break;
case 0x0500000d: /* MV buffer */
- r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
- idx + 2, *size, 0);
+ r = amdgpu_vce_cs_reloc(p, ib, idx + 3,
+ idx + 2, *size, 0);
if (r)
goto out;
- r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
- idx + 7, *size / 12, 0);
+ r = amdgpu_vce_cs_reloc(p, ib, idx + 8,
+ idx + 7, *size / 12, 0);
if (r)
goto out;
break;
@@ -965,11 +968,13 @@ out:
* amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
*
* @p: parser context
- * @ib_idx: indirect buffer to use
+ * @job: the job to parse
+ * @ib: the IB to patch
*/
-int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
{
- struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
int session_idx = -1;
uint32_t destroyed = 0;
uint32_t created = 0;
@@ -978,8 +983,8 @@ int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
int i, r = 0, idx = 0;
while (idx < ib->length_dw) {
- uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
- uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
+ uint32_t len = amdgpu_ib_get_value(ib, idx);
+ uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);
if ((len < 8) || (len & 3)) {
DRM_ERROR("invalid VCE command length (%d)!\n", len);
@@ -989,7 +994,7 @@ int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
switch (cmd) {
case 0x00000001: /* session */
- handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+ handle = amdgpu_ib_get_value(ib, idx + 2);
session_idx = amdgpu_vce_validate_handle(p, handle,
&allocated);
if (session_idx < 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index be4a6e773c5b..ea680fc9a6c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -59,8 +59,11 @@ int amdgpu_vce_entity_init(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
-int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
struct amdgpu_ib *ib, uint32_t flags);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
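With the index-based lookup removed, a ring's .parse_cs hook now receives the job and the specific IB directly. A minimal sketch of the assumed caller side (the exact call site lives in amdgpu_cs.c and is not part of this hunk):

    /* sketch: how the CS front-end would invoke the reworked hook */
    struct amdgpu_ib *ib = &job->ibs[i];

    if (ring->funcs->parse_cs)
            r = ring->funcs->parse_cs(p, job, ib);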
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 9a19a6a57b23..f99093f2ebc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -27,6 +27,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/debugfs.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
@@ -51,6 +52,7 @@
#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
+#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2_vcn.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -68,6 +70,7 @@ MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
+MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -77,6 +80,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned char fw_check;
+ unsigned int fw_shared_size, log_offset;
int i, r;
INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
@@ -165,6 +169,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
+ case IP_VERSION(3, 1, 2):
+ fw_name = FIRMWARE_VCN_3_1_2;
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = true;
+ break;
default:
return -EINVAL;
}
@@ -218,7 +228,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
- bo_size += AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
+ fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
+ log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
+ bo_size += fw_shared_size;
+
+ if (amdgpu_vcnfw_log)
+ bo_size += AMDGPU_VCNFW_LOG_SIZE;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
@@ -232,10 +247,18 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
return r;
}
- adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr +
- bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
- adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr +
- bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
+ adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
+ bo_size - fw_shared_size;
+ adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
+ bo_size - fw_shared_size;
+
+ adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
+
+ if (amdgpu_vcnfw_log) {
+ adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
+ adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
+ adev->vcn.inst[i].fw_shared.log_offset = log_offset;
+ }
if (adev->vcn.indirect_sram) {
r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
@@ -971,3 +994,112 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
}
}
+
+/*
+ * debugfs interface for reading the VCN firmware log buffer.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_vcn_inst *vcn;
+ void *log_buf;
+ volatile struct amdgpu_vcn_fwlog *plog;
+ unsigned int read_pos, write_pos, available, i, read_bytes = 0;
+ unsigned int read_num[2] = {0};
+
+ vcn = file_inode(f)->i_private;
+ if (!vcn)
+ return -ENODEV;
+
+ if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
+ return -EFAULT;
+
+ log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
+
+ plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
+ read_pos = plog->rptr;
+ write_pos = plog->wptr;
+
+ if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
+ return -EFAULT;
+
+ if (!size || (read_pos == write_pos))
+ return 0;
+
+ if (write_pos > read_pos) {
+ available = write_pos - read_pos;
+ read_num[0] = min(size, (size_t)available);
+ } else {
+ read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
+ available = read_num[0] + write_pos - plog->header_size;
+ if (size > available)
+ read_num[1] = write_pos - plog->header_size;
+ else if (size > read_num[0])
+ read_num[1] = size - read_num[0];
+ else
+ read_num[0] = size;
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (read_num[i]) {
+ if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
+ read_pos = plog->header_size;
+ if (read_num[i] == copy_to_user((buf + read_bytes),
+ (log_buf + read_pos), read_num[i]))
+ return -EFAULT;
+
+ read_bytes += read_num[i];
+ read_pos += read_num[i];
+ }
+ }
+
+ plog->rptr = read_pos;
+ *pos += read_bytes;
+ return read_bytes;
+}
+
+static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_vcn_fwlog_read,
+ .llseek = default_llseek
+};
+#endif
+
+void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
+ struct amdgpu_vcn_inst *vcn)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ sprintf(name, "amdgpu_vcn_%d_fwlog", i);
+ debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, vcn,
+ &amdgpu_debugfs_vcnfwlog_fops,
+ AMDGPU_VCNFW_LOG_SIZE);
+#endif
+}
+
+void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
+{
+#if defined(CONFIG_DEBUG_FS)
+ volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
+ void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
+ uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
+ volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
+ volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ + vcn->fw_shared.log_offset;
+ *flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
+ fw_log->is_enabled = 1;
+ fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
+ fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
+ fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);
+
+ log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
+ log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
+ log_buf->rptr = log_buf->header_size;
+ log_buf->wptr = log_buf->header_size;
+ log_buf->wrapped = 0;
+#endif
+}
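The address arithmetic above places the firmware log ring directly after the shared-memory region at the tail of the VCPU BO when amdgpu_vcnfw_log is set. A rough sketch of the resulting layout, derived from the code above (not an authoritative memory map):

    /*
     *  ... ucode/stack/context ... | fw_shared (mem_size) | fw log (AMDGPU_VCNFW_LOG_SIZE)
     *
     *  fw_shared.cpu_addr = cpu_addr + bo_size - fw_shared_size - AMDGPU_VCNFW_LOG_SIZE
     *  log_buf            = fw_shared.cpu_addr + fw_shared.mem_size
     */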
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 5d3728b027d3..e2fde88aaf5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -158,6 +158,7 @@
#define AMDGPU_VCN_FW_SHARED_FLAG_0_RB (1 << 6)
#define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8)
#define AMDGPU_VCN_SW_RING_FLAG (1 << 9)
+#define AMDGPU_VCN_FW_LOGGING_FLAG (1 << 10)
#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001
#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 0x00000001
@@ -205,6 +206,13 @@ struct amdgpu_vcn_reg{
unsigned scratch9;
};
+struct amdgpu_vcn_fw_shared {
+ void *cpu_addr;
+ uint64_t gpu_addr;
+ uint32_t mem_size;
+ uint32_t log_offset;
+};
+
struct amdgpu_vcn_inst {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
@@ -221,8 +229,7 @@ struct amdgpu_vcn_inst {
uint64_t dpg_sram_gpu_addr;
uint32_t *dpg_sram_curr_addr;
atomic_t dpg_enc_submission_cnt;
- void *fw_shared_cpu_addr;
- uint64_t fw_shared_gpu_addr;
+ struct amdgpu_vcn_fw_shared fw_shared;
};
struct amdgpu_vcn {
@@ -265,6 +272,13 @@ struct amdgpu_fw_shared_sw_ring {
uint8_t padding[3];
};
+struct amdgpu_fw_shared_fw_logging {
+ uint8_t is_enabled;
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t size;
+};
+
struct amdgpu_fw_shared {
uint32_t present_flag_0;
uint8_t pad[44];
@@ -272,6 +286,15 @@ struct amdgpu_fw_shared {
uint8_t pad1[1];
struct amdgpu_fw_shared_multi_queue multi_queue;
struct amdgpu_fw_shared_sw_ring sw_ring;
+ struct amdgpu_fw_shared_fw_logging fw_log;
+};
+
+struct amdgpu_vcn_fwlog {
+ uint32_t rptr;
+ uint32_t wptr;
+ uint32_t buffer_size;
+ uint32_t header_size;
+ uint8_t wrapped;
};
struct amdgpu_vcn_decode_buffer {
@@ -313,4 +336,7 @@ enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring);
void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev);
+void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn);
+void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
+ uint8_t i, struct amdgpu_vcn_inst *vcn);
#endif
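The two new prototypes are expected to be wired up per VCN instance by the IP-specific code; a hedged sketch of the assumed callers (not part of this diff):

    /* during the IP block's sw_init, per instance i */
    if (amdgpu_vcnfw_log)
            amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);

    /* during debugfs setup */
    amdgpu_debugfs_vcn_fwlog_init(adev, i, &adev->vcn.inst[i]);

The resulting file would then be readable from userspace under the DRM debugfs directory as amdgpu_vcn_<i>_fwlog, per the name built in amdgpu_debugfs_vcn_fwlog_init().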
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 3a25dd220786..a025f080aa6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -822,3 +822,148 @@ void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
}
}
}
+
+static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
+ u32 acc_flags, u32 hwip,
+ bool write, u32 *rlcg_flag)
+{
+ bool ret = false;
+
+ switch (hwip) {
+ case GC_HWIP:
+ if (amdgpu_sriov_reg_indirect_gc(adev)) {
+ *rlcg_flag =
+ write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
+ ret = true;
+			/* only in newer versions can AMDGPU_REGS_NO_KIQ and
+			 * AMDGPU_REGS_RLC be enabled simultaneously */
+ } else if ((acc_flags & AMDGPU_REGS_RLC) &&
+ !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
+ *rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
+ ret = true;
+ }
+ break;
+ case MMHUB_HWIP:
+ if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
+ (acc_flags & AMDGPU_REGS_RLC) && write) {
+ *rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
+ ret = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+{
+ struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
+ uint32_t timeout = 50000;
+ uint32_t i, tmp;
+ uint32_t ret = 0;
+ static void *scratch_reg0;
+ static void *scratch_reg1;
+ static void *scratch_reg2;
+ static void *scratch_reg3;
+ static void *spare_int;
+
+ if (!adev->gfx.rlc.rlcg_reg_access_supported) {
+ dev_err(adev->dev,
+			"indirect register access through rlcg is not available\n");
+ return 0;
+ }
+
+ reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+ scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
+ scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
+ scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
+ scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
+ if (reg_access_ctrl->spare_int)
+ spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
+
+ if (offset == reg_access_ctrl->grbm_cntl) {
+ /* if the target reg offset is grbm_cntl, write to scratch_reg2 */
+ writel(v, scratch_reg2);
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ } else if (offset == reg_access_ctrl->grbm_idx) {
+ /* if the target reg offset is grbm_idx, write to scratch_reg3 */
+ writel(v, scratch_reg3);
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ } else {
+ /*
+ * SCRATCH_REG0 = read/write value
+ * SCRATCH_REG1[30:28] = command
+ * SCRATCH_REG1[19:0] = address in dword
+ * SCRATCH_REG1[26:24] = Error reporting
+ */
+ writel(v, scratch_reg0);
+ writel((offset | flag), scratch_reg1);
+ if (reg_access_ctrl->spare_int)
+ writel(1, spare_int);
+
+ for (i = 0; i < timeout; i++) {
+ tmp = readl(scratch_reg1);
+ if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
+ break;
+ udelay(10);
+ }
+
+ if (i >= timeout) {
+ if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
+ if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
+ dev_err(adev->dev,
+ "vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
+ } else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
+ dev_err(adev->dev,
+ "wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
+ } else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
+ dev_err(adev->dev,
+					"register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
+ } else {
+ dev_err(adev->dev,
+ "unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
+ }
+ } else {
+ dev_err(adev->dev,
+				"timeout: rlcg failed to program reg: 0x%05x\n", offset);
+ }
+ }
+ }
+
+ ret = readl(scratch_reg0);
+ return ret;
+}
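The command word written to SCRATCH_REG1 packs the dword register offset into bits [19:0] and the operation into bits [30:28], matching the AMDGPU_RLCG_* flags added to amdgpu_virt.h below. A worked example with an illustrative offset:

    /* GC read of the register at dword offset 0x1234:
     *   SCRATCH_REG1 = 0x1234 | AMDGPU_RLCG_GC_READ (0x1 << 28) = 0x10001234
     * completion is signalled when the RLC clears the address bits covered by
     * AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK, which is what the polling loop waits for.
     */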
+
+void amdgpu_sriov_wreg(struct amdgpu_device *adev,
+ u32 offset, u32 value,
+ u32 acc_flags, u32 hwip)
+{
+ u32 rlcg_flag;
+
+ if (!amdgpu_sriov_runtime(adev) &&
+ amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
+ amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
+ return;
+ }
+
+ if (acc_flags & AMDGPU_REGS_NO_KIQ)
+ WREG32_NO_KIQ(offset, value);
+ else
+ WREG32(offset, value);
+}
+
+u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
+ u32 offset, u32 acc_flags, u32 hwip)
+{
+ u32 rlcg_flag;
+
+ if (!amdgpu_sriov_runtime(adev) &&
+ amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
+ return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);
+
+ if (acc_flags & AMDGPU_REGS_NO_KIQ)
+ return RREG32_NO_KIQ(offset);
+ else
+ return RREG32(offset);
+}
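A hedged usage sketch of the new shared helpers; the register name is hypothetical, while the access flags and hwip values are the ones handled above:

    /* SR-IOV-safe write to a hypothetical GC register */
    amdgpu_sriov_wreg(adev, SOC15_REG_OFFSET(GC, 0, mmSOME_GC_REG), value,
                      AMDGPU_REGS_RLC, GC_HWIP);

    /* and the matching read */
    value = amdgpu_sriov_rreg(adev, SOC15_REG_OFFSET(GC, 0, mmSOME_GC_REG),
                              AMDGPU_REGS_NO_KIQ, GC_HWIP);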
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 9adfb8d63280..239f232f9c02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -32,6 +32,19 @@
#define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* the whole GPU is passed through to the VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */
+/* flags for indirect register access path supported by rlcg for sriov */
+#define AMDGPU_RLCG_GC_WRITE_LEGACY (0x8 << 28)
+#define AMDGPU_RLCG_GC_WRITE (0x0 << 28)
+#define AMDGPU_RLCG_GC_READ (0x1 << 28)
+#define AMDGPU_RLCG_MMHUB_WRITE (0x2 << 28)
+
+/* error code for indirect register access path supported by rlcg for sriov */
+#define AMDGPU_RLCG_VFGATE_DISABLED 0x4000000
+#define AMDGPU_RLCG_WRONG_OPERATION_TYPE 0x2000000
+#define AMDGPU_RLCG_REG_NOT_IN_RANGE 0x1000000
+
+#define AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK 0xFFFFF
+
/* all asic after AI use this offset */
#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
/* tonga/fiji use this offset */
@@ -275,13 +288,18 @@ struct amdgpu_video_codec_info;
(amdgpu_sriov_vf((adev)) && \
((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))
+#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
+ (amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
+
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
static inline bool is_virtual_machine(void)
{
-#ifdef CONFIG_X86
+#if defined(CONFIG_X86)
return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#elif defined(CONFIG_ARM64)
+ return !is_kernel_in_hyp_mode();
#else
return false;
#endif
@@ -293,7 +311,6 @@ static inline bool is_virtual_machine(void)
((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
#define amdgpu_sriov_is_normal(adev) \
((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
-
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
@@ -321,4 +338,9 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad
void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
struct amdgpu_video_codec_info *decode, uint32_t decode_array_size);
+void amdgpu_sriov_wreg(struct amdgpu_device *adev,
+ u32 offset, u32 value,
+ u32 acc_flags, u32 hwip);
+u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
+ u32 offset, u32 acc_flags, u32 hwip);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index d99c8779b51e..5224d9a39737 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -391,7 +391,6 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
int index)
{
struct drm_plane *plane;
- uint64_t modifiers[] = {DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID};
int ret;
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
@@ -402,7 +401,7 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
&amdgpu_vkms_plane_funcs,
amdgpu_vkms_formats,
ARRAY_SIZE(amdgpu_vkms_formats),
- modifiers, type, NULL);
+ NULL, type, NULL);
if (ret) {
kfree(plane);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 28f5e8b21a99..5d11978c162e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -375,6 +375,8 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
return;
+ dma_resv_assert_held(vm->root.bo->tbo.base.resv);
+
ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
amdgpu_vm_bo_relocated(base);
@@ -710,11 +712,17 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Check if all VM PDs/PTs are ready for updates
*
* Returns:
- * True if eviction list is empty.
+ * True if the VM is not evicting and the evicted list is empty.
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
- return list_empty(&vm->evicted);
+ bool ret;
+
+ amdgpu_vm_eviction_lock(vm);
+ ret = !vm->evicting;
+ amdgpu_vm_eviction_unlock(vm);
+
+ return ret && list_empty(&vm->evicted);
}
/**
@@ -1580,7 +1588,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
nptes = max(nptes, 1u);
trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
- nptes, dst, incr, upd_flags,
+ min(nptes, 32u), dst, incr, upd_flags,
vm->task_info.pid,
vm->immediate.fence_context);
amdgpu_vm_update_flags(params, to_amdgpu_bo_vm(pt),
@@ -2203,6 +2211,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
if (!bo)
return bo_va;
+ dma_resv_assert_held(bo->tbo.base.resv);
if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
bo_va->is_xgmi = true;
/* Power up XGMI if it can be potentially used */
@@ -2580,7 +2589,7 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
}
/**
- * amdgpu_vm_bo_rmv - remove a bo to a specific vm
+ * amdgpu_vm_bo_del - remove a bo from a specific vm
*
* @adev: amdgpu_device pointer
* @bo_va: requested bo_va
@@ -2589,7 +2598,7 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
*
* Object have to be reserved!
*/
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+void amdgpu_vm_bo_del(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va)
{
struct amdgpu_bo_va_mapping *mapping, *next;
@@ -2597,7 +2606,10 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_vm *vm = bo_va->base.vm;
struct amdgpu_vm_bo_base **base;
+ dma_resv_assert_held(vm->root.bo->tbo.base.resv);
+
if (bo) {
+ dma_resv_assert_held(bo->tbo.base.resv);
if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
ttm_bo_set_bulk_move(&bo->tbo, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 4d236682a118..bd7892482bbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -433,7 +433,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+void amdgpu_vm_bo_del(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
uint32_t fragment_size_default, unsigned max_level,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index e50fe25fbcb8..0a7611648573 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -283,7 +283,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
rsv->mm_node.size = size >> PAGE_SHIFT;
spin_lock(&mgr->lock);
- list_add_tail(&mgr->reservations_pending, &rsv->node);
+ list_add_tail(&rsv->node, &mgr->reservations_pending);
amdgpu_vram_mgr_do_reserve(&mgr->manager);
spin_unlock(&mgr->lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index eb06322d2972..1b108d03e785 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -34,7 +34,6 @@
#include "amdgpu_reset.h"
-#define smnPCS_XGMI23_PCS_ERROR_STATUS 0x11a01210
#define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
#define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210
@@ -69,17 +68,6 @@ static const int wafl_pcs_err_status_reg_arct[] = {
smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};
-static const int xgmi23_pcs_err_status_reg_aldebaran[] = {
- smnPCS_XGMI23_PCS_ERROR_STATUS,
- smnPCS_XGMI23_PCS_ERROR_STATUS + 0x100000,
- smnPCS_XGMI23_PCS_ERROR_STATUS + 0x200000,
- smnPCS_XGMI23_PCS_ERROR_STATUS + 0x300000,
- smnPCS_XGMI23_PCS_ERROR_STATUS + 0x400000,
- smnPCS_XGMI23_PCS_ERROR_STATUS + 0x500000,
- smnPCS_XGMI23_PCS_ERROR_STATUS + 0x600000,
- smnPCS_XGMI23_PCS_ERROR_STATUS + 0x700000
-};
-
static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = {
smnPCS_XGMI3X16_PCS_ERROR_STATUS,
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000,
@@ -757,53 +745,15 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
return psp_xgmi_terminate(&adev->psp);
}
-static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
+static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
- int r;
- struct ras_ih_if ih_info = {
- .cb = NULL,
- };
- struct ras_fs_if fs_info = {
- .sysfs_name = "xgmi_wafl_err_count",
- };
-
if (!adev->gmc.xgmi.supported ||
adev->gmc.xgmi.num_physical_nodes == 0)
return 0;
- adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
-
- if (!adev->gmc.xgmi.ras_if) {
- adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
- if (!adev->gmc.xgmi.ras_if)
- return -ENOMEM;
- adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
- adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->gmc.xgmi.ras_if->sub_block_index = 0;
- }
- ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
- r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
- &fs_info, &ih_info);
- if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
- kfree(adev->gmc.xgmi.ras_if);
- adev->gmc.xgmi.ras_if = NULL;
- }
-
- return r;
-}
+ adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev);
-static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
-{
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
- adev->gmc.xgmi.ras_if) {
- struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
- struct ras_ih_if ih_info = {
- .cb = NULL,
- };
-
- amdgpu_ras_late_fini(adev, ras_if, &ih_info);
- kfree(ras_if);
- }
+ return amdgpu_ras_block_late_init(adev, ras_block);
}
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
@@ -835,9 +785,6 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
xgmi_pcs_err_status_reg_vg20[i]);
break;
case CHIP_ALDEBARAN:
- for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
- pcs_clear_status(adev,
- xgmi23_pcs_err_status_reg_aldebaran[i]);
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++)
pcs_clear_status(adev,
xgmi3x16_pcs_err_status_reg_aldebaran[i]);
@@ -890,7 +837,7 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
return 0;
}
-static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
@@ -899,7 +846,7 @@ static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
uint32_t ue_cnt = 0, ce_cnt = 0;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
- return -EINVAL;
+		return;
err_data->ue_count = 0;
err_data->ce_count = 0;
@@ -938,13 +885,6 @@ static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
}
break;
case CHIP_ALDEBARAN:
- /* check xgmi23 pcs error */
- for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++) {
- data = RREG32_PCIE(xgmi23_pcs_err_status_reg_aldebaran[i]);
- if (data)
- amdgpu_xgmi_query_pcs_error_status(adev,
- data, &ue_cnt, &ce_cnt, true);
- }
/* check xgmi3x16 pcs error */
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) {
data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]);
@@ -965,17 +905,53 @@ static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
break;
}
- adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
+ adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev);
err_data->ue_count += ue_cnt;
err_data->ce_count += ce_cnt;
+}
- return 0;
+/* Trigger XGMI/WAFL error */
+static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, void *inject_if)
+{
+ int ret = 0;
+ struct ta_ras_trigger_error_input *block_info =
+ (struct ta_ras_trigger_error_input *)inject_if;
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ dev_warn(adev->dev, "Failed to disallow df cstate");
+
+ if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
+ dev_warn(adev->dev, "Failed to disallow XGMI power down");
+
+ ret = psp_ras_trigger_error(&adev->psp, block_info);
+
+ if (amdgpu_ras_intr_triggered())
+ return ret;
+
+ if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
+ dev_warn(adev->dev, "Failed to allow XGMI power down");
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ dev_warn(adev->dev, "Failed to allow df cstate");
+
+ return ret;
}
-const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs = {
- .ras_late_init = amdgpu_xgmi_ras_late_init,
- .ras_fini = amdgpu_xgmi_ras_fini,
+struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = {
.query_ras_error_count = amdgpu_xgmi_query_ras_error_count,
.reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count,
+ .ras_error_inject = amdgpu_ras_error_inject_xgmi,
+};
+
+struct amdgpu_xgmi_ras xgmi_ras = {
+ .ras_block = {
+ .ras_comm = {
+ .name = "xgmi_wafl",
+ .block = AMDGPU_RAS_BLOCK__XGMI_WAFL,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ },
+ .hw_ops = &xgmi_ras_hw_ops,
+ .ras_late_init = amdgpu_xgmi_ras_late_init,
+ },
};
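Compared with the old xgmi_ras_funcs table, the per-block boilerplate now lives in the common RAS core, which dispatches through hw_ops. A condensed sketch of the assumed dispatch (the ops names are the ones defined in this structure; the surrounding core code is not shown in this diff):

    /* somewhere in the common RAS core, roughly: */
    if (block_obj->hw_ops && block_obj->hw_ops->query_ras_error_count)
            block_obj->hw_ops->query_ras_error_count(adev, &err_data);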
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index dcaad22be492..552e6fb55aa8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -24,7 +24,7 @@
#include <drm/task_barrier.h>
#include "amdgpu_psp.h"
-
+#include "amdgpu_ras.h"
struct amdgpu_hive_info {
struct kobject kobj;
@@ -51,7 +51,7 @@ struct amdgpu_pcs_ras_field {
uint32_t pcs_err_shift;
};
-extern const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs;
+extern struct amdgpu_xgmi_ras xgmi_ras;
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
@@ -67,7 +67,8 @@ uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
{
- return (adev != bo_adev &&
+ return (amdgpu_use_xgmi_p2p &&
+ adev != bo_adev &&
adev->gmc.xgmi.hive_id &&
adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
index 3ea557864320..88642e7ecdf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
@@ -68,12 +68,13 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 0):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(1, 5, 0):
athub_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
athub_update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
index ab6a07e5e8c4..a720436857b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -78,6 +78,7 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev,
return 0;
switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(1, 3, 1):
case IP_VERSION(2, 0, 0):
case IP_VERSION(2, 0, 2):
athub_v2_0_update_medium_grain_clock_gating(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
index 2edefd10e56c..ad8e87d3d2cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
@@ -74,6 +74,7 @@ int athub_v2_1_set_clockgating(struct amdgpu_device *adev,
case IP_VERSION(2, 1, 0):
case IP_VERSION(2, 1, 1):
case IP_VERSION(2, 1, 2):
+ case IP_VERSION(2, 4, 0):
athub_v2_1_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE);
athub_v2_1_update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index f10ce740a29c..de6d10390ab2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1719,7 +1719,7 @@ static void cik_program_aspm(struct amdgpu_device *adev)
bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
bool disable_clkreq = false;
- if (amdgpu_aspm == 0)
+ if (!amdgpu_device_should_use_aspm(adev))
return;
if (pci_is_root_bus(adev->pdev->bus))
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index fb61c0814115..288fce7dc0ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2532,7 +2532,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
break;
}
/* adjust pm to dpms */
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_dpm_compute_clocks(adev);
}
static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 17942a11366d..cbe5250b31cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2608,7 +2608,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
break;
}
/* adjust pm to dpms */
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_dpm_compute_clocks(adev);
}
static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 2ec99ec8e1a3..982855e6cf52 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2424,7 +2424,7 @@ static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
break;
}
/* adjust pm to dpms */
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_dpm_compute_clocks(adev);
}
static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index de11fbe5aba2..84440741c60b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2433,7 +2433,7 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
break;
}
/* adjust pm to dpms */
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_dpm_compute_clocks(adev);
}
static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 43c5e3ec9a39..f4dfca013ec5 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -458,7 +458,7 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
#define DEFERRED_ARM_MASK (1 << 31)
static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
- int counter_idx, uint64_t config,
+ uint64_t config, int counter_idx,
bool is_deferred)
{
@@ -476,8 +476,8 @@ static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
}
static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
- int counter_idx,
- uint64_t config)
+ uint64_t config,
+ int counter_idx)
{
return (df_v3_6_pmc_has_counter(adev, config, counter_idx) &&
(adev->df_perfmon_config_assign_mask[counter_idx]
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index dbe7442fb25c..f4c6accd3226 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -56,10 +56,6 @@
#define GFX10_NUM_GFX_RINGS_Sienna_Cichlid 1
#define GFX10_MEC_HPD_SIZE 2048
-#define RLCG_VFGATE_DISABLED 0x4000000
-#define RLCG_WRONG_OPERATION_TYPE 0x2000000
-#define RLCG_NOT_IN_RANGE 0x1000000
-
#define F32_CE_PROGRAM_RAM_SIZE 65536
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -110,6 +106,12 @@
#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1
#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026
#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh_BASE_IDX 1
+
+#define mmGOLDEN_TSC_COUNT_UPPER_GC_10_3_6 0x002d
+#define mmGOLDEN_TSC_COUNT_UPPER_GC_10_3_6_BASE_IDX 1
+#define mmGOLDEN_TSC_COUNT_LOWER_GC_10_3_6 0x002e
+#define mmGOLDEN_TSC_COUNT_LOWER_GC_10_3_6_BASE_IDX 1
+
#define mmSPI_CONFIG_CNTL_1_Vangogh 0x2441
#define mmSPI_CONFIG_CNTL_1_Vangogh_BASE_IDX 1
#define mmVGT_TF_MEMORY_BASE_HI_Vangogh 0x2261
@@ -180,14 +182,6 @@
#define mmRLC_SPARE_INT_0_Sienna_Cichlid 0x4ca5
#define mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX 1
-#define GFX_RLCG_GC_WRITE_OLD (0x8 << 28)
-#define GFX_RLCG_GC_WRITE (0x0 << 28)
-#define GFX_RLCG_GC_READ (0x1 << 28)
-#define GFX_RLCG_MMHUB_WRITE (0x2 << 28)
-
-#define RLCG_ERROR_REPORT_ENABLED(adev) \
- (amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
-
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -256,13 +250,6 @@ MODULE_FIRMWARE("amdgpu/yellow_carp_mec.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_mec2.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_rlc.bin");
-MODULE_FIRMWARE("amdgpu/cyan_skillfish_ce.bin");
-MODULE_FIRMWARE("amdgpu/cyan_skillfish_pfp.bin");
-MODULE_FIRMWARE("amdgpu/cyan_skillfish_me.bin");
-MODULE_FIRMWARE("amdgpu/cyan_skillfish_mec.bin");
-MODULE_FIRMWARE("amdgpu/cyan_skillfish_mec2.bin");
-MODULE_FIRMWARE("amdgpu/cyan_skillfish_rlc.bin");
-
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_ce.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_pfp.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_me.bin");
@@ -270,6 +257,20 @@ MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec2.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_10_3_6_ce.bin");
+MODULE_FIRMWARE("amdgpu/gc_10_3_6_pfp.bin");
+MODULE_FIRMWARE("amdgpu/gc_10_3_6_me.bin");
+MODULE_FIRMWARE("amdgpu/gc_10_3_6_mec.bin");
+MODULE_FIRMWARE("amdgpu/gc_10_3_6_mec2.bin");
+MODULE_FIRMWARE("amdgpu/gc_10_3_6_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/gc_10_3_7_ce.bin");
+MODULE_FIRMWARE("amdgpu/gc_10_3_7_pfp.bin");
+MODULE_FIRMWARE("amdgpu/gc_10_3_7_me.bin");
+MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec.bin");
+MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec2.bin");
+MODULE_FIRMWARE("amdgpu/gc_10_3_7_rlc.bin");
+
static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
@@ -1463,143 +1464,6 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
};
-static bool gfx_v10_get_rlcg_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip,
- int write, u32 *rlcg_flag)
-{
- switch (hwip) {
- case GC_HWIP:
- if (amdgpu_sriov_reg_indirect_gc(adev)) {
- *rlcg_flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
-
- return true;
- /* only in new version, AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC enabled simultaneously */
- } else if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ)) {
- *rlcg_flag = GFX_RLCG_GC_WRITE_OLD;
-
- return true;
- }
-
- break;
- case MMHUB_HWIP:
- if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
- (acc_flags & AMDGPU_REGS_RLC) && write) {
- *rlcg_flag = GFX_RLCG_MMHUB_WRITE;
- return true;
- }
-
- break;
- default:
- DRM_DEBUG("Not program register by RLCG\n");
- }
-
- return false;
-}
-
-static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
-{
- static void *scratch_reg0;
- static void *scratch_reg1;
- static void *scratch_reg2;
- static void *scratch_reg3;
- static void *spare_int;
- static uint32_t grbm_cntl;
- static uint32_t grbm_idx;
- uint32_t i = 0;
- uint32_t retries = 50000;
- u32 ret = 0;
- u32 tmp;
-
- scratch_reg0 = adev->rmmio +
- (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0) * 4;
- scratch_reg1 = adev->rmmio +
- (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1) * 4;
- scratch_reg2 = adev->rmmio +
- (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4;
- scratch_reg3 = adev->rmmio +
- (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
-
- if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) {
- spare_int = adev->rmmio +
- (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX]
- + mmRLC_SPARE_INT_0_Sienna_Cichlid) * 4;
- } else {
- spare_int = adev->rmmio +
- (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
- }
-
- grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
- grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
-
- if (offset == grbm_cntl || offset == grbm_idx) {
- if (offset == grbm_cntl)
- writel(v, scratch_reg2);
- else if (offset == grbm_idx)
- writel(v, scratch_reg3);
-
- writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
- } else {
- writel(v, scratch_reg0);
- writel(offset | flag, scratch_reg1);
- writel(1, spare_int);
-
- for (i = 0; i < retries; i++) {
- tmp = readl(scratch_reg1);
- if (!(tmp & flag))
- break;
-
- udelay(10);
- }
-
- if (i >= retries) {
- if (RLCG_ERROR_REPORT_ENABLED(adev)) {
- if (tmp & RLCG_VFGATE_DISABLED)
- pr_err("The vfgate is disabled, program reg:0x%05x failed!\n", offset);
- else if (tmp & RLCG_WRONG_OPERATION_TYPE)
- pr_err("Wrong operation type, program reg:0x%05x failed!\n", offset);
- else if (tmp & RLCG_NOT_IN_RANGE)
- pr_err("The register is not in range, program reg:0x%05x failed!\n", offset);
- else
- pr_err("Unknown error type, program reg:0x%05x failed!\n", offset);
- } else
- pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
- }
- }
-
- ret = readl(scratch_reg0);
-
- return ret;
-}
-
-static void gfx_v10_sriov_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 acc_flags, u32 hwip)
-{
- u32 rlcg_flag;
-
- if (!amdgpu_sriov_runtime(adev) &&
- gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 1, &rlcg_flag)) {
- gfx_v10_rlcg_rw(adev, offset, value, rlcg_flag);
- return;
- }
-
- if (acc_flags & AMDGPU_REGS_NO_KIQ)
- WREG32_NO_KIQ(offset, value);
- else
- WREG32(offset, value);
-}
-
-static u32 gfx_v10_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
-{
- u32 rlcg_flag;
-
- if (!amdgpu_sriov_runtime(adev) &&
- gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 0, &rlcg_flag))
- return gfx_v10_rlcg_rw(adev, offset, 0, rlcg_flag);
-
- if (acc_flags & AMDGPU_REGS_NO_KIQ)
- return RREG32_NO_KIQ(offset);
- else
- return RREG32(offset);
-}
-
static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
{
/* Pending on emulation bring up */
@@ -3557,6 +3421,57 @@ static const struct soc15_reg_golden golden_settings_gc_10_0_cyan_skillfish[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000)
};
+static const struct soc15_reg_golden golden_settings_gc_10_3_6[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x00000044),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x00000044),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x00f80988),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x000001ff, 0x00000020),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000820),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQG_CONFIG, 0x000017ff, 0x00001000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0xffffff7f, 0x00010020),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00100000)
+};
+
+static const struct soc15_reg_golden golden_settings_gc_10_3_7[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x000000e4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000041),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x00f80988),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x000001ff, 0x00000020),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf000003f, 0x01200007),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000820),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQG_CONFIG, 0x000017ff, 0x00001000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0xffffff7f, 0x00010020),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00100000)
+};
+
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -3790,10 +3705,21 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_5));
break;
case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
soc15_program_register_sequence(adev,
golden_settings_gc_10_0_cyan_skillfish,
(const u32)ARRAY_SIZE(golden_settings_gc_10_0_cyan_skillfish));
break;
+ case IP_VERSION(10, 3, 6):
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_10_3_6,
+ (const u32)ARRAY_SIZE(golden_settings_gc_10_3_6));
+ break;
+ case IP_VERSION(10, 3, 7):
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_10_3_7,
+ (const u32)ARRAY_SIZE(golden_settings_gc_10_3_7));
+ break;
default:
break;
}
@@ -3968,6 +3894,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
if ((adev->gfx.me_fw_version >= 0x00000046) &&
(adev->gfx.me_feature_version >= 27) &&
(adev->gfx.pfp_fw_version >= 0x00000068) &&
@@ -3981,7 +3908,9 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
adev->gfx.cp_fw_write_wait = true;
break;
default:
@@ -4102,11 +4031,15 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 3):
chip_name = "yellow_carp";
break;
+ case IP_VERSION(10, 3, 6):
+ chip_name = "gc_10_3_6";
+ break;
case IP_VERSION(10, 1, 3):
- if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
- chip_name = "cyan_skillfish2";
- else
- chip_name = "cyan_skillfish";
+ case IP_VERSION(10, 1, 4):
+ chip_name = "cyan_skillfish2";
+ break;
+ case IP_VERSION(10, 3, 7):
+ chip_name = "gc_10_3_7";
break;
default:
BUG();
@@ -4448,6 +4381,30 @@ static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev)
(void **)&adev->gfx.rlc.cp_table_ptr);
}
+static void gfx_v10_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
+{
+ struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
+
+ reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+ reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
+ reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
+ reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
+ reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3);
+ reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL);
+ reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX);
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ reg_access_ctrl->spare_int =
+ SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT_0_Sienna_Cichlid);
+ break;
+ default:
+ reg_access_ctrl->spare_int =
+ SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT);
+ break;
+ }
+ adev->gfx.rlc.rlcg_reg_access_supported = true;
+}
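The offsets recorded here are consumed by the generic amdgpu_virt_rlcg_reg_rw() helper added earlier in this series, which turns them back into MMIO addresses. A condensed sketch of the relationship, using lines that appear in both hunks:

    /* producer (per-ASIC): reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
     * consumer (common):   scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
     */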
+
static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
{
const struct cs_section_def *cs_data;
@@ -4468,6 +4425,7 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.funcs->update_spm_vmid)
adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -4678,7 +4636,9 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -4689,6 +4649,7 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
break;
case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -4801,6 +4762,7 @@ static int gfx_v10_0_sw_init(void *handle)
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
@@ -4813,7 +4775,9 @@ static int gfx_v10_0_sw_init(void *handle)
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
@@ -4865,10 +4829,14 @@ static int gfx_v10_0_sw_init(void *handle)
if (r)
return r;
- r = gfx_v10_0_rlc_init(adev);
- if (r) {
- DRM_ERROR("Failed to init rlc BOs!\n");
- return r;
+ if (adev->gfx.rlc.funcs) {
+ if (adev->gfx.rlc.funcs->init) {
+ r = adev->gfx.rlc.funcs->init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to init rlc BOs!\n");
+ return r;
+ }
+ }
}
r = gfx_v10_0_mec_init(adev);
@@ -5047,7 +5015,8 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
bitmap = i * adev->gfx.config.max_sh_per_se + j;
if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) ||
- (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3))) &&
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) ||
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 6))) &&
((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
continue;
gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
@@ -6321,7 +6290,9 @@ static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
DOORBELL_RANGE_LOWER_Sienna_Cichlid, ring->doorbell_index);
WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
@@ -6458,7 +6429,9 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid, 0);
break;
default:
@@ -6472,7 +6445,9 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK |
CP_MEC_CNTL__MEC_ME2_HALT_MASK));
@@ -6570,7 +6545,9 @@ static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
@@ -7300,6 +7277,8 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
break;
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
return true;
default:
data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE);
@@ -7334,7 +7313,9 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
/* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
@@ -7654,6 +7635,7 @@ static int gfx_v10_0_soft_reset(void *handle)
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY_Sienna_Cichlid))
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
@@ -7721,6 +7703,21 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
preempt_enable();
clock = clock_lo | (clock_hi << 32ULL);
break;
+ case IP_VERSION(10, 3, 6):
+ preempt_disable();
+ clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_GC_10_3_6);
+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_GC_10_3_6);
+ hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_GC_10_3_6);
+ /* The SMUIO TSC clock runs at 100MHz, so the lower 32 bits carry over
+ * roughly every 42 seconds.
+ */
+ if (hi_check != clock_hi) {
+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_GC_10_3_6);
+ clock_hi = hi_check;
+ }
+ preempt_enable();
+ clock = clock_lo | (clock_hi << 32ULL);
+ break;
default:
preempt_disable();
clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER);
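Editor's note: the hunk above reads a 64-bit free-running counter that the hardware exposes as two 32-bit halves. A minimal, self-contained C sketch of the same high/low/high read pattern follows; read_hi()/read_lo() and the backing variables are hypothetical stand-ins for the RREG32_SOC15_NO_KIQ() calls, not amdgpu APIs.

#include <stdint.h>

/* Hypothetical MMIO-backed halves standing in for the SMUIO
 * GOLDEN_TSC_COUNT_UPPER/LOWER registers read above. */
static volatile uint32_t tsc_hi, tsc_lo;

static uint32_t read_hi(void) { return tsc_hi; }
static uint32_t read_lo(void) { return tsc_lo; }

static uint64_t read_counter64(void)
{
	uint32_t hi = read_hi();
	uint32_t lo = read_lo();
	uint32_t hi_check = read_hi();

	/* If the upper half changed, the lower half wrapped between the two
	 * reads; re-read it so hi and lo describe the same instant.  At
	 * 100 MHz the low word wraps roughly every 42 seconds. */
	if (hi_check != hi) {
		lo = read_lo();
		hi = hi_check;
	}
	return ((uint64_t)hi << 32) | lo;
}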
@@ -7778,6 +7775,7 @@ static int gfx_v10_0_early_init(void *handle)
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
break;
case IP_VERSION(10, 3, 0):
@@ -7785,7 +7783,9 @@ static int gfx_v10_0_early_init(void *handle)
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_Sienna_Cichlid;
break;
default:
@@ -7801,6 +7801,9 @@ static int gfx_v10_0_early_init(void *handle)
gfx_v10_0_set_gds_init(adev);
gfx_v10_0_set_rlc_funcs(adev);
+ /* init rlcg reg access ctrl */
+ gfx_v10_0_init_rlcg_reg_access_ctrl(adev);
+
return 0;
}
@@ -7843,7 +7846,9 @@ static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);
/* wait for RLC_SAFE_MODE */
@@ -7879,7 +7884,9 @@ static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);
break;
default:
@@ -8333,6 +8340,8 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
break;
@@ -8377,8 +8386,6 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
.reset = gfx_v10_0_rlc_reset,
.start = gfx_v10_0_rlc_start,
.update_spm_vmid = gfx_v10_0_update_spm_vmid,
- .sriov_wreg = gfx_v10_sriov_wreg,
- .sriov_rreg = gfx_v10_sriov_rreg,
.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};
@@ -8403,6 +8410,8 @@ static int gfx_v10_0_set_powergating_state(void *handle,
break;
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
gfx_v10_cntl_pg(adev, enable);
amdgpu_gfx_off_ctrl(adev, enable);
break;
@@ -8429,7 +8438,9 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
gfx_v10_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@ -9366,6 +9377,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
+ .secure_submission_supported = true,
.vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v10_0_ring_get_rptr_gfx,
.get_wptr = gfx_v10_0_ring_get_wptr_gfx,
@@ -9537,11 +9549,14 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
break;
case IP_VERSION(10, 1, 2):
@@ -9634,7 +9649,9 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
bitmap = i * adev->gfx.config.max_sh_per_se + j;
if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) ||
- (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3))) &&
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) ||
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 6)) ||
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 7))) &&
((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
continue;
mask = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 9189fb85a4dd..46d4bf27ebbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -63,13 +63,6 @@
#define mmGCEA_PROBE_MAP 0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX 0
-#define GFX9_RLCG_GC_WRITE_OLD (0x8 << 28)
-#define GFX9_RLCG_GC_WRITE (0x0 << 28)
-#define GFX9_RLCG_GC_READ (0x1 << 28)
-#define GFX9_RLCG_VFGATE_DISABLED 0x4000000
-#define GFX9_RLCG_WRONG_OPERATION_TYPE 0x2000000
-#define GFX9_RLCG_NOT_IN_RANGE 0x1000000
-
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
@@ -746,128 +739,6 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
-static u32 gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
-{
- static void *scratch_reg0;
- static void *scratch_reg1;
- static void *scratch_reg2;
- static void *scratch_reg3;
- static void *spare_int;
- static uint32_t grbm_cntl;
- static uint32_t grbm_idx;
- uint32_t i = 0;
- uint32_t retries = 50000;
- u32 ret = 0;
- u32 tmp;
-
- scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
- scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
- scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG2_BASE_IDX] + mmSCRATCH_REG2)*4;
- scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG3_BASE_IDX] + mmSCRATCH_REG3)*4;
- spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
-
- grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
- grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
-
- if (offset == grbm_cntl || offset == grbm_idx) {
- if (offset == grbm_cntl)
- writel(v, scratch_reg2);
- else if (offset == grbm_idx)
- writel(v, scratch_reg3);
-
- writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
- } else {
- /*
- * SCRATCH_REG0 = read/write value
- * SCRATCH_REG1[30:28] = command
- * SCRATCH_REG1[19:0] = address in dword
- * SCRATCH_REG1[26:24] = Error reporting
- */
- writel(v, scratch_reg0);
- writel(offset | flag, scratch_reg1);
- writel(1, spare_int);
-
- for (i = 0; i < retries; i++) {
- tmp = readl(scratch_reg1);
- if (!(tmp & flag))
- break;
-
- udelay(10);
- }
-
- if (i >= retries) {
- if (amdgpu_sriov_reg_indirect_gc(adev)) {
- if (tmp & GFX9_RLCG_VFGATE_DISABLED)
- pr_err("The vfgate is disabled, program reg:0x%05x failed!\n", offset);
- else if (tmp & GFX9_RLCG_WRONG_OPERATION_TYPE)
- pr_err("Wrong operation type, program reg:0x%05x failed!\n", offset);
- else if (tmp & GFX9_RLCG_NOT_IN_RANGE)
- pr_err("The register is not in range, program reg:0x%05x failed!\n", offset);
- else
- pr_err("Unknown error type, program reg:0x%05x failed!\n", offset);
- } else
- pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
- }
- }
-
- ret = readl(scratch_reg0);
-
- return ret;
-}
-
-static bool gfx_v9_0_get_rlcg_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip,
- int write, u32 *rlcg_flag)
-{
-
- switch (hwip) {
- case GC_HWIP:
- if (amdgpu_sriov_reg_indirect_gc(adev)) {
- *rlcg_flag = write ? GFX9_RLCG_GC_WRITE : GFX9_RLCG_GC_READ;
-
- return true;
- /* only in new version, AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC enabled simultaneously */
- } else if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
- *rlcg_flag = GFX9_RLCG_GC_WRITE_OLD;
- return true;
- }
-
- break;
- default:
- return false;
- }
-
- return false;
-}
-
-static u32 gfx_v9_0_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
-{
- u32 rlcg_flag;
-
- if (!amdgpu_sriov_runtime(adev) && gfx_v9_0_get_rlcg_flag(adev, acc_flags, hwip, 0, &rlcg_flag))
- return gfx_v9_0_rlcg_rw(adev, offset, 0, rlcg_flag);
-
- if (acc_flags & AMDGPU_REGS_NO_KIQ)
- return RREG32_NO_KIQ(offset);
- else
- return RREG32(offset);
-}
-
-static void gfx_v9_0_sriov_wreg(struct amdgpu_device *adev, u32 offset,
- u32 value, u32 acc_flags, u32 hwip)
-{
- u32 rlcg_flag;
-
- if (!amdgpu_sriov_runtime(adev) && gfx_v9_0_get_rlcg_flag(adev, acc_flags, hwip, 1, &rlcg_flag)) {
- gfx_v9_0_rlcg_rw(adev, offset, value, rlcg_flag);
- return;
- }
-
- if (acc_flags & AMDGPU_REGS_NO_KIQ)
- WREG32_NO_KIQ(offset, value);
- else
- WREG32(offset, value);
-}
-
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
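Editor's note: the block removed above implemented the SR-IOV RLCG indirect register access by hand; this series replaces it with a common helper driven by the reg_access_ctrl offsets initialized below. A rough C sketch of the handshake the removed code performed, with hypothetical volatile variables standing in for the scratch registers, is:

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical stand-ins for SCRATCH_REG0, SCRATCH_REG1 and RLC_SPARE_INT. */
static volatile uint32_t scratch_reg0, scratch_reg1, spare_int;

static bool rlcg_write(uint32_t dword_offset, uint32_t value, uint32_t cmd_flag)
{
	int retries = 50000;

	scratch_reg0 = value;                   /* payload */
	scratch_reg1 = dword_offset | cmd_flag; /* dword address + command */
	spare_int = 1;                          /* doorbell to the RLC firmware */

	/* The firmware acknowledges by clearing the command bits. */
	while (retries-- > 0) {
		if (!(scratch_reg1 & cmd_flag))
			return true;
		/* the real code delays ~10us between polls */
	}
	return false;                           /* timed out */
}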
@@ -882,7 +753,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
-static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
+static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
void *inject_if);
@@ -2008,6 +1879,21 @@ static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
return 4;
}
+static void gfx_v9_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
+{
+ struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
+
+ reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+ reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
+ reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
+ reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
+ reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3);
+ reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL);
+ reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX);
+ reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT);
+ adev->gfx.rlc.rlcg_reg_access_supported = true;
+}
+
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
const struct cs_section_def *cs_data;
@@ -2197,12 +2083,16 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
};
-static const struct amdgpu_gfx_ras_funcs gfx_v9_0_ras_funcs = {
- .ras_late_init = amdgpu_gfx_ras_late_init,
- .ras_fini = amdgpu_gfx_ras_fini,
- .ras_error_inject = &gfx_v9_0_ras_error_inject,
- .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
- .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
+const struct amdgpu_ras_block_hw_ops gfx_v9_0_ras_ops = {
+ .ras_error_inject = &gfx_v9_0_ras_error_inject,
+ .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
+};
+
+static struct amdgpu_gfx_ras gfx_v9_0_ras = {
+ .ras_block = {
+ .hw_ops = &gfx_v9_0_ras_ops,
+ },
};
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -2231,7 +2121,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
DRM_INFO("fix gfx.config for vega12\n");
break;
case IP_VERSION(9, 4, 0):
- adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs;
+ adev->gfx.ras = &gfx_v9_0_ras;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2258,7 +2148,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
break;
case IP_VERSION(9, 4, 1):
- adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs;
+ adev->gfx.ras = &gfx_v9_4_ras;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2279,7 +2169,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config |= 0x22010042;
break;
case IP_VERSION(9, 4, 2):
- adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs;
+ adev->gfx.ras = &gfx_v9_4_2_ras;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2298,6 +2188,27 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
break;
}
+ if (adev->gfx.ras) {
+ err = amdgpu_ras_register_ras_block(adev, &adev->gfx.ras->ras_block);
+ if (err) {
+ DRM_ERROR("Failed to register gfx ras block!\n");
+ return err;
+ }
+
+ strcpy(adev->gfx.ras->ras_block.ras_comm.name, "gfx");
+ adev->gfx.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
+ adev->gfx.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gfx.ras_if = &adev->gfx.ras->ras_block.ras_comm;
+
+ /* If no special ras_late_init function is defined, use the default gfx ras_late_init */
+ if (!adev->gfx.ras->ras_block.ras_late_init)
+ adev->gfx.ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
+
+ /* If no special ras_cb function is defined, use the default ras_cb */
+ if (!adev->gfx.ras->ras_block.ras_cb)
+ adev->gfx.ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
+ }
+
adev->gfx.config.gb_addr_config = gb_addr_config;
adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
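Editor's note: the hunk above shows the registration pattern that recurs for the umc, mmhub, hdp, mca and nbio blocks later in this diff: hardware callbacks live in a per-block hw_ops table, the block object is registered with the RAS core, and any callback the block does not provide falls back to a shared default. A simplified, self-contained sketch of that pattern (hypothetical types, not the amdgpu structures):

#include <stdio.h>
#include <string.h>

struct ras_common { char name[16]; int block_id; };

struct ras_block {
	struct ras_common comm;
	void (*query_error_count)(void);        /* hardware-specific op */
	int  (*late_init)(void);                /* optional, may be NULL */
};

static int default_late_init(void)
{
	puts("common ras_late_init");
	return 0;
}

static int register_ras_block(struct ras_block *blk, const char *name, int id)
{
	strcpy(blk->comm.name, name);
	blk->comm.block_id = id;
	if (!blk->late_init)                    /* fall back to the shared default */
		blk->late_init = default_late_init;
	return blk->late_init();
}

static void gfx_query_error_count(void) { puts("gfx: query error count"); }

int main(void)
{
	struct ras_block gfx_ras = { .query_error_count = gfx_query_error_count };
	return register_ras_block(&gfx_ras, "gfx", 0);
}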
@@ -2434,10 +2345,14 @@ static int gfx_v9_0_sw_init(void *handle)
return r;
}
- r = adev->gfx.rlc.funcs->init(adev);
- if (r) {
- DRM_ERROR("Failed to init rlc BOs!\n");
- return r;
+ if (adev->gfx.rlc.funcs) {
+ if (adev->gfx.rlc.funcs->init) {
+ r = adev->gfx.rlc.funcs->init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to init rlc BOs!\n");
+ return r;
+ }
+ }
}
r = gfx_v9_0_mec_init(adev);
@@ -2513,10 +2428,6 @@ static int gfx_v9_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->gfx.ras_funcs &&
- adev->gfx.ras_funcs->ras_fini)
- adev->gfx.ras_funcs->ras_fini(adev);
-
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
for (i = 0; i < adev->gfx.num_compute_rings; i++)
@@ -4840,6 +4751,9 @@ static int gfx_v9_0_early_init(void *handle)
gfx_v9_0_set_gds_init(adev);
gfx_v9_0_set_rlc_funcs(adev);
+ /* init rlcg reg access ctrl */
+ gfx_v9_0_init_rlcg_reg_access_ctrl(adev);
+
return 0;
}
@@ -4870,16 +4784,9 @@ static int gfx_v9_0_ecc_late_init(void *handle)
if (r)
return r;
- if (adev->gfx.ras_funcs &&
- adev->gfx.ras_funcs->ras_late_init) {
- r = adev->gfx.ras_funcs->ras_late_init(adev);
- if (r)
- return r;
- }
-
- if (adev->gfx.ras_funcs &&
- adev->gfx.ras_funcs->enable_watchdog_timer)
- adev->gfx.ras_funcs->enable_watchdog_timer(adev);
+ if (adev->gfx.ras &&
+ adev->gfx.ras->enable_watchdog_timer)
+ adev->gfx.ras->enable_watchdog_timer(adev);
return 0;
}
@@ -5250,8 +5157,6 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.reset = gfx_v9_0_rlc_reset,
.start = gfx_v9_0_rlc_start,
.update_spm_vmid = gfx_v9_0_update_spm_vmid,
- .sriov_wreg = gfx_v9_0_sriov_wreg,
- .sriov_rreg = gfx_v9_0_sriov_rreg,
.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};
@@ -6819,7 +6724,7 @@ static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
}
-static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
+static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
@@ -6828,7 +6733,7 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
uint32_t reg_value;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
- return -EINVAL;
+ return;
err_data->ue_count = 0;
err_data->ce_count = 0;
@@ -6857,8 +6762,6 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
mutex_unlock(&adev->grbm_idx_mutex);
gfx_v9_0_query_utc_edc_status(adev, err_data);
-
- return 0;
}
static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
@@ -6962,6 +6865,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
+ .secure_submission_supported = true,
.vmhub = AMDGPU_GFXHUB_0,
.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index b4789dfc2bb9..c67e387a97f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -863,7 +863,7 @@ static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+static void gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
@@ -872,7 +872,7 @@ static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
uint32_t reg_value;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
- return -EINVAL;
+ return;
err_data->ue_count = 0;
err_data->ce_count = 0;
@@ -903,7 +903,6 @@ static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
gfx_v9_4_query_utc_edc_status(adev, err_data);
- return 0;
}
static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
@@ -1029,11 +1028,16 @@ static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
mutex_unlock(&adev->grbm_idx_mutex);
}
-const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs = {
- .ras_late_init = amdgpu_gfx_ras_late_init,
- .ras_fini = amdgpu_gfx_ras_fini,
- .ras_error_inject = &gfx_v9_4_ras_error_inject,
- .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
- .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
- .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
+
+const struct amdgpu_ras_block_hw_ops gfx_v9_4_ras_ops = {
+ .ras_error_inject = &gfx_v9_4_ras_error_inject,
+ .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
+ .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
+};
+
+struct amdgpu_gfx_ras gfx_v9_4_ras = {
+ .ras_block = {
+ .hw_ops = &gfx_v9_4_ras_ops,
+ },
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
index bdd16b568021..ca520a767267 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -24,6 +24,6 @@
#ifndef __GFX_V9_4_H__
#define __GFX_V9_4_H__
-extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs;
+extern struct amdgpu_gfx_ras gfx_v9_4_ras;
#endif /* __GFX_V9_4_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index c4f37a161875..7653ebd0e67b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -1641,14 +1641,14 @@ static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
+static void gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
uint32_t sec_count = 0, ded_count = 0;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
- return -EINVAL;
+ return;
err_data->ue_count = 0;
err_data->ce_count = 0;
@@ -1661,7 +1661,6 @@ static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
err_data->ce_count += sec_count;
err_data->ue_count += ded_count;
- return 0;
}
static void gfx_v9_4_2_reset_utc_err_status(struct amdgpu_device *adev)
@@ -1931,13 +1930,17 @@ static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev)
mutex_unlock(&adev->grbm_idx_mutex);
}
-const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs = {
- .ras_late_init = amdgpu_gfx_ras_late_init,
- .ras_fini = amdgpu_gfx_ras_fini,
- .ras_error_inject = &gfx_v9_4_2_ras_error_inject,
- .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
- .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
- .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
- .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
+struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = {
+ .ras_error_inject = &gfx_v9_4_2_ras_error_inject,
+ .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
+ .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
+ .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
+};
+
+struct amdgpu_gfx_ras gfx_v9_4_2_ras = {
+ .ras_block = {
+ .hw_ops = &gfx_v9_4_2_ras_ops,
+ },
.enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
index 6db1f88509af..7584624b641c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
@@ -31,6 +31,6 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev);
int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev);
-extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs;
+extern struct amdgpu_gfx_ras gfx_v9_4_2_ras;
#endif /* __GFX_V9_4_2_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index b4eddf6e98a6..ff738e9725ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -543,7 +543,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
adev->gfx.config.max_sh_per_se *
adev->gfx.config.max_shader_engines);
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
/* Get SA disabled bitmap from eFuse setting */
efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
@@ -566,6 +568,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
disabled_sa = tmp;
WREG32_SOC15(GC, 0, mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa);
+ break;
+ default:
+ break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 222b1da9d601..3c1d440824a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -666,11 +666,27 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
- adev->umc.ras_funcs = &umc_v8_7_ras_funcs;
+ adev->umc.ras = &umc_v8_7_ras;
break;
default:
break;
}
+ if (adev->umc.ras) {
+ amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
+
+ strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
+ adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
+ adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
+
+ /* If no special ras_late_init function is defined, use the default ras_late_init */
+ if (!adev->umc.ras->ras_block.ras_late_init)
+ adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
+
+ /* If no special ras_cb function is defined, use the default ras_cb */
+ if (!adev->umc.ras->ras_block.ras_cb)
+ adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
+ }
}
@@ -679,6 +695,7 @@ static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
switch (adev->ip_versions[MMHUB_HWIP][0]) {
case IP_VERSION(2, 3, 0):
case IP_VERSION(2, 4, 0):
+ case IP_VERSION(2, 4, 1):
adev->mmhub.funcs = &mmhub_v2_3_funcs;
break;
default:
@@ -695,7 +712,9 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
break;
default:
@@ -707,6 +726,7 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
static int gmc_v10_0_early_init(void *handle)
{
+ int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v10_0_set_mmhub_funcs(adev);
@@ -722,6 +742,10 @@ static int gmc_v10_0_early_init(void *handle)
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+ r = amdgpu_gmc_ras_early_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -864,12 +888,15 @@ static int gmc_v10_0_sw_init(void *handle)
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
adev->num_vmhubs = 2;
/*
* To fulfill 4-level page support,
@@ -921,7 +948,6 @@ static int gmc_v10_0_sw_init(void *handle)
return r;
amdgpu_gmc_get_vbios_allocations(adev);
- amdgpu_gmc_get_reserved_allocation(adev);
/* Memory manager */
r = amdgpu_bo_init(adev);
@@ -988,14 +1014,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
return -EINVAL;
}
- if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
- goto skip_pin_bo;
-
- r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
- if (r)
- return r;
-
-skip_pin_bo:
+ amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
r = adev->gfxhub.funcs->gart_enable(adev);
if (r)
return r;
@@ -1021,8 +1040,6 @@ skip_pin_bo:
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
- adev->gart.ready = true;
-
return 0;
}
@@ -1045,6 +1062,12 @@ static int gmc_v10_0_hw_init(void *handle)
if (r)
return r;
+ if (amdgpu_emu_mode == 1) {
+ r = amdgpu_gmc_vram_checking(adev);
+ if (r)
+ return r;
+ }
+
if (adev->umc.funcs && adev->umc.funcs->init_registers)
adev->umc.funcs->init_registers(adev);
@@ -1142,6 +1165,10 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 4))
+ return;
+
adev->mmhub.funcs->get_clockgating(adev, flags);
if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index cd6c38e083d0..ec291d28edff 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -469,16 +469,14 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
uint64_t table_addr;
- int r, i;
u32 field;
+ int i;
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
- r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
- if (r)
- return r;
+ amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
@@ -558,7 +556,6 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
- adev->gart.ready = true;
return 0;
}
@@ -922,7 +919,10 @@ static int gmc_v6_0_hw_init(void *handle)
if (r)
return r;
- return r;
+ if (amdgpu_emu_mode == 1)
+ return amdgpu_gmc_vram_checking(adev);
+ else
+ return r;
}
static int gmc_v6_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index ab8adbff9e2d..344d819b4c1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -613,17 +613,14 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
uint64_t table_addr;
- int r, i;
u32 tmp, field;
+ int i;
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
- r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
- if (r)
- return r;
-
+ amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
/* Setup TLB control */
@@ -712,7 +709,6 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
- adev->gart.ready = true;
return 0;
}
@@ -1111,7 +1107,10 @@ static int gmc_v7_0_hw_init(void *handle)
if (r)
return r;
- return r;
+ if (amdgpu_emu_mode == 1)
+ return amdgpu_gmc_vram_checking(adev);
+ else
+ return r;
}
static int gmc_v7_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 054733838292..ca9841d5669f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -837,17 +837,14 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
uint64_t table_addr;
- int r, i;
u32 tmp, field;
+ int i;
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
- r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
- if (r)
- return r;
-
+ amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
/* Setup TLB control */
@@ -953,7 +950,6 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
- adev->gart.ready = true;
return 0;
}
@@ -1242,7 +1238,10 @@ static int gmc_v8_0_hw_init(void *handle)
if (r)
return r;
- return r;
+ if (amdgpu_emu_mode == 1)
+ return amdgpu_gmc_vram_checking(adev);
+ else
+ return r;
}
static int gmc_v8_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 3a5efe969735..431742eb7811 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1204,7 +1204,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
- adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
+ adev->umc.ras = &umc_v6_1_ras;
break;
case IP_VERSION(6, 1, 2):
adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
@@ -1212,15 +1212,16 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
- adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
+ adev->umc.ras = &umc_v6_1_ras;
break;
case IP_VERSION(6, 7, 0):
- adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM;
+ adev->umc.max_ras_err_cnt_per_query =
+ UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL;
adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
if (!adev->gmc.xgmi.connected_to_cpu)
- adev->umc.ras_funcs = &umc_v6_7_ras_funcs;
+ adev->umc.ras = &umc_v6_7_ras;
if (1 & adev->smuio.funcs->get_die_id(adev))
adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
else
@@ -1229,6 +1230,23 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
default:
break;
}
+
+ if (adev->umc.ras) {
+ amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
+
+ strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
+ adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
+ adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
+
+ /* If no special ras_late_init function is defined, use the default ras_late_init */
+ if (!adev->umc.ras->ras_block.ras_late_init)
+ adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
+
+ /* If no special ras_cb function is defined, use the default ras_cb */
+ if (!adev->umc.ras->ras_block.ras_cb)
+ adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
+ }
}
static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
@@ -1250,18 +1268,27 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{
switch (adev->ip_versions[MMHUB_HWIP][0]) {
case IP_VERSION(9, 4, 0):
- adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs;
+ adev->mmhub.ras = &mmhub_v1_0_ras;
break;
case IP_VERSION(9, 4, 1):
- adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs;
+ adev->mmhub.ras = &mmhub_v9_4_ras;
break;
case IP_VERSION(9, 4, 2):
- adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs;
+ adev->mmhub.ras = &mmhub_v1_7_ras;
break;
default:
/* mmhub ras is not available */
break;
}
+
+ if (adev->mmhub.ras) {
+ amdgpu_ras_register_ras_block(adev, &adev->mmhub.ras->ras_block);
+
+ strcpy(adev->mmhub.ras->ras_block.ras_comm.name, "mmhub");
+ adev->mmhub.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB;
+ adev->mmhub.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mmhub.ras_if = &adev->mmhub.ras->ras_block.ras_comm;
+ }
}
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
@@ -1271,7 +1298,9 @@ static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
{
- adev->hdp.ras_funcs = &hdp_v4_0_ras_funcs;
+ adev->hdp.ras = &hdp_v4_0_ras;
+ amdgpu_ras_register_ras_block(adev, &adev->hdp.ras->ras_block);
+ adev->hdp.ras_if = &adev->hdp.ras->ras_block.ras_comm;
}
static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
@@ -1289,6 +1318,7 @@ static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
static int gmc_v9_0_early_init(void *handle)
{
+ int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */
@@ -1318,6 +1348,10 @@ static int gmc_v9_0_early_init(void *handle)
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+ r = amdgpu_gmc_ras_early_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -1344,13 +1378,13 @@ static int gmc_v9_0_late_init(void *handle)
}
if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
- if (adev->mmhub.ras_funcs &&
- adev->mmhub.ras_funcs->reset_ras_error_count)
- adev->mmhub.ras_funcs->reset_ras_error_count(adev);
+ if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
+ adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
+ adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
- if (adev->hdp.ras_funcs &&
- adev->hdp.ras_funcs->reset_ras_error_count)
- adev->hdp.ras_funcs->reset_ras_error_count(adev);
+ if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops &&
+ adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count)
+ adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev);
}
r = amdgpu_gmc_ras_late_init(adev);
@@ -1519,7 +1553,7 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
static int gmc_v9_0_sw_init(void *handle)
{
- int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
+ int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfxhub.funcs->init(adev);
@@ -1635,12 +1669,13 @@ static int gmc_v9_0_sw_init(void *handle)
*/
adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
- r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
+ dma_addr_bits = adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ? 48:44;
+ r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
if (r) {
printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
return r;
}
- adev->need_swiotlb = drm_need_swiotlb(44);
+ adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);
r = gmc_v9_0_mc_init(adev);
if (r)
@@ -1754,14 +1789,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
return -EINVAL;
}
- if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
- goto skip_pin_bo;
-
- r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
- if (r)
- return r;
-
-skip_pin_bo:
+ amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
r = adev->gfxhub.funcs->gart_enable(adev);
if (r)
return r;
@@ -1778,7 +1806,6 @@ skip_pin_bo:
DRM_INFO("PTB located at 0x%016llX\n",
(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
- adev->gart.ready = true;
return 0;
}
@@ -1786,7 +1813,7 @@ static int gmc_v9_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool value;
- int i;
+ int i, r;
/* The sequence of these two function calls matters.*/
gmc_v9_0_init_golden_registers(adev);
@@ -1821,7 +1848,14 @@ static int gmc_v9_0_hw_init(void *handle)
if (adev->umc.funcs && adev->umc.funcs->init_registers)
adev->umc.funcs->init_registers(adev);
- return gmc_v9_0_gart_enable(adev);
+ r = gmc_v9_0_gart_enable(adev);
+ if (r)
+ return r;
+
+ if (amdgpu_emu_mode == 1)
+ return amdgpu_gmc_vram_checking(adev);
+ else
+ return r;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index eecfb1545c1e..046216635262 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -146,17 +146,29 @@ static void hdp_v4_0_init_registers(struct amdgpu_device *adev)
WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
+ if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0))
+ WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, READ_BUFFER_WATERMARK, 2);
+
WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
}
-const struct amdgpu_hdp_ras_funcs hdp_v4_0_ras_funcs = {
- .ras_late_init = amdgpu_hdp_ras_late_init,
- .ras_fini = amdgpu_hdp_ras_fini,
+struct amdgpu_ras_block_hw_ops hdp_v4_0_ras_hw_ops = {
.query_ras_error_count = hdp_v4_0_query_ras_error_count,
.reset_ras_error_count = hdp_v4_0_reset_ras_error_count,
};
+struct amdgpu_hdp_ras hdp_v4_0_ras = {
+ .ras_block = {
+ .ras_comm = {
+ .name = "hdp",
+ .block = AMDGPU_RAS_BLOCK__HDP,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ },
+ .hw_ops = &hdp_v4_0_ras_hw_ops,
+ },
+};
+
const struct amdgpu_hdp_funcs hdp_v4_0_funcs = {
.flush_hdp = hdp_v4_0_flush_hdp,
.invalidate_hdp = hdp_v4_0_invalidate_hdp,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h
index dc3a1b81dd62..c44eee9282ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h
@@ -27,6 +27,6 @@
#include "soc15_common.h"
extern const struct amdgpu_hdp_funcs hdp_v4_0_funcs;
-extern const struct amdgpu_hdp_ras_funcs hdp_v4_0_ras_funcs;
+extern struct amdgpu_hdp_ras hdp_v4_0_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index 01c242c5abc3..41a00851b6c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -50,11 +50,16 @@ static int jpeg_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type != CHIP_YELLOW_CARP) {
- u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
+ u32 harvest;
+ switch (adev->ip_versions[UVD_HWIP][0]) {
+ case IP_VERSION(3, 1, 1):
+ break;
+ default:
+ harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
return -ENOENT;
+ break;
}
adev->jpeg.num_jpeg_inst = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
index 8f7107d392af..d4bd7d1d2649 100644
--- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
@@ -37,24 +37,36 @@ static void mca_v3_0_mp0_query_ras_error_count(struct amdgpu_device *adev,
ras_error_status);
}
-static int mca_v3_0_mp0_ras_late_init(struct amdgpu_device *adev)
+static int mca_v3_0_ras_block_match(struct amdgpu_ras_block_object *block_obj,
+ enum amdgpu_ras_block block, uint32_t sub_block_index)
{
- return amdgpu_mca_ras_late_init(adev, &adev->mca.mp0);
-}
+ if (!block_obj)
+ return -EINVAL;
-static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev)
-{
- amdgpu_mca_ras_fini(adev, &adev->mca.mp0);
+ if ((block_obj->ras_comm.block == block) &&
+ (block_obj->ras_comm.sub_block_index == sub_block_index)) {
+ return 0;
+ }
+
+ return -EINVAL;
}
-const struct amdgpu_mca_ras_funcs mca_v3_0_mp0_ras_funcs = {
- .ras_late_init = mca_v3_0_mp0_ras_late_init,
- .ras_fini = mca_v3_0_mp0_ras_fini,
+const struct amdgpu_ras_block_hw_ops mca_v3_0_mp0_hw_ops = {
.query_ras_error_count = mca_v3_0_mp0_query_ras_error_count,
.query_ras_error_address = NULL,
- .ras_block = AMDGPU_RAS_BLOCK__MCA,
- .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP0,
- .sysfs_name = "mp0_err_count",
+};
+
+struct amdgpu_mca_ras_block mca_v3_0_mp0_ras = {
+ .ras_block = {
+ .ras_comm = {
+ .block = AMDGPU_RAS_BLOCK__MCA,
+ .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP0,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ .name = "mp0",
+ },
+ .hw_ops = &mca_v3_0_mp0_hw_ops,
+ .ras_block_match = mca_v3_0_ras_block_match,
+ },
};
static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev,
@@ -65,24 +77,22 @@ static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev,
ras_error_status);
}
-static int mca_v3_0_mp1_ras_late_init(struct amdgpu_device *adev)
-{
- return amdgpu_mca_ras_late_init(adev, &adev->mca.mp1);
-}
-
-static void mca_v3_0_mp1_ras_fini(struct amdgpu_device *adev)
-{
- amdgpu_mca_ras_fini(adev, &adev->mca.mp1);
-}
-
-const struct amdgpu_mca_ras_funcs mca_v3_0_mp1_ras_funcs = {
- .ras_late_init = mca_v3_0_mp1_ras_late_init,
- .ras_fini = mca_v3_0_mp1_ras_fini,
+const struct amdgpu_ras_block_hw_ops mca_v3_0_mp1_hw_ops = {
.query_ras_error_count = mca_v3_0_mp1_query_ras_error_count,
.query_ras_error_address = NULL,
- .ras_block = AMDGPU_RAS_BLOCK__MCA,
- .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP1,
- .sysfs_name = "mp1_err_count",
+};
+
+struct amdgpu_mca_ras_block mca_v3_0_mp1_ras = {
+ .ras_block = {
+ .ras_comm = {
+ .block = AMDGPU_RAS_BLOCK__MCA,
+ .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP1,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ .name = "mp1",
+ },
+ .hw_ops = &mca_v3_0_mp1_hw_ops,
+ .ras_block_match = mca_v3_0_ras_block_match,
+ },
};
static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev,
@@ -93,24 +103,22 @@ static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev,
ras_error_status);
}
-static int mca_v3_0_mpio_ras_late_init(struct amdgpu_device *adev)
-{
- return amdgpu_mca_ras_late_init(adev, &adev->mca.mpio);
-}
-
-static void mca_v3_0_mpio_ras_fini(struct amdgpu_device *adev)
-{
- amdgpu_mca_ras_fini(adev, &adev->mca.mpio);
-}
-
-const struct amdgpu_mca_ras_funcs mca_v3_0_mpio_ras_funcs = {
- .ras_late_init = mca_v3_0_mpio_ras_late_init,
- .ras_fini = mca_v3_0_mpio_ras_fini,
+const struct amdgpu_ras_block_hw_ops mca_v3_0_mpio_hw_ops = {
.query_ras_error_count = mca_v3_0_mpio_query_ras_error_count,
.query_ras_error_address = NULL,
- .ras_block = AMDGPU_RAS_BLOCK__MCA,
- .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MPIO,
- .sysfs_name = "mpio_err_count",
+};
+
+struct amdgpu_mca_ras_block mca_v3_0_mpio_ras = {
+ .ras_block = {
+ .ras_comm = {
+ .block = AMDGPU_RAS_BLOCK__MCA,
+ .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MPIO,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ .name = "mpio",
+ },
+ .hw_ops = &mca_v3_0_mpio_hw_ops,
+ .ras_block_match = mca_v3_0_ras_block_match,
+ },
};
@@ -118,9 +126,15 @@ static void mca_v3_0_init(struct amdgpu_device *adev)
{
struct amdgpu_mca *mca = &adev->mca;
- mca->mp0.ras_funcs = &mca_v3_0_mp0_ras_funcs;
- mca->mp1.ras_funcs = &mca_v3_0_mp1_ras_funcs;
- mca->mpio.ras_funcs = &mca_v3_0_mpio_ras_funcs;
+ mca->mp0.ras = &mca_v3_0_mp0_ras;
+ mca->mp1.ras = &mca_v3_0_mp1_ras;
+ mca->mpio.ras = &mca_v3_0_mpio_ras;
+ amdgpu_ras_register_ras_block(adev, &mca->mp0.ras->ras_block);
+ amdgpu_ras_register_ras_block(adev, &mca->mp1.ras->ras_block);
+ amdgpu_ras_register_ras_block(adev, &mca->mpio.ras->ras_block);
+ mca->mp0.ras_if = &mca->mp0.ras->ras_block.ras_comm;
+ mca->mp1.ras_if = &mca->mp1.ras->ras_block.ras_comm;
+ mca->mpio.ras_if = &mca->mpio.ras->ras_block.ras_comm;
}
const struct amdgpu_mca_funcs mca_v3_0_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 1da2ec692057..4c9f0c0f3116 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -774,13 +774,17 @@ static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
}
}
-const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs = {
- .ras_late_init = amdgpu_mmhub_ras_late_init,
- .ras_fini = amdgpu_mmhub_ras_fini,
+struct amdgpu_ras_block_hw_ops mmhub_v1_0_ras_hw_ops = {
.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
.reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
};
+struct amdgpu_mmhub_ras mmhub_v1_0_ras = {
+ .ras_block = {
+ .hw_ops = &mmhub_v1_0_ras_hw_ops,
+ },
+};
+
const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
.get_fb_location = mmhub_v1_0_get_fb_location,
.init = mmhub_v1_0_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index 4661b094e007..dae7ca48bd8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -24,6 +24,6 @@
#define __MMHUB_V1_0_H__
extern const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs;
-extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs;
+extern struct amdgpu_mmhub_ras mmhub_v1_0_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
index f5f7181f9af5..3b901f941627 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -1321,15 +1321,19 @@ static void mmhub_v1_7_reset_ras_error_status(struct amdgpu_device *adev)
}
}
-const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs = {
- .ras_late_init = amdgpu_mmhub_ras_late_init,
- .ras_fini = amdgpu_mmhub_ras_fini,
+struct amdgpu_ras_block_hw_ops mmhub_v1_7_ras_hw_ops = {
.query_ras_error_count = mmhub_v1_7_query_ras_error_count,
.reset_ras_error_count = mmhub_v1_7_reset_ras_error_count,
.query_ras_error_status = mmhub_v1_7_query_ras_error_status,
.reset_ras_error_status = mmhub_v1_7_reset_ras_error_status,
};
+struct amdgpu_mmhub_ras mmhub_v1_7_ras = {
+ .ras_block = {
+ .hw_ops = &mmhub_v1_7_ras_hw_ops,
+ },
+};
+
const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = {
.get_fb_location = mmhub_v1_7_get_fb_location,
.init = mmhub_v1_7_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h
index a7f9dfc24697..629f49052137 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h
@@ -24,6 +24,6 @@
#define __MMHUB_V1_7_H__
extern const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs;
-extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs;
+extern struct amdgpu_mmhub_ras mmhub_v1_7_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index 9e16da28505a..1957fb098c4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -93,6 +93,7 @@ mmhub_v2_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (adev->ip_versions[MMHUB_HWIP][0]) {
case IP_VERSION(2, 3, 0):
case IP_VERSION(2, 4, 0):
+ case IP_VERSION(2, 4, 1):
mmhub_cid = mmhub_client_ids_vangogh[cid][rw];
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index ff49eeaf7882..619106f7d23d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -1655,14 +1655,18 @@ static void mmhub_v9_4_query_ras_error_status(struct amdgpu_device *adev)
}
}
-const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs = {
- .ras_late_init = amdgpu_mmhub_ras_late_init,
- .ras_fini = amdgpu_mmhub_ras_fini,
+const struct amdgpu_ras_block_hw_ops mmhub_v9_4_ras_hw_ops = {
.query_ras_error_count = mmhub_v9_4_query_ras_error_count,
.reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
.query_ras_error_status = mmhub_v9_4_query_ras_error_status,
};
+struct amdgpu_mmhub_ras mmhub_v9_4_ras = {
+ .ras_block = {
+ .hw_ops = &mmhub_v9_4_ras_hw_ops,
+ },
+};
+
const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
.get_fb_location = mmhub_v9_4_get_fb_location,
.init = mmhub_v9_4_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
index 90436efa92ef..a48329d95f71 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
@@ -24,6 +24,6 @@
#define __MMHUB_V9_4_H__
extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs;
-extern const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs;
+extern struct amdgpu_mmhub_ras mmhub_v9_4_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 531cfba759dd..7b63d30b9b79 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -546,7 +546,7 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
{
int r;
- /* trigger gpu-reset by hypervisor only if TDR disbaled */
+ /* trigger gpu-reset by hypervisor only if TDR disabled */
if (!amdgpu_gpu_recovery) {
/* see what event we get */
r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
index 3444332ea110..6f81de6f3cc4 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
@@ -59,10 +59,16 @@ static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp;
- if (adev->asic_type == CHIP_YELLOW_CARP)
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(7, 2, 1):
+ case IP_VERSION(7, 3, 0):
+ case IP_VERSION(7, 5, 0):
tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_YC);
- else
+ break;
+ default:
tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
+ break;
+ }
tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
@@ -72,20 +78,26 @@ static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev)
static void nbio_v7_2_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
- if (enable)
- if (adev->asic_type == CHIP_YELLOW_CARP)
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(7, 2, 1):
+ case IP_VERSION(7, 3, 0):
+ case IP_VERSION(7, 5, 0):
+ if (enable)
WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC,
BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK |
BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
else
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC, 0);
+ break;
+ default:
+ if (enable)
WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN,
BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK |
BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
- else
- if (adev->asic_type == CHIP_YELLOW_CARP)
- WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC, 0);
else
WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
+ break;
+ }
}
static u32 nbio_v7_2_get_memsize(struct amdgpu_device *adev)
@@ -250,7 +262,10 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
{
uint32_t def, data;
- if (adev->asic_type == CHIP_YELLOW_CARP) {
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(7, 2, 1):
+ case IP_VERSION(7, 3, 0):
+ case IP_VERSION(7, 5, 0):
def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2));
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
@@ -260,8 +275,8 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
if (def != data)
WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data);
- data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_TX_POWER_CTRL_1));
- def = data;
+ def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0,
+ regBIF1_PCIE_TX_POWER_CTRL_1));
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
data |= (BIF1_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
BIF1_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
@@ -272,7 +287,8 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
if (def != data)
WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_TX_POWER_CTRL_1),
data);
- } else {
+ break;
+ default:
def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2));
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
@@ -285,6 +301,7 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
if (def != data)
WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data);
+ break;
}
}
@@ -352,7 +369,10 @@ const struct nbio_hdp_flush_reg nbio_v7_2_hdp_flush_reg = {
static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
{
uint32_t def, data;
- if (adev->asic_type == CHIP_YELLOW_CARP) {
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(7, 2, 1):
+ case IP_VERSION(7, 3, 0):
+ case IP_VERSION(7, 5, 0):
def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3));
data = REG_SET_FIELD(data, BIF1_PCIE_MST_CTRL_3,
CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
@@ -361,7 +381,8 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data);
- } else {
+ break;
+ default:
def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
@@ -370,6 +391,7 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data);
+ break;
}
if (amdgpu_sriov_vf(adev))
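
The nbio_v7_2.c hunks above replace adev->asic_type checks (CHIP_YELLOW_CARP) with a switch on the discovered NBIO IP version. A minimal user-space sketch of that dispatch pattern follows; the IP_VERSION() packing here is a hypothetical stand-in for the kernel macro, and the returned register names are only the ones quoted in the hunks:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for the kernel's IP_VERSION() packing of
 * major/minor/revision into one value. */
#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

/* Pick the register layout from the discovered NBIO IP version instead
 * of a per-ASIC enum such as CHIP_YELLOW_CARP. */
static const char *nbio_fb_en_reg(uint32_t nbio_ver)
{
	switch (nbio_ver) {
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
		return "regBIF_BX0_BIF_FB_EN_YC";
	default:
		return "regBIF_BX0_BIF_FB_EN";
	}
}

int main(void)
{
	printf("7.3.0 -> %s\n", nbio_fb_en_reg(IP_VERSION(7, 3, 0)));
	printf("7.0.0 -> %s\n", nbio_fb_en_reg(IP_VERSION(7, 0, 0)));
	return 0;
}
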
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index dc5e93756fea..c2357e83a8c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -362,9 +362,24 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = {
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
+ uint32_t baco_cntl;
+
if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
+
+ if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4) &&
+ !amdgpu_sriov_vf(adev)) {
+ baco_cntl = RREG32_SOC15(NBIO, 0, mmBACO_CNTL);
+ if (baco_cntl &
+ (BACO_CNTL__BACO_DUMMY_EN_MASK | BACO_CNTL__BACO_EN_MASK)) {
+ baco_cntl &= ~(BACO_CNTL__BACO_DUMMY_EN_MASK |
+ BACO_CNTL__BACO_EN_MASK);
+ dev_dbg(adev->dev, "Unsetting baco dummy mode %x",
+ baco_cntl);
+ WREG32_SOC15(NBIO, 0, mmBACO_CNTL, baco_cntl);
+ }
+ }
}
static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
@@ -658,16 +673,27 @@ static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}
-const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs = {
+const struct amdgpu_ras_block_hw_ops nbio_v7_4_ras_hw_ops = {
+ .query_ras_error_count = nbio_v7_4_query_ras_error_count,
+};
+
+struct amdgpu_nbio_ras nbio_v7_4_ras = {
+ .ras_block = {
+ .ras_comm = {
+ .name = "pcie_bif",
+ .block = AMDGPU_RAS_BLOCK__PCIE_BIF,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ },
+ .hw_ops = &nbio_v7_4_ras_hw_ops,
+ .ras_late_init = amdgpu_nbio_ras_late_init,
+ },
.handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
.handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
.init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
.init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
- .query_ras_error_count = nbio_v7_4_query_ras_error_count,
- .ras_late_init = amdgpu_nbio_ras_late_init,
- .ras_fini = amdgpu_nbio_ras_fini,
};
+
static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
index cc5692db6f98..7490022d79d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
@@ -29,6 +29,6 @@
extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald;
extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
-extern const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs;
+extern struct amdgpu_nbio_ras nbio_v7_4_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 2ec1ffb36b1f..e19f14c3ef59 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -204,6 +204,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
*codecs = &sc_video_codecs_decode;
return 0;
case IP_VERSION(3, 1, 1):
+ case IP_VERSION(3, 1, 2):
if (encode)
*codecs = &nv_video_codecs_encode;
else
@@ -258,21 +259,6 @@ static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}
-static u32 nv_pcie_port_rreg(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long flags, address, data;
- u32 r;
- address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
-
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(address, reg * 4);
- (void)RREG32(address);
- r = RREG32(data);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
- return r;
-}
-
static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
unsigned long address, data;
@@ -283,21 +269,6 @@ static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}
-static void nv_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
-{
- unsigned long flags, address, data;
-
- address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
-
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(address, reg * 4);
- (void)RREG32(address);
- WREG32(data, v);
- (void)RREG32(data);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-}
-
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
@@ -360,38 +331,6 @@ static bool nv_read_disabled_bios(struct amdgpu_device *adev)
return false;
}
-static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
- u8 *bios, u32 length_bytes)
-{
- u32 *dw_ptr;
- u32 i, length_dw;
- u32 rom_index_offset, rom_data_offset;
-
- if (bios == NULL)
- return false;
- if (length_bytes == 0)
- return false;
- /* APU vbios image is part of sbios image */
- if (adev->flags & AMD_IS_APU)
- return false;
-
- dw_ptr = (u32 *)bios;
- length_dw = ALIGN(length_bytes, 4) / 4;
-
- rom_index_offset =
- adev->smuio.funcs->get_rom_index_offset(adev);
- rom_data_offset =
- adev->smuio.funcs->get_rom_data_offset(adev);
-
- /* set rom index to 0 */
- WREG32(rom_index_offset, 0);
- /* read out the rom data */
- for (i = 0; i < length_dw; i++)
- dw_ptr[i] = RREG32(rom_data_offset);
-
- return true;
-}
-
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
@@ -515,6 +454,8 @@ nv_asic_reset_method(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 8):
return AMD_RESET_METHOD_MODE2;
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
@@ -584,7 +525,7 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
static void nv_program_aspm(struct amdgpu_device *adev)
{
- if (!amdgpu_aspm)
+ if (!amdgpu_device_should_use_aspm(adev))
return;
if (!(adev->flags & AMD_IS_APU) &&
@@ -699,7 +640,8 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
if (!(adev->flags & AMD_IS_APU) &&
- (adev->nbio.funcs->enable_aspm))
+ (adev->nbio.funcs->enable_aspm) &&
+ amdgpu_device_should_use_aspm(adev))
adev->nbio.funcs->enable_aspm(adev, !enter);
return 0;
@@ -708,7 +650,7 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
static const struct amdgpu_asic_funcs nv_asic_funcs =
{
.read_disabled_bios = &nv_read_disabled_bios,
- .read_bios_from_rom = &nv_read_bios_from_rom,
+ .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
.read_register = &nv_read_register,
.reset = &nv_asic_reset,
.reset_method = &nv_asic_reset_method,
@@ -742,8 +684,8 @@ static int nv_common_early_init(void *handle)
adev->pcie_wreg = &nv_pcie_wreg;
adev->pcie_rreg64 = &nv_pcie_rreg64;
adev->pcie_wreg64 = &nv_pcie_wreg64;
- adev->pciep_rreg = &nv_pcie_port_rreg;
- adev->pciep_wreg = &nv_pcie_port_wreg;
+ adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
+ adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
/* TODO: will add them during VCN v2 implementation */
adev->uvd_ctx_rreg = NULL;
@@ -964,10 +906,63 @@ static int nv_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x01;
break;
case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x82;
break;
+ case IP_VERSION(10, 3, 6):
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_FGCG |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG;
+ adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG;
+ adev->external_rev_id = adev->rev_id + 0x01;
+ break;
+ case IP_VERSION(10, 3, 7):
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_FGCG |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG;
+ adev->pg_flags = AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG |
+ AMD_PG_SUPPORT_GFX_PG;
+ adev->external_rev_id = adev->rev_id + 0x01;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index dd0dce254901..1f276ddd26e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -258,6 +258,7 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_SDMA6 = 56, /* SDMA6 MI */
GFX_FW_TYPE_SDMA7 = 57, /* SDMA7 MI */
GFX_FW_TYPE_VCN1 = 58, /* VCN1 MI */
+ GFX_FW_TYPE_CAP = 62, /* CAP_FW */
GFX_FW_TYPE_REG_LIST = 67, /* REG_LIST MI */
GFX_FW_TYPE_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index d0e76b36d4ab..9518b4394a6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -53,11 +53,13 @@ MODULE_FIRMWARE("amdgpu/navi14_ta.bin");
MODULE_FIRMWARE("amdgpu/navi12_sos.bin");
MODULE_FIRMWARE("amdgpu/navi12_asd.bin");
MODULE_FIRMWARE("amdgpu/navi12_ta.bin");
+MODULE_FIRMWARE("amdgpu/navi12_cap.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sos.bin");
MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_sos.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_ta.bin");
+MODULE_FIRMWARE("amdgpu/sienna_cichlid_cap.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_sos.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_ta.bin");
MODULE_FIRMWARE("amdgpu/vangogh_asd.bin");
@@ -177,8 +179,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
err = psp_init_asd_microcode(psp, chip_name);
if (err)
return err;
- if (amdgpu_sriov_vf(adev))
- break;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err) {
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 17655bc6d2f1..024f60631faf 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -31,9 +31,16 @@
MODULE_FIRMWARE("amdgpu/aldebaran_sos.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_ta.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_cap.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_asd.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_toc.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_5_asd.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_5_toc.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_5_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_8_asd.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_8_toc.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_8_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -55,6 +62,12 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
case IP_VERSION(13, 0, 3):
chip_name = "yellow_carp";
break;
+ case IP_VERSION(13, 0, 5):
+ chip_name = "psp_13_0_5";
+ break;
+ case IP_VERSION(13, 0, 8):
+ chip_name = "psp_13_0_8";
+ break;
default:
BUG();
}
@@ -69,6 +82,8 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 8):
err = psp_init_asd_microcode(psp, chip_name);
if (err)
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 1ed357cb0f49..01f3bcc62a6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -44,6 +44,7 @@
MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
+MODULE_FIRMWARE("amdgpu/vega10_cap.bin");
MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 4509bd4cce2d..1d8bbcbd7a37 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -1142,6 +1142,7 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = false,
+ .secure_submission_supported = true,
.get_rptr = sdma_v2_4_ring_get_rptr,
.get_wptr = sdma_v2_4_ring_get_wptr,
.set_wptr = sdma_v2_4_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 135727b59c41..4ef4feff5649 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1580,6 +1580,7 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = false,
+ .secure_submission_supported = true,
.get_rptr = sdma_v3_0_ring_get_rptr,
.get_wptr = sdma_v3_0_ring_get_wptr,
.set_wptr = sdma_v3_0_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e8e4749e9c79..d7e8f7232364 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1885,22 +1885,16 @@ static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
static int sdma_v4_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_ih_if ih_info = {
- .cb = sdma_v4_0_process_ras_data_cb,
- };
sdma_v4_0_setup_ulv(adev);
if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
- if (adev->sdma.funcs &&
- adev->sdma.funcs->reset_ras_error_count)
- adev->sdma.funcs->reset_ras_error_count(adev);
+ if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
+ adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
+ adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
}
- if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
- return adev->sdma.funcs->ras_late_init(adev, &ih_info);
- else
- return 0;
+ return 0;
}
static int sdma_v4_0_sw_init(void *handle)
@@ -2001,9 +1995,6 @@ static int sdma_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- if (adev->sdma.funcs && adev->sdma.funcs->ras_fini)
- adev->sdma.funcs->ras_fini(adev);
-
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
if (adev->sdma.has_page_queue)
@@ -2057,6 +2048,10 @@ static int sdma_v4_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* SMU saves SDMA state for us */
+ if (adev->in_s0ix)
+ return 0;
+
return sdma_v4_0_hw_fini(adev);
}
@@ -2064,6 +2059,10 @@ static int sdma_v4_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* SMU restores SDMA state for us */
+ if (adev->in_s0ix)
+ return 0;
+
return sdma_v4_0_hw_init(adev);
}
@@ -2415,6 +2414,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
+ .secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_0,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_ring_get_wptr,
@@ -2451,6 +2451,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs_2nd_mmhub = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
+ .secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_1,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_ring_get_wptr,
@@ -2483,6 +2484,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
+ .secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_0,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_page_ring_get_wptr,
@@ -2515,6 +2517,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs_2nd_mmhub = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
+ .secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_1,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_page_ring_get_wptr,
@@ -2740,7 +2743,7 @@ static void sdma_v4_0_get_ras_error_count(uint32_t value,
}
}
-static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
+static int sdma_v4_0_query_ras_error_count_by_instance(struct amdgpu_device *adev,
uint32_t instance, void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
@@ -2762,6 +2765,18 @@ static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
return 0;
};
+static void sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
+{
+ int i = 0;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (sdma_v4_0_query_ras_error_count_by_instance(adev, i, ras_error_status)) {
+ dev_err(adev->dev, "Query ras error count failed in SDMA%d\n", i);
+ return;
+ }
+ }
+}
+
static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
{
int i;
@@ -2773,26 +2788,48 @@ static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
}
}
-static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
- .ras_late_init = amdgpu_sdma_ras_late_init,
- .ras_fini = amdgpu_sdma_ras_fini,
+const struct amdgpu_ras_block_hw_ops sdma_v4_0_ras_hw_ops = {
.query_ras_error_count = sdma_v4_0_query_ras_error_count,
.reset_ras_error_count = sdma_v4_0_reset_ras_error_count,
};
+static struct amdgpu_sdma_ras sdma_v4_0_ras = {
+ .ras_block = {
+ .hw_ops = &sdma_v4_0_ras_hw_ops,
+ .ras_cb = sdma_v4_0_process_ras_data_cb,
+ },
+};
+
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{
switch (adev->ip_versions[SDMA0_HWIP][0]) {
case IP_VERSION(4, 2, 0):
case IP_VERSION(4, 2, 2):
- adev->sdma.funcs = &sdma_v4_0_ras_funcs;
+ adev->sdma.ras = &sdma_v4_0_ras;
break;
case IP_VERSION(4, 4, 0):
- adev->sdma.funcs = &sdma_v4_4_ras_funcs;
+ adev->sdma.ras = &sdma_v4_4_ras;
break;
default:
break;
}
+
+ if (adev->sdma.ras) {
+ amdgpu_ras_register_ras_block(adev, &adev->sdma.ras->ras_block);
+
+ strcpy(adev->sdma.ras->ras_block.ras_comm.name, "sdma");
+ adev->sdma.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA;
+ adev->sdma.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->sdma.ras_if = &adev->sdma.ras->ras_block.ras_comm;
+
+	/* If no dedicated ras_late_init function is defined, use the default ras_late_init */
+ if (!adev->sdma.ras->ras_block.ras_late_init)
+ adev->sdma.ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init;
+
+	/* If no dedicated ras_cb function is defined, use the default ras_cb */
+ if (!adev->sdma.ras->ras_block.ras_cb)
+ adev->sdma.ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb;
+ }
}
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
index bf95007f0843..6f9895cdddb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
@@ -188,7 +188,7 @@ static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev,
}
}
-static int sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev,
+static int sdma_v4_4_query_ras_error_count_by_instance(struct amdgpu_device *adev,
uint32_t instance,
void *ras_error_status)
{
@@ -245,9 +245,26 @@ static void sdma_v4_4_reset_ras_error_count(struct amdgpu_device *adev)
}
}
-const struct amdgpu_sdma_ras_funcs sdma_v4_4_ras_funcs = {
- .ras_late_init = amdgpu_sdma_ras_late_init,
- .ras_fini = amdgpu_sdma_ras_fini,
+static void sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
+{
+ int i = 0;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (sdma_v4_4_query_ras_error_count_by_instance(adev, i, ras_error_status)) {
+ dev_err(adev->dev, "Query ras error count failed in SDMA%d\n", i);
+ return;
+ }
+ }
+
+}
+
+const struct amdgpu_ras_block_hw_ops sdma_v4_4_ras_hw_ops = {
.query_ras_error_count = sdma_v4_4_query_ras_error_count,
.reset_ras_error_count = sdma_v4_4_reset_ras_error_count,
};
+
+struct amdgpu_sdma_ras sdma_v4_4_ras = {
+ .ras_block = {
+ .hw_ops = &sdma_v4_4_ras_hw_ops,
+ },
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h
index 74a6e5b5e949..a9f0c68359e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h
@@ -23,6 +23,6 @@
#ifndef __SDMA_V4_4_H__
#define __SDMA_V4_4_H__
-extern const struct amdgpu_sdma_ras_funcs sdma_v4_4_ras_funcs;
+extern struct amdgpu_sdma_ras sdma_v4_4_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 81e033549dda..a8d49c005f73 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -51,9 +51,6 @@ MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin");
MODULE_FIRMWARE("amdgpu/navi12_sdma.bin");
MODULE_FIRMWARE("amdgpu/navi12_sdma1.bin");
-MODULE_FIRMWARE("amdgpu/cyan_skillfish_sdma.bin");
-MODULE_FIRMWARE("amdgpu/cyan_skillfish_sdma1.bin");
-
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma1.bin");
@@ -264,10 +261,7 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
chip_name = "navi12";
break;
case IP_VERSION(5, 0, 1):
- if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
- chip_name = "cyan_skillfish2";
- else
- chip_name = "cyan_skillfish";
+ chip_name = "cyan_skillfish2";
break;
default:
BUG();
@@ -1696,6 +1690,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
+ .secure_submission_supported = true,
.vmhub = AMDGPU_GFXHUB_0,
.get_rptr = sdma_v5_0_ring_get_rptr,
.get_wptr = sdma_v5_0_ring_get_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index d3d6d5b045b8..824eace69884 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -51,6 +51,8 @@ MODULE_FIRMWARE("amdgpu/beige_goby_sdma.bin");
MODULE_FIRMWARE("amdgpu/vangogh_sdma.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_sdma.bin");
+MODULE_FIRMWARE("amdgpu/sdma_5_2_6.bin");
+MODULE_FIRMWARE("amdgpu/sdma_5_2_7.bin");
#define SDMA1_REG_OFFSET 0x600
#define SDMA3_REG_OFFSET 0x400
@@ -138,28 +140,34 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
switch (adev->ip_versions[SDMA0_HWIP][0]) {
case IP_VERSION(5, 2, 0):
- chip_name = "sienna_cichlid";
+ chip_name = "sienna_cichlid_sdma";
break;
case IP_VERSION(5, 2, 2):
- chip_name = "navy_flounder";
+ chip_name = "navy_flounder_sdma";
break;
case IP_VERSION(5, 2, 1):
- chip_name = "vangogh";
+ chip_name = "vangogh_sdma";
break;
case IP_VERSION(5, 2, 4):
- chip_name = "dimgrey_cavefish";
+ chip_name = "dimgrey_cavefish_sdma";
break;
case IP_VERSION(5, 2, 5):
- chip_name = "beige_goby";
+ chip_name = "beige_goby_sdma";
break;
case IP_VERSION(5, 2, 3):
- chip_name = "yellow_carp";
+ chip_name = "yellow_carp_sdma";
+ break;
+ case IP_VERSION(5, 2, 6):
+ chip_name = "sdma_5_2_6";
+ break;
+ case IP_VERSION(5, 2, 7):
+ chip_name = "sdma_5_2_7";
break;
default:
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name);
err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
if (err)
@@ -1617,6 +1625,7 @@ static int sdma_v5_2_set_clockgating_state(void *handle,
case IP_VERSION(5, 2, 1):
case IP_VERSION(5, 2, 4):
case IP_VERSION(5, 2, 5):
+ case IP_VERSION(5, 2, 6):
case IP_VERSION(5, 2, 3):
sdma_v5_2_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
@@ -1644,6 +1653,11 @@ static void sdma_v5_2_get_clockgating_state(void *handle, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
+ /* AMD_CG_SUPPORT_SDMA_MGCG */
+ data = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
+ if (!(data & SDMA0_CLK_CTRL__CGCG_EN_OVERRIDE_MASK))
+ *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
+
/* AMD_CG_SUPPORT_SDMA_LS */
data = RREG32_KIQ(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
@@ -1673,6 +1687,7 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
+ .secure_submission_supported = true,
.vmhub = AMDGPU_GFXHUB_0,
.get_rptr = sdma_v5_2_ring_get_rptr,
.get_wptr = sdma_v5_2_ring_get_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index e6d2f74a7976..7f99e130acd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -2453,7 +2453,7 @@ static void si_program_aspm(struct amdgpu_device *adev)
bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
bool disable_clkreq = false;
- if (amdgpu_aspm == 0)
+ if (!amdgpu_device_should_use_aspm(adev))
return;
if (adev->flags & AMD_IS_APU)
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index 73ffa8fde3df..dd2d66090d23 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -26,6 +26,7 @@
#include "smu_v11_0_i2c.h"
#include "amdgpu.h"
+#include "amdgpu_dpm.h"
#include "soc15_common.h"
#include <drm/drm_fixed.h>
#include <drm/drm_drv.h>
@@ -43,11 +44,10 @@
#define I2C_X_RESTART BIT(31)
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-
static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+ struct amdgpu_device *adev = smu_i2c->adev;
uint32_t reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_PWRMGT);
reg = REG_SET_FIELD(reg, SMUIO_PWRMGT, i2c_clk_gate_en, en ? 1 : 0);
@@ -75,7 +75,8 @@ static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en)
static int smu_v11_0_i2c_enable(struct i2c_adapter *control, bool enable)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+ struct amdgpu_device *adev = smu_i2c->adev;
WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE, enable ? 1 : 0);
@@ -100,7 +101,8 @@ static int smu_v11_0_i2c_enable(struct i2c_adapter *control, bool enable)
static void smu_v11_0_i2c_clear_status(struct i2c_adapter *control)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+ struct amdgpu_device *adev = smu_i2c->adev;
/* do */
{
RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_CLR_INTR);
@@ -110,7 +112,8 @@ static void smu_v11_0_i2c_clear_status(struct i2c_adapter *control)
static void smu_v11_0_i2c_configure(struct i2c_adapter *control)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+ struct amdgpu_device *adev = smu_i2c->adev;
uint32_t reg = 0;
reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_SLAVE_DISABLE, 1);
@@ -131,7 +134,8 @@ static void smu_v11_0_i2c_configure(struct i2c_adapter *control)
static void smu_v11_0_i2c_set_clock(struct i2c_adapter *control)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+ struct amdgpu_device *adev = smu_i2c->adev;
/*
* Standard mode speed. These values are taken from SMUIO MAS,
@@ -154,7 +158,8 @@ static void smu_v11_0_i2c_set_clock(struct i2c_adapter *control)
static void smu_v11_0_i2c_set_address(struct i2c_adapter *control, u16 address)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+ struct amdgpu_device *adev = smu_i2c->adev;
/* The IC_TAR::IC_TAR field is 10 bits wide.
* It takes a 7-bit or 10-bit address,
@@ -165,7 +170,8 @@ static void smu_v11_0_i2c_set_address(struct i2c_adapter *control, u16 address)
static uint32_t smu_v11_0_i2c_poll_tx_status(struct i2c_adapter *control)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+ struct amdgpu_device *adev = smu_i2c->adev;
uint32_t ret = I2C_OK;
uint32_t reg, reg_c_tx_abrt_source;
@@ -216,7 +222,8 @@ static uint32_t smu_v11_0_i2c_poll_tx_status(struct i2c_adapter *control)
static uint32_t smu_v11_0_i2c_poll_rx_status(struct i2c_adapter *control)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+ struct amdgpu_device *adev = smu_i2c->adev;
uint32_t ret = I2C_OK;
uint32_t reg_ic_status, reg_c_tx_abrt_source;
@@ -262,7 +269,8 @@ static uint32_t smu_v11_0_i2c_transmit(struct i2c_adapter *control,
u16 address, u8 *data,
u32 numbytes, u32 i2c_flag)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+ struct amdgpu_device *adev = smu_i2c->adev;
u32 bytes_sent, reg, ret = I2C_OK;
unsigned long timeout_counter;
@@ -360,7 +368,8 @@ static uint32_t smu_v11_0_i2c_receive(struct i2c_adapter *control,
u16 address, u8 *data,
u32 numbytes, u32 i2c_flag)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+ struct amdgpu_device *adev = smu_i2c->adev;
uint32_t bytes_received, ret = I2C_OK;
bytes_received = 0;
@@ -431,7 +440,8 @@ static uint32_t smu_v11_0_i2c_receive(struct i2c_adapter *control,
static void smu_v11_0_i2c_abort(struct i2c_adapter *control)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+ struct amdgpu_device *adev = smu_i2c->adev;
uint32_t reg = 0;
/* Enable I2C engine; */
@@ -447,7 +457,8 @@ static void smu_v11_0_i2c_abort(struct i2c_adapter *control)
static bool smu_v11_0_i2c_activity_done(struct i2c_adapter *control)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+ struct amdgpu_device *adev = smu_i2c->adev;
const uint32_t IDLE_TIMEOUT = 1024;
uint32_t timeout_count = 0;
@@ -508,7 +519,8 @@ static void smu_v11_0_i2c_init(struct i2c_adapter *control)
static void smu_v11_0_i2c_fini(struct i2c_adapter *control)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+ struct amdgpu_device *adev = smu_i2c->adev;
u32 status, enable, en_stat;
int res;
@@ -543,7 +555,8 @@ static void smu_v11_0_i2c_fini(struct i2c_adapter *control)
static bool smu_v11_0_i2c_bus_lock(struct i2c_adapter *control)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+ struct amdgpu_device *adev = smu_i2c->adev;
/* Send PPSMC_MSG_RequestI2CBus */
if (!amdgpu_dpm_smu_i2c_bus_access(adev, true))
@@ -554,7 +567,8 @@ static bool smu_v11_0_i2c_bus_lock(struct i2c_adapter *control)
static bool smu_v11_0_i2c_bus_unlock(struct i2c_adapter *control)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+ struct amdgpu_device *adev = smu_i2c->adev;
/* Send PPSMC_MSG_ReleaseI2CBus */
if (!amdgpu_dpm_smu_i2c_bus_access(adev, false))
@@ -587,16 +601,17 @@ static uint32_t smu_v11_0_i2c_write_data(struct i2c_adapter *control,
if (ret != I2C_OK)
DRM_ERROR("WriteI2CData() - I2C error occurred :%x", ret);
-
+
return ret;
}
static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
- struct amdgpu_device *adev = to_amdgpu_device(i2c);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c);
+ struct amdgpu_device *adev = smu_i2c->adev;
- mutex_lock(&adev->pm.smu_i2c_mutex);
+ mutex_lock(&smu_i2c->mutex);
if (!smu_v11_0_i2c_bus_lock(i2c))
DRM_ERROR("Failed to lock the bus from SMU");
else
@@ -611,13 +626,14 @@ static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
- struct amdgpu_device *adev = to_amdgpu_device(i2c);
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c);
+ struct amdgpu_device *adev = smu_i2c->adev;
if (!smu_v11_0_i2c_bus_unlock(i2c))
DRM_ERROR("Failed to unlock the bus from SMU");
else
adev->pm.bus_locked = false;
- mutex_unlock(&adev->pm.smu_i2c_mutex);
+ mutex_unlock(&smu_i2c->mutex);
}
static const struct i2c_lock_operations smu_v11_0_i2c_i2c_lock_ops = {
@@ -706,19 +722,26 @@ static const struct i2c_adapter_quirks smu_v11_0_i2c_control_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
};
-int smu_v11_0_i2c_control_init(struct i2c_adapter *control)
+int smu_v11_0_i2c_control_init(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[0];
+ struct i2c_adapter *control = &smu_i2c->adapter;
int res;
- mutex_init(&adev->pm.smu_i2c_mutex);
+ smu_i2c->adev = adev;
+ smu_i2c->port = 0;
+ mutex_init(&smu_i2c->mutex);
control->owner = THIS_MODULE;
control->class = I2C_CLASS_HWMON;
control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v11_0_i2c_algo;
- snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0");
control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops;
control->quirks = &smu_v11_0_i2c_control_quirks;
+ i2c_set_adapdata(control, smu_i2c);
+
+ adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+ adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
res = i2c_add_adapter(control);
if (res)
@@ -727,9 +750,13 @@ int smu_v11_0_i2c_control_init(struct i2c_adapter *control)
return res;
}
-void smu_v11_0_i2c_control_fini(struct i2c_adapter *control)
+void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev)
{
+ struct i2c_adapter *control = adev->pm.ras_eeprom_i2c_bus;
+
i2c_del_adapter(control);
+ adev->pm.ras_eeprom_i2c_bus = NULL;
+ adev->pm.fru_eeprom_i2c_bus = NULL;
}
/*
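
The smu_v11_0_i2c.c changes above drop the container_of()-based to_amdgpu_device() macro and instead store a per-bus context on the adapter with i2c_set_adapdata(), retrieving it in every callback with i2c_get_adapdata(). A small stand-alone sketch of that pattern (user-space analogue; struct names and helpers are illustrative):

#include <stdio.h>

struct i2c_adapter { void *adapdata; };

struct smu_i2c_bus {
	struct i2c_adapter adapter;
	int port;
};

/* analogues of i2c_set_adapdata()/i2c_get_adapdata() */
static void set_adapdata(struct i2c_adapter *a, void *d) { a->adapdata = d; }
static void *get_adapdata(struct i2c_adapter *a) { return a->adapdata; }

int main(void)
{
	struct smu_i2c_bus bus = { .port = 0 };
	struct smu_i2c_bus *ctx;

	set_adapdata(&bus.adapter, &bus);	/* done once at controller init */
	ctx = get_adapdata(&bus.adapter);	/* done in each i2c callback */
	printf("bus port %d\n", ctx->port);
	return 0;
}
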
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h
index 44467c05f642..96ad14288a0c 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h
@@ -26,9 +26,9 @@
#include <linux/types.h>
-struct i2c_adapter;
+struct amdgpu_device;
-int smu_v11_0_i2c_control_init(struct i2c_adapter *control);
-void smu_v11_0_i2c_control_fini(struct i2c_adapter *control);
+int smu_v11_0_i2c_control_init(struct amdgpu_device *adev);
+void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 0fc1747e4a70..3d0251ef8d79 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -375,39 +375,6 @@ static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
return false;
}
-static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
- u8 *bios, u32 length_bytes)
-{
- u32 *dw_ptr;
- u32 i, length_dw;
- uint32_t rom_index_offset;
- uint32_t rom_data_offset;
-
- if (bios == NULL)
- return false;
- if (length_bytes == 0)
- return false;
- /* APU vbios image is part of sbios image */
- if (adev->flags & AMD_IS_APU)
- return false;
-
- dw_ptr = (u32 *)bios;
- length_dw = ALIGN(length_bytes, 4) / 4;
-
- rom_index_offset =
- adev->smuio.funcs->get_rom_index_offset(adev);
- rom_data_offset =
- adev->smuio.funcs->get_rom_data_offset(adev);
-
- /* set rom index to 0 */
- WREG32(rom_index_offset, 0);
- /* read out the rom data */
- for (i = 0; i < length_dw; i++)
- dw_ptr[i] = RREG32(rom_data_offset);
-
- return true;
-}
-
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
@@ -619,8 +586,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
static int soc15_asic_reset(struct amdgpu_device *adev)
{
/* original raven doesn't have full asic reset */
- if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
- !(adev->apu_flags & AMD_APU_IS_RAVEN2))
+ if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
+ (adev->apu_flags & AMD_APU_IS_RAVEN2))
return 0;
switch (soc15_asic_reset_method(adev)) {
@@ -703,7 +670,7 @@ static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
static void soc15_program_aspm(struct amdgpu_device *adev)
{
- if (!amdgpu_aspm)
+ if (!amdgpu_device_should_use_aspm(adev))
return;
if (!(adev->flags & AMD_IS_APU) &&
@@ -886,6 +853,10 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
u32 sol_reg;
+	/* CP hangs in IGT reloading test on RN, do a reset as a workaround */
+ if (adev->asic_type == CHIP_RENOIR)
+ return true;
+
/* Just return false for soc15 GPUs. Reset does not seem to
* be necessary.
*/
@@ -925,7 +896,7 @@ static void soc15_pre_asic_init(struct amdgpu_device *adev)
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
.read_disabled_bios = &soc15_read_disabled_bios,
- .read_bios_from_rom = &soc15_read_bios_from_rom,
+ .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
.read_register = &soc15_read_register,
.reset = &soc15_asic_reset,
.reset_method = &soc15_asic_reset_method,
@@ -947,7 +918,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
.read_disabled_bios = &soc15_read_disabled_bios,
- .read_bios_from_rom = &soc15_read_bios_from_rom,
+ .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
.read_register = &soc15_read_register,
.reset = &soc15_asic_reset,
.reset_method = &soc15_asic_reset_method,
@@ -1114,8 +1085,11 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCN_MGCG;
+ /*
+ * MMHUB PG needs to be disabled for Picasso for
+ * stability reasons.
+ */
adev->pg_flags = AMD_PG_SUPPORT_SDMA |
- AMD_PG_SUPPORT_MMHUB |
AMD_PG_SUPPORT_VCN;
} else {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
@@ -1219,16 +1193,11 @@ static int soc15_common_early_init(void *handle)
static int soc15_common_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int r = 0;
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
- if (adev->nbio.ras_funcs &&
- adev->nbio.ras_funcs->ras_late_init)
- r = adev->nbio.ras_funcs->ras_late_init(adev);
-
- return r;
+ return 0;
}
static int soc15_common_sw_init(void *handle)
@@ -1249,10 +1218,6 @@ static int soc15_common_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->nbio.ras_funcs &&
- adev->nbio.ras_funcs->ras_fini)
- adev->nbio.ras_funcs->ras_fini(adev);
-
if (adev->df.funcs &&
adev->df.funcs->sw_fini)
adev->df.funcs->sw_fini(adev);
@@ -1318,11 +1283,11 @@ static int soc15_common_hw_fini(void *handle)
if (adev->nbio.ras_if &&
amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
- if (adev->nbio.ras_funcs &&
- adev->nbio.ras_funcs->init_ras_controller_interrupt)
+ if (adev->nbio.ras &&
+ adev->nbio.ras->init_ras_controller_interrupt)
amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
- if (adev->nbio.ras_funcs &&
- adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt)
+ if (adev->nbio.ras &&
+ adev->nbio.ras->init_ras_err_event_athub_interrupt)
amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 473767e03676..acce8c2e0328 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -28,13 +28,13 @@
#define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
#define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \
- ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->sriov_wreg) ? \
- adev->gfx.rlc.funcs->sriov_wreg(adev, reg, value, flag, hwip) : \
+ ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? \
+ amdgpu_sriov_wreg(adev, reg, value, flag, hwip) : \
WREG32(reg, value))
#define __RREG32_SOC15_RLC__(reg, flag, hwip) \
- ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->sriov_rreg) ? \
- adev->gfx.rlc.funcs->sriov_rreg(adev, reg, flag, hwip) : \
+ ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? \
+ amdgpu_sriov_rreg(adev, reg, flag, hwip) : \
RREG32(reg))
#define WREG32_FIELD15(ip, idx, reg, field, val) \
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index 5093826a43d1..509d8a1945eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -64,7 +64,8 @@ enum ta_ras_status {
TA_RAS_STATUS__ERROR_PCS_STATE_ERROR = 0xA016,
TA_RAS_STATUS__ERROR_PCS_STATE_HANG = 0xA017,
TA_RAS_STATUS__ERROR_PCS_STATE_UNKNOWN = 0xA018,
- TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ = 0xA019
+ TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ = 0xA019,
+ TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED = 0xA01A
};
enum ta_ras_block {
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 20b44983ac94..939cb203f7ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -300,7 +300,6 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
{
uint32_t lsb, mc_umc_status_addr;
uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
- struct eeprom_table_record *err_rec;
uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
if (adev->asic_type == CHIP_ARCTURUS) {
@@ -328,8 +327,6 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
return;
}
- err_rec = &err_data->err_addr[err_data->err_addr_cnt];
-
/* calculate error address if ue/ce error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
@@ -348,18 +345,9 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
/* we only save ue error information currently, ce is skipped */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1) {
- err_rec->address = err_addr;
- /* page frame address is saved */
- err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
- err_rec->ts = (uint64_t)ktime_get_real_seconds();
- err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
- err_rec->cu = 0;
- err_rec->mem_channel = channel_index;
- err_rec->mcumc_id = umc_inst;
-
- err_data->err_addr_cnt++;
- }
+ == 1)
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
}
/* clear umc status */
@@ -465,10 +453,14 @@ static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
umc_v6_1_enable_umc_index_mode(adev);
}
-const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs = {
- .err_cnt_init = umc_v6_1_err_cnt_init,
- .ras_late_init = amdgpu_umc_ras_late_init,
- .ras_fini = amdgpu_umc_ras_fini,
+const struct amdgpu_ras_block_hw_ops umc_v6_1_ras_hw_ops = {
.query_ras_error_count = umc_v6_1_query_ras_error_count,
.query_ras_error_address = umc_v6_1_query_ras_error_address,
};
+
+struct amdgpu_umc_ras umc_v6_1_ras = {
+ .ras_block = {
+ .hw_ops = &umc_v6_1_ras_hw_ops,
+ },
+ .err_cnt_init = umc_v6_1_err_cnt_init,
+};
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
index 5dc36c730bb2..50c632eb4cc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
@@ -45,7 +45,7 @@
/* umc ce count initial value */
#define UMC_V6_1_CE_CNT_INIT (UMC_V6_1_CE_CNT_MAX - UMC_V6_1_CE_INT_THRESHOLD)
-extern const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs;
+extern struct amdgpu_umc_ras umc_v6_1_ras;
extern const uint32_t
umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM];
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index 6dd1e19e8d43..c45d9c14ecbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -47,6 +47,13 @@ static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
uint32_t umc_inst,
uint32_t ch_inst)
{
+ uint32_t index = umc_inst * adev->umc.channel_inst_num + ch_inst;
+
+ /* adjust umc and channel index offset,
+	 * the register address is not linear on each umc instance */
+ umc_inst = index / 4;
+ ch_inst = index % 4;
+
return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst;
}
@@ -58,49 +65,74 @@ static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev,
}
static void umc_v6_7_ecc_info_query_correctable_error_count(struct amdgpu_device *adev,
- uint32_t channel_index,
+ uint32_t umc_inst, uint32_t ch_inst,
unsigned long *error_count)
{
- uint32_t ecc_err_cnt;
uint64_t mc_umc_status;
+ uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- /*
- * select the lower chip and check the error count
- * skip add error count, calc error counter only from mca_umc_status
- */
- ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_lo_chip;
-
- /*
- * select the higher chip and check the err counter
- * skip add error count, calc error counter only from mca_umc_status
- */
- ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_hi_chip;
-
+ eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
/* check for SRAM correctable error
MCUMC_STATUS is a 64 bit register */
- mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
+ mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
*error_count += 1;
}
static void umc_v6_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_device *adev,
- uint32_t channel_index,
+ uint32_t umc_inst, uint32_t ch_inst,
unsigned long *error_count)
{
uint64_t mc_umc_status;
+ uint32_t eccinfo_table_idx;
+ uint32_t umc_reg_offset;
+ uint32_t mc_umc_addr;
+ uint64_t reg_value;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+ umc_inst, ch_inst);
+
+ eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
/* check the MCUMC_STATUS */
- mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
+ mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) {
*error_count += 1;
+
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)
+ dev_info(adev->dev, "Deferred error, no user action is needed.\n");
+
+ if (mc_umc_status)
+ dev_info(adev->dev, "MCA STATUS 0x%llx, umc_reg_offset 0x%x\n", mc_umc_status, umc_reg_offset);
+
+ /* print IPID registers value */
+ mc_umc_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_IPIDT0);
+ reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
+ if (reg_value)
+ dev_info(adev->dev, "MCA IPID 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
+
+ /* print SYND registers value */
+ mc_umc_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_SYNDT0);
+ reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
+ if (reg_value)
+ dev_info(adev->dev, "MCA SYND 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
+
+ /* print MISC0 registers value */
+ mc_umc_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_MISC0T0);
+ reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
+ if (reg_value)
+ dev_info(adev->dev, "MCA MISC0 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
+ }
}
static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
@@ -110,42 +142,34 @@ static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
uint32_t umc_inst = 0;
uint32_t ch_inst = 0;
- uint32_t umc_reg_offset = 0;
- uint32_t channel_index = 0;
/*TODO: driver needs to toggle DF Cstate to ensure
* safe access of UMC registers. Will add the protection */
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
- umc_reg_offset = get_umc_v6_7_reg_offset(adev,
- umc_inst,
- ch_inst);
- channel_index = get_umc_v6_7_channel_index(adev,
- umc_inst,
- ch_inst);
umc_v6_7_ecc_info_query_correctable_error_count(adev,
- channel_index,
+ umc_inst, ch_inst,
&(err_data->ce_count));
umc_v6_7_ecc_info_querry_uncorrectable_error_count(adev,
- channel_index,
+ umc_inst, ch_inst,
&(err_data->ue_count));
}
}
static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
- uint32_t umc_reg_offset,
uint32_t ch_inst,
uint32_t umc_inst)
{
- uint64_t mc_umc_status, err_addr, retired_page;
- struct eeprom_table_record *err_rec;
+ uint64_t mc_umc_status, err_addr, soc_pa, retired_page, column;
uint32_t channel_index;
+ uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
channel_index =
adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
- mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
+ mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (mc_umc_status == 0)
return;
@@ -153,34 +177,41 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
if (!err_data->err_addr)
return;
- err_rec = &err_data->err_addr[err_data->err_addr_cnt];
-
/* calculate error address if ue/ce error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
- err_addr = ras->umc_ecc.ecc[channel_index].mca_umc_addr;
+ err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
/* translate umc channel address to soc pa, 3 parts are included */
- retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+ soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
ADDR_OF_256B_BLOCK(channel_index) |
OFFSET_IN_256B_BLOCK(err_addr);
+ /* The umc channel bits are not original values, they are hashed */
+ SET_CHANNEL_HASH(channel_index, soc_pa);
+
+ /* clear [C4 C3 C2] in soc physical address */
+ soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
+
/* we only save ue error information currently, ce is skipped */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
== 1) {
- err_rec->address = err_addr;
- /* page frame address is saved */
- err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
- err_rec->ts = (uint64_t)ktime_get_real_seconds();
- err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
- err_rec->cu = 0;
- err_rec->mem_channel = channel_index;
- err_rec->mcumc_id = umc_inst;
-
- err_data->err_addr_cnt++;
+ /* loop for all possibilities of [C4 C3 C2] */
+ for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
+ retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+
+ /* shift R14 bit */
+ retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+ }
}
}
}
@@ -192,18 +223,13 @@ static void umc_v6_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev
uint32_t umc_inst = 0;
uint32_t ch_inst = 0;
- uint32_t umc_reg_offset = 0;
/*TODO: driver needs to toggle DF Cstate to ensure
* safe access of UMC registers. Will add the protection
* when firmware interface is ready */
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
- umc_reg_offset = get_umc_v6_7_reg_offset(adev,
- umc_inst,
- ch_inst);
umc_v6_7_ecc_info_query_error_address(adev,
err_data,
- umc_reg_offset,
ch_inst,
umc_inst);
}
@@ -261,6 +287,8 @@ static void umc_v6_7_querry_uncorrectable_error_count(struct amdgpu_device *adev
{
uint64_t mc_umc_status;
uint32_t mc_umc_status_addr;
+ uint32_t mc_umc_addr;
+ uint64_t reg_value;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -272,8 +300,36 @@ static void umc_v6_7_querry_uncorrectable_error_count(struct amdgpu_device *adev
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) {
*error_count += 1;
+
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)
+ dev_info(adev->dev, "Deferred error, no user action is needed.\n");
+
+ if (mc_umc_status)
+ dev_info(adev->dev, "MCA STATUS 0x%llx, umc_reg_offset 0x%x\n", mc_umc_status, umc_reg_offset);
+
+ /* print IPID registers value */
+ mc_umc_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_IPIDT0);
+ reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
+ if (reg_value)
+ dev_info(adev->dev, "MCA IPID 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
+
+ /* print SYND registers value */
+ mc_umc_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_SYNDT0);
+ reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
+ if (reg_value)
+ dev_info(adev->dev, "MCA SYND 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
+
+ /* print MISC0 registers value */
+ mc_umc_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_MISC0T0);
+ reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
+ if (reg_value)
+ dev_info(adev->dev, "MCA MISC0 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
+ }
}
static void umc_v6_7_reset_error_count_per_channel(struct amdgpu_device *adev,
@@ -365,9 +421,9 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
uint32_t umc_inst)
{
uint32_t mc_umc_status_addr;
- uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
- struct eeprom_table_record *err_rec;
uint32_t channel_index;
+ uint64_t mc_umc_status, mc_umc_addrt0;
+ uint64_t err_addr, soc_pa, retired_page, column;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -385,8 +441,6 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
return;
}
- err_rec = &err_data->err_addr[err_data->err_addr_cnt];
-
channel_index =
adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
@@ -399,23 +453,32 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
/* translate umc channel address to soc pa, 3 parts are included */
- retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+ soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
ADDR_OF_256B_BLOCK(channel_index) |
OFFSET_IN_256B_BLOCK(err_addr);
+ /* The umc channel bits are not original values, they are hashed */
+ SET_CHANNEL_HASH(channel_index, soc_pa);
+
+ /* clear [C4 C3 C2] in soc physical address */
+ soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
+
/* we only save ue error information currently, ce is skipped */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
== 1) {
- err_rec->address = err_addr;
- /* page frame address is saved */
- err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
- err_rec->ts = (uint64_t)ktime_get_real_seconds();
- err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
- err_rec->cu = 0;
- err_rec->mem_channel = channel_index;
- err_rec->mcumc_id = umc_inst;
-
- err_data->err_addr_cnt++;
+ /* loop for all possibilities of [C4 C3 C2] */
+ for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
+ retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+
+ /* flip the R14 bit */
+ retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+ }
}
}
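The new loop above fans one normalized UMC address out to every physical page it can alias: bits [C4 C3 C2] are cleared from the hashed soc_pa, all eight column combinations are generated, and each candidate is duplicated with the R14 bit flipped, giving UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL (16) records per error. A standalone sketch of that expansion, using the bit positions added to umc_v6_7.h later in this patch and assuming soc_pa has already been translated and channel-hashed:

#include <stdint.h>
#include <stdio.h>

#define UMC_V6_7_NA_MAP_PA_NUM	8	/* 8 [C4 C3 C2] combinations */
#define UMC_V6_7_PA_C2_BIT	17
#define UMC_V6_7_PA_R14_BIT	34

/* Expand one soc_pa into the 16 pages that may need retiring. */
static void expand_bad_pages(uint64_t soc_pa)
{
	uint64_t retired_page, column;

	/* clear [C4 C3 C2] in the soc physical address */
	soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);

	for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
		retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
		printf("Error Address(PA): 0x%llx\n",
		       (unsigned long long)retired_page);

		/* same page with the R14 bit flipped */
		retired_page ^= 0x1ULL << UMC_V6_7_PA_R14_BIT;
		printf("Error Address(PA): 0x%llx\n",
		       (unsigned long long)retired_page);
	}
}

int main(void)
{
	expand_bad_pages(0x123456789ULL);	/* arbitrary example address */
	return 0;
}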
@@ -463,28 +526,24 @@ static uint32_t umc_v6_7_query_ras_poison_mode_per_channel(
static bool umc_v6_7_query_ras_poison_mode(struct amdgpu_device *adev)
{
- uint32_t umc_inst = 0;
- uint32_t ch_inst = 0;
uint32_t umc_reg_offset = 0;
- LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
- umc_reg_offset = get_umc_v6_7_reg_offset(adev,
- umc_inst,
- ch_inst);
- /* Enabling fatal error in one channel will be considered
- as fatal error mode */
- if (umc_v6_7_query_ras_poison_mode_per_channel(adev, umc_reg_offset))
- return false;
- }
-
- return true;
+ /* Enabling fatal error in umc instance0 channel0 will be
+ * considered as fatal error mode
+ */
+ umc_reg_offset = get_umc_v6_7_reg_offset(adev, 0, 0);
+ return !umc_v6_7_query_ras_poison_mode_per_channel(adev, umc_reg_offset);
}
-const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = {
- .ras_late_init = amdgpu_umc_ras_late_init,
- .ras_fini = amdgpu_umc_ras_fini,
+const struct amdgpu_ras_block_hw_ops umc_v6_7_ras_hw_ops = {
.query_ras_error_count = umc_v6_7_query_ras_error_count,
.query_ras_error_address = umc_v6_7_query_ras_error_address,
+};
+
+struct amdgpu_umc_ras umc_v6_7_ras = {
+ .ras_block = {
+ .hw_ops = &umc_v6_7_ras_hw_ops,
+ },
.query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h
index 57f2557e7aca..fe41ed2f5945 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h
@@ -41,9 +41,32 @@
#define UMC_V6_7_CHANNEL_INSTANCE_NUM 8
/* total channel instances in one umc block */
#define UMC_V6_7_TOTAL_CHANNEL_NUM (UMC_V6_7_CHANNEL_INSTANCE_NUM * UMC_V6_7_UMC_INSTANCE_NUM)
+/* one piece of normalized address is mapped to 8 pieces of physical address */
+#define UMC_V6_7_NA_MAP_PA_NUM 8
+/* the R14 bit may also differ, so double the number */
+#define UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL (UMC_V6_7_NA_MAP_PA_NUM * 2)
+/* The CH4 bit in SOC physical address */
+#define UMC_V6_7_PA_CH4_BIT 12
+/* The C2 bit in SOC physical address */
+#define UMC_V6_7_PA_C2_BIT 17
+/* The R14 bit in SOC physical address */
+#define UMC_V6_7_PA_R14_BIT 34
/* UMC register per channel offset */
#define UMC_V6_7_PER_CHANNEL_OFFSET 0x400
-extern const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs;
+
+/* XOR bits 20, 25 and 34 of the PA into the CH4 bit (bit 12 of the PA);
+ * each hash bit is only effective when the related setting is enabled
+ */
+#define CHANNEL_HASH(channel_idx, pa) (((channel_idx) >> 4) ^ \
+ (((pa) >> 20) & 0x1ULL & adev->df.hash_status.hash_64k) ^ \
+ (((pa) >> 25) & 0x1ULL & adev->df.hash_status.hash_2m) ^ \
+ (((pa) >> 34) & 0x1ULL & adev->df.hash_status.hash_1g))
+#define SET_CHANNEL_HASH(channel_idx, pa) do { \
+ (pa) &= ~(0x1ULL << UMC_V6_7_PA_CH4_BIT); \
+ (pa) |= (CHANNEL_HASH(channel_idx, pa) << UMC_V6_7_PA_CH4_BIT); \
+ } while (0)
+
+extern struct amdgpu_umc_ras umc_v6_7_ras;
extern const uint32_t
umc_v6_7_channel_idx_tbl_second[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM];
extern const uint32_t
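CHANNEL_HASH() XORs bit 4 of the channel index with PA bits 20, 25 and 34, each gated by the corresponding DF hash enable, and SET_CHANNEL_HASH() writes the result back into the CH4 bit of the physical address. A sketch of the same computation with the adev->df.hash_status enables replaced by plain constants (assumed enabled here):

#include <stdint.h>
#include <stdio.h>

#define PA_CH4_BIT 12

/* Stand-ins for adev->df.hash_status.* -- assumed enabled in this sketch. */
static const uint64_t hash_64k = 1, hash_2m = 1, hash_1g = 1;

static uint64_t channel_hash(uint32_t channel_idx, uint64_t pa)
{
	/* masked to one bit for clarity; channel_idx stays below 32 anyway */
	return ((channel_idx >> 4) ^
		((pa >> 20) & 0x1ULL & hash_64k) ^
		((pa >> 25) & 0x1ULL & hash_2m) ^
		((pa >> 34) & 0x1ULL & hash_1g)) & 0x1ULL;
}

/* Replace the CH4 bit of the PA with the hashed value. */
static uint64_t set_channel_hash(uint32_t channel_idx, uint64_t pa)
{
	pa &= ~(0x1ULL << PA_CH4_BIT);
	pa |= channel_hash(channel_idx, pa) << PA_CH4_BIT;
	return pa;
}

int main(void)
{
	uint64_t pa = 0x102345000ULL;	/* arbitrary example */

	printf("hashed pa = 0x%llx\n",
	       (unsigned long long)set_channel_hash(17, pa));
	return 0;
}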
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
index af59a35788e3..f35253e0eaa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -40,13 +40,137 @@ const uint32_t
{9, 0}, {15, 6}
};
-static inline uint32_t get_umc_8_reg_offset(struct amdgpu_device *adev,
+static inline uint32_t get_umc_v8_7_reg_offset(struct amdgpu_device *adev,
uint32_t umc_inst,
uint32_t ch_inst)
{
return adev->umc.channel_offs*ch_inst + UMC_8_INST_DIST*umc_inst;
}
+static void umc_v8_7_ecc_info_query_correctable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_inst, uint32_t ch_inst,
+ unsigned long *error_count)
+{
+ uint64_t mc_umc_status;
+ uint32_t eccinfo_table_idx;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
+
+ /* check for SRAM correctable error
+ * MCUMC_STATUS is a 64 bit register
+ */
+ mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+ *error_count += 1;
+}
+
+static void umc_v8_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_inst, uint32_t ch_inst,
+ unsigned long *error_count)
+{
+ uint64_t mc_umc_status;
+ uint32_t eccinfo_table_idx;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
+
+ /* check the MCUMC_STATUS */
+ mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
+ if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+ *error_count += 1;
+}
+
+static void umc_v8_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+
+ /* TODO: driver needs to toggle DF Cstate to ensure
+ * safe access of UMC registers. Will add the protection
+ */
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_v8_7_ecc_info_query_correctable_error_count(adev,
+ umc_inst, ch_inst,
+ &(err_data->ce_count));
+ umc_v8_7_ecc_info_querry_uncorrectable_error_count(adev,
+ umc_inst, ch_inst,
+ &(err_data->ue_count));
+ }
+}
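Both ecc_info query paths walk every (umc instance, channel) pair and index the cached ECC table with umc_inst * channel_inst_num + ch_inst. A trivial sketch of that flattened indexing; the instance and channel counts here are illustrative, the real ones come from the UMC_V8_7_* defines:

#include <stdint.h>
#include <stdio.h>

/* Illustrative sizes; umc_v8_7.h holds the real instance/channel counts. */
#define UMC_INSTANCE_NUM	2
#define CHANNEL_INSTANCE_NUM	2

int main(void)
{
	uint32_t umc_inst, ch_inst, eccinfo_table_idx;

	/* the equivalent of LOOP_UMC_INST_AND_CH() */
	for (umc_inst = 0; umc_inst < UMC_INSTANCE_NUM; umc_inst++) {
		for (ch_inst = 0; ch_inst < CHANNEL_INSTANCE_NUM; ch_inst++) {
			eccinfo_table_idx =
				umc_inst * CHANNEL_INSTANCE_NUM + ch_inst;
			printf("umc %u ch %u -> ecc table entry %u\n",
			       umc_inst, ch_inst, eccinfo_table_idx);
		}
	}
	return 0;
}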
+
+static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ uint32_t ch_inst,
+ uint32_t umc_inst)
+{
+ uint64_t mc_umc_status, err_addr, retired_page;
+ uint32_t channel_index;
+ uint32_t eccinfo_table_idx;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+ mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
+
+ if (mc_umc_status == 0)
+ return;
+
+ if (!err_data->err_addr)
+ return;
+
+ /* calculate error address if ue/ce error is detected */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+
+ err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
+ err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* we only save ue error information currently, ce is skipped */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+ == 1)
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+ }
+}
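As in the register-based path further down, the retired page is assembled from three parts of the normalized error address: the 4KB block, the 256-byte channel slot, and the offset within the 256-byte block. A sketch with plausible definitions for the helper macros; the authoritative versions live in amdgpu_umc.h, so treat these as assumptions:

#include <stdint.h>
#include <stdio.h>

/* Assumed shapes of the amdgpu_umc.h helpers: the normalized address is
 * 256B-granular, and 16 channels interleave every 256 bytes within a 4KB
 * block, so the block part is shifted up by 4 bits. */
#define ADDR_OF_4KB_BLOCK(addr)		(((addr) & ~0xffULL) << 4)
#define ADDR_OF_256B_BLOCK(ch)		((uint64_t)(ch) << 8)
#define OFFSET_IN_256B_BLOCK(addr)	((addr) & 0xffULL)

static uint64_t umc_na_to_pa(uint64_t err_addr, uint32_t channel_index)
{
	/* translate umc channel address to soc pa, 3 parts are included */
	return ADDR_OF_4KB_BLOCK(err_addr) |
	       ADDR_OF_256B_BLOCK(channel_index) |
	       OFFSET_IN_256B_BLOCK(err_addr);
}

int main(void)
{
	uint64_t pa = umc_na_to_pa(0x1234ULL, 5);	/* arbitrary inputs */

	printf("retired page base: 0x%llx\n", (unsigned long long)pa);
	printf("page frame number: 0x%llx\n",
	       (unsigned long long)(pa >> 12));	/* AMDGPU_GPU_PAGE_SHIFT */
	return 0;
}

amdgpu_umc_fill_error_record() then stores the page frame number in the EEPROM record, as the open-coded err_rec fill removed later in this file's diff shows.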
+
+static void umc_v8_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+
+ /* TODO: driver needs to toggle DF Cstate to ensure
+ * safe access of UMC registers. Will add the protection
+ * when firmware interface is ready
+ */
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_v8_7_ecc_info_query_error_address(adev,
+ err_data,
+ ch_inst,
+ umc_inst);
+ }
+}
+
static void umc_v8_7_clear_error_count_per_channel(struct amdgpu_device *adev,
uint32_t umc_reg_offset)
{
@@ -92,7 +216,7 @@ static void umc_v8_7_clear_error_count(struct amdgpu_device *adev)
uint32_t umc_reg_offset = 0;
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
- umc_reg_offset = get_umc_8_reg_offset(adev,
+ umc_reg_offset = get_umc_v8_7_reg_offset(adev,
umc_inst,
ch_inst);
@@ -178,7 +302,7 @@ static void umc_v8_7_query_ras_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset = 0;
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
- umc_reg_offset = get_umc_8_reg_offset(adev,
+ umc_reg_offset = get_umc_v8_7_reg_offset(adev,
umc_inst,
ch_inst);
@@ -201,7 +325,6 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
{
uint32_t lsb, mc_umc_status_addr;
uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
- struct eeprom_table_record *err_rec;
uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
mc_umc_status_addr =
@@ -220,8 +343,6 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
return;
}
- err_rec = &err_data->err_addr[err_data->err_addr_cnt];
-
/* calculate error address if ue/ce error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
@@ -240,18 +361,9 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
/* we only save ue error information currently, ce is skipped */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1) {
- err_rec->address = err_addr;
- /* page frame address is saved */
- err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
- err_rec->ts = (uint64_t)ktime_get_real_seconds();
- err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
- err_rec->cu = 0;
- err_rec->mem_channel = channel_index;
- err_rec->mcumc_id = umc_inst;
-
- err_data->err_addr_cnt++;
- }
+ == 1)
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
}
/* clear umc status */
@@ -268,7 +380,7 @@ static void umc_v8_7_query_ras_error_address(struct amdgpu_device *adev,
uint32_t umc_reg_offset = 0;
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
- umc_reg_offset = get_umc_8_reg_offset(adev,
+ umc_reg_offset = get_umc_v8_7_reg_offset(adev,
umc_inst,
ch_inst);
@@ -316,7 +428,7 @@ static void umc_v8_7_err_cnt_init(struct amdgpu_device *adev)
uint32_t umc_reg_offset = 0;
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
- umc_reg_offset = get_umc_8_reg_offset(adev,
+ umc_reg_offset = get_umc_v8_7_reg_offset(adev,
umc_inst,
ch_inst);
@@ -324,10 +436,16 @@ static void umc_v8_7_err_cnt_init(struct amdgpu_device *adev)
}
}
-const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs = {
- .err_cnt_init = umc_v8_7_err_cnt_init,
- .ras_late_init = amdgpu_umc_ras_late_init,
- .ras_fini = amdgpu_umc_ras_fini,
+const struct amdgpu_ras_block_hw_ops umc_v8_7_ras_hw_ops = {
.query_ras_error_count = umc_v8_7_query_ras_error_count,
.query_ras_error_address = umc_v8_7_query_ras_error_address,
};
+
+struct amdgpu_umc_ras umc_v8_7_ras = {
+ .ras_block = {
+ .hw_ops = &umc_v8_7_ras_hw_ops,
+ },
+ .err_cnt_init = umc_v8_7_err_cnt_init,
+ .ecc_info_query_ras_error_count = umc_v8_7_ecc_info_query_ras_error_count,
+ .ecc_info_query_ras_error_address = umc_v8_7_ecc_info_query_ras_error_address,
+};
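As with umc_v6_7 above, the register-touching callbacks move into a shared amdgpu_ras_block_hw_ops table reached through ras_block, while err_cnt_init and the ecc_info_* callbacks stay on the per-IP amdgpu_umc_ras struct. A stripped-down model of that split; the struct layouts here are simplified stand-ins, not the real amdgpu definitions:

#include <stdio.h>

/* Simplified stand-ins for the amdgpu RAS structures. */
struct ras_hw_ops {
	void (*query_ras_error_count)(void *ctx);
	void (*query_ras_error_address)(void *ctx);
};

struct ras_block {
	const struct ras_hw_ops *hw_ops;
};

struct umc_ras {
	struct ras_block ras_block;		/* common part */
	void (*err_cnt_init)(void *ctx);	/* IP-specific part */
};

static void query_count(void *ctx)   { puts("query error count"); }
static void query_address(void *ctx) { puts("query error address"); }
static void cnt_init(void *ctx)      { puts("init error counters"); }

static const struct ras_hw_ops demo_hw_ops = {
	.query_ras_error_count = query_count,
	.query_ras_error_address = query_address,
};

static struct umc_ras demo_umc_ras = {
	.ras_block = { .hw_ops = &demo_hw_ops },
	.err_cnt_init = cnt_init,
};

int main(void)
{
	/* common RAS code only sees ras_block.hw_ops ... */
	demo_umc_ras.ras_block.hw_ops->query_ras_error_count(NULL);
	demo_umc_ras.ras_block.hw_ops->query_ras_error_address(NULL);
	/* ... while UMC-specific code keeps its own callbacks. */
	demo_umc_ras.err_cnt_init(NULL);
	return 0;
}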
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
index 37e6dc7c28e0..dd4993f5f78f 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
@@ -44,7 +44,7 @@
/* umc ce count initial value */
#define UMC_V8_7_CE_CNT_INIT (UMC_V8_7_CE_CNT_MAX - UMC_V8_7_CE_INT_THRESHOLD)
-extern const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs;
+extern struct amdgpu_umc_ras umc_v8_7_ras;
extern const uint32_t
umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM];
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index b483f03b4591..2f15b8e0f7d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_uvd.h"
+#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
@@ -1275,14 +1276,15 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
* uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
*
* @p: the CS parser with the IBs
- * @ib_idx: which IB to patch
+ * @job: which job this ib is in
+ * @ib: which IB to patch
*
*/
static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
- uint32_t ib_idx)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
- struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+ struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
unsigned i;
/* No patching necessary for the first instance */
@@ -1290,12 +1292,12 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
return 0;
for (i = 0; i < ib->length_dw; i += 2) {
- uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
+ uint32_t reg = amdgpu_ib_get_value(ib, i);
reg -= p->adev->reg_offset[UVD_HWIP][0][1];
reg += p->adev->reg_offset[UVD_HWIP][1][1];
- amdgpu_set_ib_value(p, ib_idx, i, reg);
+ amdgpu_ib_set_value(ib, i, reg);
}
return 0;
}
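uvd_v7_0_ring_patch_cs_in_place() still rewrites every (register, value) pair in the IB so that writes aimed at UVD instance 0 land on instance 1; the only change is that the IB is passed in directly and accessed with amdgpu_ib_get_value()/amdgpu_ib_set_value() instead of going through a parser index. A standalone sketch of the rebasing loop, with made-up per-instance register bases:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register bases for two UVD instances. */
#define UVD0_BASE 0x7800
#define UVD1_BASE 0x7e00

/* An IB is a flat array of dwords; here it alternates (reg, value) pairs. */
static uint32_t ib_get_value(const uint32_t *ib, unsigned idx) { return ib[idx]; }
static void ib_set_value(uint32_t *ib, unsigned idx, uint32_t v) { ib[idx] = v; }

static void patch_cs_in_place(uint32_t *ib, unsigned length_dw)
{
	unsigned i;

	for (i = 0; i < length_dw; i += 2) {
		uint32_t reg = ib_get_value(ib, i);

		/* rebase the register from instance 0 to instance 1 */
		reg -= UVD0_BASE;
		reg += UVD1_BASE;
		ib_set_value(ib, i, reg);
	}
}

int main(void)
{
	uint32_t ib[4] = { UVD0_BASE + 0x10, 0xdeadbeef,
			   UVD0_BASE + 0x14, 0xcafef00d };

	patch_cs_in_place(ib, 4);
	printf("patched regs: 0x%x 0x%x\n", ib[0], ib[2]);
	return 0;
}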
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 3799226defc0..dff54190b96c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -148,6 +148,13 @@ static int vcn_v1_0_sw_init(void *handle)
adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
+ if (amdgpu_vcnfw_log) {
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+
+ fw_shared->present_flag_0 = 0;
+ amdgpu_vcn_fwlog_init(adev->vcn.inst);
+ }
+
r = jpeg_v1_0_sw_init(handle);
return r;
@@ -1095,13 +1102,8 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- r = vcn_v1_0_start_dpg_mode(adev);
- else
- r = vcn_v1_0_start_spg_mode(adev);
- return r;
+ return (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ?
+ vcn_v1_0_start_dpg_mode(adev) : vcn_v1_0_start_spg_mode(adev);
}
/**
@@ -1903,6 +1905,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.align_mask = 0xf,
.support_64bit_ptrs = false,
.no_user_fence = true,
+ .secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v1_0_dec_ring_get_rptr,
.get_wptr = vcn_v1_0_dec_ring_get_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 313fc1b53999..7a7f35e83dd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -172,8 +172,12 @@ static int vcn_v2_0_sw_init(void *handle)
if (r)
return r;
- fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
+ fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(adev->vcn.inst);
+
return 0;
}
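Throughout the VCN files the separate fw_shared_cpu_addr/fw_shared_gpu_addr fields become a single fw_shared struct with .cpu_addr and .gpu_addr members, which the new amdgpu_vcnfw_log path reuses. A rough model of that grouping; only the cpu_addr/gpu_addr pairing is taken from the patch, the other fields are guesses:

#include <stdint.h>
#include <stdio.h>

/* Rough model of the grouped firmware-shared buffer bookkeeping. */
struct fw_shared_buf {
	void *cpu_addr;		/* CPU mapping used by the driver */
	uint64_t gpu_addr;	/* address programmed into the VCPU windows */
	uint32_t mem_size;	/* assumed field */
};

struct fw_shared_flags {
	uint32_t present_flag_0;
};

int main(void)
{
	static struct fw_shared_flags backing;	/* stands in for the real BO */
	struct fw_shared_buf fw_shared = {
		.cpu_addr = &backing,
		.gpu_addr = 0x80000000ULL,	/* made-up GPU VA */
		.mem_size = sizeof(backing),
	};
	struct fw_shared_flags *flags = fw_shared.cpu_addr;

	flags->present_flag_0 |= 1u << 0;	/* e.g. a MULTI_QUEUE-style flag */
	printf("low 32 bits of gpu_addr: 0x%x\n",
	       (uint32_t)(fw_shared.gpu_addr & 0xffffffff));
	return 0;
}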
@@ -188,7 +192,7 @@ static int vcn_v2_0_sw_fini(void *handle)
{
int r, idx;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
fw_shared->present_flag_0 = 0;
@@ -364,9 +368,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
/* non-cache window */
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
+ lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
+ upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0,
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
@@ -455,10 +459,10 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
/* non-cache window */
WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
- lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
+ lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
- upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
+ upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
@@ -784,7 +788,7 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
@@ -921,7 +925,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
static int vcn_v2_0_start(struct amdgpu_device *adev)
{
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -1094,8 +1098,10 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
{
+ struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
+ vcn_v2_0_pause_dpg_mode(adev, 0, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -1207,7 +1213,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
@@ -2003,6 +2009,7 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
+ .secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v2_0_dec_ring_get_rptr,
.get_wptr = vcn_v2_0_dec_ring_get_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 44fc4c218433..1bf672966a62 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -196,8 +196,11 @@ static int vcn_v2_5_sw_init(void *handle)
return r;
}
- fw_shared = adev->vcn.inst[j].fw_shared_cpu_addr;
+ fw_shared = adev->vcn.inst[j].fw_shared.cpu_addr;
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
}
if (amdgpu_sriov_vf(adev)) {
@@ -229,7 +232,7 @@ static int vcn_v2_5_sw_fini(void *handle)
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
}
drm_dev_exit(idx);
@@ -423,9 +426,9 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
/* non-cache window */
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
+ lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
+ upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
@@ -513,10 +516,10 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
/* non-cache window */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
- lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
+ lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
- upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
+ upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
@@ -757,7 +760,7 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
@@ -981,7 +984,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
vcn_v2_5_mc_resume(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
if (adev->vcn.harvest_config & (1 << i))
continue;
/* VCN global tiling registers */
@@ -1403,7 +1406,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
@@ -1512,6 +1515,7 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
+ .secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_1,
.get_rptr = vcn_v2_5_dec_ring_get_rptr,
.get_wptr = vcn_v2_5_dec_ring_get_wptr,
@@ -1542,6 +1546,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
static const struct amdgpu_ring_funcs vcn_v2_6_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
+ .secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v2_5_dec_ring_get_rptr,
.get_wptr = vcn_v2_5_dec_ring_get_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index da11ceba0698..c87263ed20ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
+#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
@@ -213,11 +214,14 @@ static int vcn_v3_0_sw_init(void *handle)
return r;
}
- fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) |
cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
}
if (amdgpu_sriov_vf(adev)) {
@@ -249,7 +253,7 @@ static int vcn_v3_0_sw_fini(void *handle)
if (adev->vcn.harvest_config & (1 << i))
continue;
- fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
fw_shared->sw_ring.is_enabled = false;
}
@@ -295,6 +299,7 @@ static int vcn_v3_0_hw_init(void *handle)
ring = &adev->vcn.inst[i].ring_dec;
if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i)) {
ring->sched.ready = false;
+ ring->no_scheduler = true;
dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
} else {
ring->wptr = 0;
@@ -307,6 +312,7 @@ static int vcn_v3_0_hw_init(void *handle)
ring = &adev->vcn.inst[i].ring_enc[j];
if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
ring->sched.ready = false;
+ ring->no_scheduler = true;
dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
} else {
ring->wptr = 0;
@@ -469,9 +475,9 @@ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
/* non-cache window */
WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.inst[inst].fw_shared_gpu_addr));
+ lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.inst[inst].fw_shared_gpu_addr));
+ upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_SIZE0,
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
@@ -558,10 +564,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
/* non-cache window */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
- lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
+ lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
- upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
+ upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
@@ -923,7 +929,7 @@ static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
@@ -1220,7 +1226,7 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
- fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
/* program the RB_BASE for ring buffer */
@@ -1611,7 +1617,7 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) {
/* Restore */
- fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
+ fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
ring->wptr = 0;
@@ -1700,7 +1706,7 @@ static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
/* whenever we update RBC_RB_WPTR, save the wptr in shared rb.wptr and scratch2 */
- fw_shared = adev->vcn.inst[ring->me].fw_shared_cpu_addr;
+ fw_shared = adev->vcn.inst[ring->me].fw_shared.cpu_addr;
fw_shared->rb.wptr = lower_32_bits(ring->wptr);
WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
lower_32_bits(ring->wptr));
@@ -1780,6 +1786,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0x3f,
.nop = VCN_DEC_SW_CMD_NO_OP,
+ .secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v3_0_dec_ring_get_rptr,
.get_wptr = vcn_v3_0_dec_ring_get_wptr,
@@ -1806,21 +1813,23 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
-static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
+static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job)
{
struct drm_gpu_scheduler **scheds;
/* The create msg must be in the first IB submitted */
- if (atomic_read(&p->entity->fence_seq))
+ if (atomic_read(&job->base.entity->fence_seq))
return -EINVAL;
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
[AMDGPU_RING_PRIO_DEFAULT].sched;
- drm_sched_entity_modify_sched(p->entity, scheds, 1);
+ drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
return 0;
}
-static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
+static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
+ uint64_t addr)
{
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo_va_mapping *map;
@@ -1891,7 +1900,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
continue;
- r = vcn_v3_0_limit_sched(p);
+ r = vcn_v3_0_limit_sched(p, job);
if (r)
goto out;
}
@@ -1902,10 +1911,10 @@ out:
}
static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
- uint32_t ib_idx)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
- struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+ struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
uint32_t msg_lo = 0, msg_hi = 0;
unsigned i;
int r;
@@ -1915,8 +1924,8 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
return 0;
for (i = 0; i < ib->length_dw; i += 2) {
- uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
- uint32_t val = amdgpu_get_ib_value(p, ib_idx, i + 1);
+ uint32_t reg = amdgpu_ib_get_value(ib, i);
+ uint32_t val = amdgpu_ib_get_value(ib, i + 1);
if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
msg_lo = val;
@@ -1924,7 +1933,8 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
msg_hi = val;
} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
val == 0) {
- r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
+ r = vcn_v3_0_dec_msg(p, job,
+ ((u64)msg_hi) << 32 | msg_lo);
if (r)
return r;
}
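The reworked parser scans the decode IB for writes to the internal data0/data1 registers, keeps them as the low/high halves of a message address, and calls vcn_v3_0_dec_msg() with the combined 64-bit address whenever a cmd write with value 0 is seen. A sketch of that scan over a fake IB; the register offsets are made up, the driver takes the real ones from adev->vcn.internal:

#include <stdint.h>
#include <stdio.h>

/* Made-up internal register offsets standing in for
 * adev->vcn.internal.{data0,data1,cmd}. */
#define REG_DATA0 0x10
#define REG_DATA1 0x11
#define REG_CMD   0x12

static int dec_msg(uint64_t addr)
{
	printf("decode message at 0x%llx\n", (unsigned long long)addr);
	return 0;
}

static int scan_ib(const uint32_t *ib, unsigned length_dw)
{
	uint32_t msg_lo = 0, msg_hi = 0;
	unsigned i;
	int r;

	for (i = 0; i < length_dw; i += 2) {
		uint32_t reg = ib[i];
		uint32_t val = ib[i + 1];

		if (reg == REG_DATA0)
			msg_lo = val;
		else if (reg == REG_DATA1)
			msg_hi = val;
		else if (reg == REG_CMD && val == 0) {
			r = dec_msg(((uint64_t)msg_hi << 32) | msg_lo);
			if (r)
				return r;
		}
	}
	return 0;
}

int main(void)
{
	uint32_t ib[] = { REG_DATA0, 0x1000, REG_DATA1, 0x2, REG_CMD, 0 };

	return scan_ib(ib, 6);
}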
@@ -1935,6 +1945,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
+ .secure_submission_supported = true,
.vmhub = AMDGPU_MMHUB_0,
.get_rptr = vcn_v3_0_dec_ring_get_rptr,
.get_wptr = vcn_v3_0_dec_ring_get_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 6645ebbd2696..039b90cdc3bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1140,7 +1140,7 @@ static void vi_program_aspm(struct amdgpu_device *adev)
bool bL1SS = false;
bool bClkReqSupport = true;
- if (!amdgpu_aspm)
+ if (!amdgpu_device_should_use_aspm(adev))
return;
if (adev->flags & AMD_IS_APU ||