Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
42 files changed, 1961 insertions, 891 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 68e9f584c570..a51c5a960750 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -62,11 +62,13 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o amdgpu-y += \ - vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o + vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \ + vega20_reg_init.o # add DF block amdgpu-y += \ - df_v1_7.o + df_v1_7.o \ + df_v3_6.o # add GMC block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 03a2c0be0bf2..a59c07590cee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1401,6 +1401,8 @@ struct amdgpu_df_funcs { bool enable); void (*get_clockgating_state)(struct amdgpu_device *adev, u32 *flags); + void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev, + bool enable); }; /* Define the HW IP blocks will be used in driver , add more if necessary */ enum amd_hw_ip_block_type { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index a0f48cb9b8f0..236915849cfe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -322,3 +322,47 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev) return ret; } + +union gfx_info { + struct atom_gfx_info_v2_4 v24; +}; + +int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + int index; + uint8_t frev, crev; + uint16_t data_offset; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + gfx_info); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + union gfx_info *gfx_info = (union gfx_info *) + (mode_info->atom_context->bios + data_offset); + switch (crev) { + case 4: + adev->gfx.config.max_shader_engines = gfx_info->v24.gc_num_se; + adev->gfx.config.max_cu_per_sh = gfx_info->v24.gc_num_cu_per_sh; + adev->gfx.config.max_sh_per_se = gfx_info->v24.gc_num_sh_per_se; + adev->gfx.config.max_backends_per_se = gfx_info->v24.gc_num_rb_per_se; + adev->gfx.config.max_texture_channel_caches = gfx_info->v24.gc_num_tccs; + adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs); + adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds; + adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth; + adev->gfx.config.gs_prim_buffer_depth = + le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth); + adev->gfx.config.double_offchip_lds_buf = + gfx_info->v24.gc_double_offchip_lds_buffer; + adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size); + adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd); + adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu; + adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size); + return 0; + default: + return -EINVAL; + } + + } + return -EINVAL; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h index 7689c961c4ef..20f158fd3b76 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h @@ -30,5 +30,6 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev); +int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 5b3d3bf5b599..e950730f1933 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -400,6 +400,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, case CHIP_VEGA12: strcpy(fw_name, "amdgpu/vega12_smc.bin"); break; + case CHIP_VEGA20: + strcpy(fw_name, "amdgpu/vega20_smc.bin"); + break; default: DRM_ERROR("SMC firmware not supported\n"); return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index a8e531d604fa..c5bb36275e93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -173,9 +173,14 @@ static void amdgpu_ctx_do_release(struct kref *ref) ctx = container_of(ref, struct amdgpu_ctx, refcount); - for (i = 0; i < ctx->adev->num_rings; i++) + for (i = 0; i < ctx->adev->num_rings; i++) { + + if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) + continue; + drm_sched_entity_fini(&ctx->adev->rings[i]->sched, &ctx->rings[i].entity); + } amdgpu_ctx_fini(ref); } @@ -452,12 +457,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) if (!ctx->adev) return; - for (i = 0; i < ctx->adev->num_rings; i++) + for (i = 0; i < ctx->adev->num_rings; i++) { + + if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) + continue; + if (kref_read(&ctx->refcount) == 1) drm_sched_entity_do_release(&ctx->adev->rings[i]->sched, &ctx->rings[i].entity); else DRM_ERROR("ctx %p is still alive\n", ctx); + } } } @@ -474,12 +484,17 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr) if (!ctx->adev) return; - for (i = 0; i < ctx->adev->num_rings; i++) + for (i = 0; i < ctx->adev->num_rings; i++) { + + if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) + continue; + if (kref_read(&ctx->refcount) == 1) drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched, &ctx->rings[i].entity); else DRM_ERROR("ctx %p is still alive\n", ctx); + } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 9fb20a53d5b2..290e279abf0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -86,6 +86,7 @@ static const char *amdgpu_asic_name[] = { "VEGAM", "VEGA10", "VEGA12", + "VEGA20", "RAVEN", "LAST", }; @@ -1387,6 +1388,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) case CHIP_KABINI: case CHIP_MULLINS: #endif + case CHIP_VEGA20: default: return 0; case CHIP_VEGA10: @@ -1521,6 +1523,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) #endif case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: case CHIP_RAVEN: if (adev->asic_type == CHIP_RAVEN) adev->family = AMDGPU_FAMILY_RV; @@ -1715,6 +1718,7 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev) /* skip CG for VCE/UVD, it's handled specially */ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && + adev->ip_blocks[i].version->type != 
AMD_IP_BLOCK_TYPE_VCN && adev->ip_blocks[i].version->funcs->set_clockgating_state) { /* enable clockgating to save power */ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, @@ -1814,6 +1818,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && + adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && adev->ip_blocks[i].version->funcs->set_clockgating_state) { /* ungate blocks before hw fini so that we can shutdown the blocks safely */ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, @@ -2155,6 +2160,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) case CHIP_FIJI: case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: #endif @@ -3172,7 +3178,6 @@ error: int amdgpu_device_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool force) { - struct drm_atomic_state *state = NULL; int i, r, resched; if (!force && !amdgpu_device_ip_check_soft_reset(adev)) { @@ -3195,10 +3200,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); - /* store modesetting */ - if (amdgpu_device_has_dc_support(adev)) - state = drm_atomic_helper_suspend(adev->ddev); - /* block all schedulers and reset given job's ring */ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; @@ -3238,10 +3239,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, kthread_unpark(ring->sched.thread); } - if (amdgpu_device_has_dc_support(adev)) { - if (drm_atomic_helper_resume(adev->ddev, state)) - dev_info(adev->dev, "drm resume failed:%d\n", r); - } else { + if (!amdgpu_device_has_dc_support(adev)) { drm_helper_resume_force_mode(adev->ddev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 739e7e09c8b0..b0bf2f24da48 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -560,6 +560,13 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x69A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, {0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, {0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, + /* Vega 20 */ + {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, /* Raven */ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index d09fcab2398f..39ec6b8890a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; uint64_t index; - if (ring != &adev->uvd.ring) { + if (ring != &adev->uvd.inst[ring->me].ring) { ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs]; ring->fence_drv.gpu_addr = 
adev->wb.gpu_addr + (ring->fence_offs * 4); } else { /* put fence directly behind firmware */ index = ALIGN(adev->uvd.fw->size, 8); - ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index; - ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index; + ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index; + ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index; } amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq)); amdgpu_irq_get(adev, irq_src, irq_type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index eb4785e51573..91517b166a3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_crtc *crtc; uint32_t ui32 = 0; uint64_t ui64 = 0; - int i, found; + int i, j, found; int ui32_size = sizeof(ui32); if (!info->return_size || !info->return_pointer) @@ -348,7 +348,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file break; case AMDGPU_HW_IP_UVD: type = AMD_IP_BLOCK_TYPE_UVD; - ring_mask = adev->uvd.ring.ready ? 1 : 0; + for (i = 0; i < adev->uvd.num_uvd_inst; i++) + ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i); ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; ib_size_alignment = 16; break; @@ -361,8 +362,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file break; case AMDGPU_HW_IP_UVD_ENC: type = AMD_IP_BLOCK_TYPE_UVD; - for (i = 0; i < adev->uvd.num_enc_rings; i++) - ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 : 0) << i); + for (i = 0; i < adev->uvd.num_uvd_inst; i++) + for (j = 0; j < adev->uvd.num_enc_rings; j++) + ring_mask |= + ((adev->uvd.inst[i].ring_enc[j].ready ? 
1 : 0) << + (j + i * adev->uvd.num_enc_rings)); ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; ib_size_alignment = 1; break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index c7d43e064fc7..9f1a5bd39ae8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -52,6 +52,7 @@ static int psp_sw_init(void *handle) switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: psp_v3_1_set_psp_funcs(psp); break; case CHIP_RAVEN: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c index 262c1267249e..8af16e81c7d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c @@ -66,6 +66,8 @@ static int amdgpu_identity_map(struct amdgpu_device *adev, u32 ring, struct amdgpu_ring **out_ring) { + u32 instance; + switch (mapper->hw_ip) { case AMDGPU_HW_IP_GFX: *out_ring = &adev->gfx.gfx_ring[ring]; @@ -77,13 +79,16 @@ static int amdgpu_identity_map(struct amdgpu_device *adev, *out_ring = &adev->sdma.instance[ring].ring; break; case AMDGPU_HW_IP_UVD: - *out_ring = &adev->uvd.ring; + instance = ring; + *out_ring = &adev->uvd.inst[instance].ring; break; case AMDGPU_HW_IP_VCE: *out_ring = &adev->vce.ring[ring]; break; case AMDGPU_HW_IP_UVD_ENC: - *out_ring = &adev->uvd.ring_enc[ring]; + instance = ring / adev->uvd.num_enc_rings; + *out_ring = + &adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings]; break; case AMDGPU_HW_IP_VCN_DEC: *out_ring = &adev->vcn.ring_dec; @@ -240,13 +245,14 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev, ip_num_rings = adev->sdma.num_instances; break; case AMDGPU_HW_IP_UVD: - ip_num_rings = 1; + ip_num_rings = adev->uvd.num_uvd_inst; break; case AMDGPU_HW_IP_VCE: ip_num_rings = adev->vce.num_rings; break; case AMDGPU_HW_IP_UVD_ENC: - ip_num_rings = adev->uvd.num_enc_rings; + ip_num_rings = + adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst; break; case AMDGPU_HW_IP_VCN_DEC: ip_num_rings = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 49cad08b5c16..c6850b629d0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -362,6 +362,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) dma_fence_put(ring->vmid_wait); ring->vmid_wait = NULL; + ring->me = 0; ring->adev->rings[ring->idx] = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 4f8dac2d36a5..1513124c5659 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -29,7 +29,7 @@ #include <drm/drm_print.h> /* max number of rings */ -#define AMDGPU_MAX_RINGS 18 +#define AMDGPU_MAX_RINGS 21 #define AMDGPU_MAX_GFX_RINGS 1 #define AMDGPU_MAX_COMPUTE_RINGS 8 #define AMDGPU_MAX_VCE_RINGS 3 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 69a2b25b3696..e93a0a237dc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -63,16 +63,44 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); /* * Global memory. */ + +/** + * amdgpu_ttm_mem_global_init - Initialize and acquire reference to + * memory object + * + * @ref: Object for initialization. + * + * This is called by drm_global_item_ref() when an object is being + * initialized. 
+ */ static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref) { return ttm_mem_global_init(ref->object); } +/** + * amdgpu_ttm_mem_global_release - Drop reference to a memory object + * + * @ref: Object being removed + * + * This is called by drm_global_item_unref() when an object is being + * released. + */ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref) { ttm_mem_global_release(ref->object); } +/** + * amdgpu_ttm_global_init - Initialize global TTM memory reference + * structures. + * + * @adev: AMDGPU device for which the global structures need to be + * registered. + * + * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init() + * during bring up. + */ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) { struct drm_global_reference *global_ref; @@ -80,7 +108,9 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) struct drm_sched_rq *rq; int r; + /* ensure reference is false in case init fails */ adev->mman.mem_global_referenced = false; + global_ref = &adev->mman.mem_global_ref; global_ref->global_type = DRM_GLOBAL_TTM_MEM; global_ref->size = sizeof(struct ttm_mem_global); @@ -146,6 +176,18 @@ static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) return 0; } +/** + * amdgpu_init_mem_type - Initialize a memory manager for a specific + * type of memory request. + * + * @bdev: The TTM BO device object (contains a reference to + * amdgpu_device) + * @type: The type of memory requested + * @man: + * + * This is called by ttm_bo_init_mm() when a buffer object is being + * initialized. + */ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, struct ttm_mem_type_manager *man) { @@ -161,6 +203,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_TT: + /* GTT memory */ man->func = &amdgpu_gtt_mgr_func; man->gpu_offset = adev->gmc.gart_start; man->available_caching = TTM_PL_MASK_CACHING; @@ -193,6 +236,14 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, return 0; } +/** + * amdgpu_evict_flags - Compute placement flags + * + * @bo: The buffer object to evict + * @placement: Possible destination(s) for evicted BO + * + * Fill in placement data when ttm_bo_evict() is called + */ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { @@ -204,12 +255,14 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM }; + /* Don't handle scatter gather BOs */ if (bo->type == ttm_bo_type_sg) { placement->num_placement = 0; placement->num_busy_placement = 0; return; } + /* Object isn't an AMDGPU object so ignore */ if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) { placement->placement = &placements; placement->busy_placement = &placements; @@ -217,10 +270,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, placement->num_busy_placement = 1; return; } + abo = ttm_to_amdgpu_bo(bo); switch (bo->mem.mem_type) { case TTM_PL_VRAM: if (!adev->mman.buffer_funcs_enabled) { + /* Move to system memory */ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size && !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && @@ -238,6 +293,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, abo->placement.busy_placement = &abo->placements[1]; abo->placement.num_busy_placement = 1; } else { + /* Move to 
GTT memory */ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); } break; @@ -248,6 +304,15 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, *placement = abo->placement; } +/** + * amdgpu_verify_access - Verify access for a mmap call + * + * @bo: The buffer object to map + * @filp: The file pointer from the process performing the mmap + * + * This is called by ttm_bo_mmap() to verify whether a process + * has the right to mmap a BO to their process space. + */ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) { struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); @@ -265,6 +330,15 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) filp->private_data); } +/** + * amdgpu_move_null - Register memory for a buffer object + * + * @bo: The bo to assign the memory to + * @new_mem: The memory to be assigned. + * + * Assign the memory from new_mem to the memory of the buffer object + * bo. + */ static void amdgpu_move_null(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { @@ -275,6 +349,10 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo, new_mem->mm_node = NULL; } +/** + * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT + * buffer. + */ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, struct drm_mm_node *mm_node, struct ttm_mem_reg *mem) @@ -289,9 +367,10 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, } /** - * amdgpu_find_mm_node - Helper function finds the drm_mm_node - * corresponding to @offset. It also modifies the offset to be - * within the drm_mm_node returned + * amdgpu_find_mm_node - Helper function finds the drm_mm_node + * corresponding to @offset. It also modifies + * the offset to be within the drm_mm_node + * returned */ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem, unsigned long *offset) @@ -430,7 +509,12 @@ error: return r; } - +/** + * amdgpu_move_blit - Copy an entire buffer to another buffer + * + * This is a helper called by amdgpu_bo_move() and + * amdgpu_move_vram_ram() to help move buffers to and from VRAM. + */ static int amdgpu_move_blit(struct ttm_buffer_object *bo, bool evict, bool no_wait_gpu, struct ttm_mem_reg *new_mem, @@ -465,6 +549,11 @@ error: return r; } +/** + * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer + * + * Called by amdgpu_bo_move(). + */ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem) @@ -477,6 +566,8 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, int r; adev = amdgpu_ttm_adev(bo->bdev); + + /* create space/pages for new_mem in GTT space */ tmp_mem = *new_mem; tmp_mem.mm_node = NULL; placement.num_placement = 1; @@ -491,25 +582,36 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, return r; } + /* set caching flags */ r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); if (unlikely(r)) { goto out_cleanup; } + /* Bind the memory to the GTT space */ r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx); if (unlikely(r)) { goto out_cleanup; } + + /* blit VRAM to GTT */ r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } + + /* move BO (in tmp_mem) to new_mem */ r = ttm_bo_move_ttm(bo, ctx, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; } +/** + * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM + * + * Called by amdgpu_bo_move(). 
+ */ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem) @@ -522,6 +624,8 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, int r; adev = amdgpu_ttm_adev(bo->bdev); + + /* make space in GTT for old_mem buffer */ tmp_mem = *new_mem; tmp_mem.mm_node = NULL; placement.num_placement = 1; @@ -535,10 +639,14 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, if (unlikely(r)) { return r; } + + /* move/bind old memory to GTT space */ r = ttm_bo_move_ttm(bo, ctx, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } + + /* copy to VRAM */ r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem); if (unlikely(r)) { goto out_cleanup; @@ -548,6 +656,11 @@ out_cleanup: return r; } +/** + * amdgpu_bo_move - Move a buffer object to a new memory location + * + * Called by ttm_bo_handle_move_mem() + */ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem) @@ -613,6 +726,11 @@ memcpy: return 0; } +/** + * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault + * + * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault() + */ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; @@ -690,6 +808,14 @@ struct amdgpu_ttm_tt { uint32_t last_set_pages; }; +/** + * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to + * by a USERPTR pointer to memory + * + * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos(). + * This provides a wrapper around the get_user_pages() call to provide + * device accessible pages that back user memory. + */ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) { struct amdgpu_ttm_tt *gtt = (void *)ttm; @@ -719,6 +845,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) } } + /* loop enough times using contiguous pages of memory */ do { unsigned num_pages = ttm->num_pages - pinned; uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; @@ -757,6 +884,14 @@ release_pages: return r; } +/** + * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages + * as necessary. + * + * Called by amdgpu_cs_list_validate(). This creates the page list + * that backs user memory and will ultimately be mapped into the device + * address space. + */ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages) { struct amdgpu_ttm_tt *gtt = (void *)ttm; @@ -771,6 +906,11 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages) } } +/** + * amdgpu_ttm_tt_mark_user_pages - Mark pages as dirty + * + * Called while unpinning userptr pages + */ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; @@ -789,7 +929,12 @@ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm) } } -/* prepare the sg table with the user pages */ +/** + * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the + * user pages + * + * Called by amdgpu_ttm_backend_bind() + **/ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); @@ -801,17 +946,20 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + /* Allocate an SG array and squash pages into it */ r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, ttm->num_pages << PAGE_SHIFT, GFP_KERNEL); if (r) goto release_sg; + /* Map SG to device */ r = -ENOMEM; nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); if (nents != ttm->sg->nents) goto release_sg; + /* convert SG to linear array of pages and dma addresses */ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); @@ -822,6 +970,9 @@ release_sg: return r; } +/** + * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages + */ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); @@ -835,9 +986,10 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) if (!ttm->sg->sgl) return; - /* free the sg table and pages again */ + /* unmap the pages mapped to the device */ dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); + /* mark the pages as dirty */ amdgpu_ttm_tt_mark_user_pages(ttm); sg_free_table(ttm->sg); @@ -882,6 +1034,12 @@ gart_bind_fail: return r; } +/** + * amdgpu_ttm_backend_bind - Bind GTT memory + * + * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem(). + * This handles binding GTT memory to the device address space. + */ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { @@ -912,7 +1070,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, return 0; } + /* compute PTE flags relevant to this BO memory */ flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem); + + /* bind pages into GART page tables */ gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, ttm->pages, gtt->ttm.dma_address, flags); @@ -923,6 +1084,9 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, return r; } +/** + * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object + */ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); @@ -938,6 +1102,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) amdgpu_gtt_mgr_has_gart_addr(&bo->mem)) return 0; + /* allocate GTT space */ tmp = bo->mem; tmp.mm_node = NULL; placement.num_placement = 1; @@ -953,7 +1118,10 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) if (unlikely(r)) return r; + /* compute PTE flags for this buffer object */ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); + + /* Bind pages */ gtt->offset = (u64)tmp.start << PAGE_SHIFT; r = amdgpu_ttm_gart_bind(adev, bo, flags); if (unlikely(r)) { @@ -969,6 +1137,12 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) return 0; } +/** + * amdgpu_ttm_recover_gart - Rebind GTT pages + * + * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to + * rebind GTT pages during a GPU reset. + */ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) { struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); @@ -984,12 +1158,19 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) return r; } +/** + * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages + * + * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and + * ttm_tt_destroy(). 
+ */ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; int r; + /* if the pages have userptr pinning then clear that first */ if (gtt->userptr) amdgpu_ttm_tt_unpin_userptr(ttm); @@ -1021,6 +1202,13 @@ static struct ttm_backend_func amdgpu_backend_func = { .destroy = &amdgpu_ttm_backend_destroy, }; +/** + * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO + * + * @bo: The buffer object to create a GTT ttm_tt object around + * + * Called by ttm_tt_create(). + */ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { @@ -1034,6 +1222,8 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, return NULL; } gtt->ttm.ttm.func = &amdgpu_backend_func; + + /* allocate space for the uninitialized page entries */ if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) { kfree(gtt); return NULL; @@ -1041,6 +1231,12 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, return &gtt->ttm.ttm; } +/** + * amdgpu_ttm_tt_populate - Map GTT pages visible to the device + * + * Map the pages of a ttm_tt object to an address space visible + * to the underlying device. + */ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { @@ -1048,6 +1244,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, struct amdgpu_ttm_tt *gtt = (void *)ttm; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); + /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */ if (gtt && gtt->userptr) { ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); if (!ttm->sg) @@ -1072,9 +1269,17 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, } #endif + /* fall back to generic helper to populate the page array + * and map them to the device */ return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx); } +/** + * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays + * + * Unmaps pages of a ttm_tt object from the device address space and + * unpopulates the page array backing it. + */ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) { struct amdgpu_device *adev; @@ -1100,9 +1305,21 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) } #endif + /* fall back to generic helper to unmap and unpopulate array */ ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm); } +/** + * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt + * for the current task + * + * @ttm: The ttm_tt object to bind this userptr object to + * @addr: The address in the current task's VM space to use + * @flags: Requirements of userptr object. + * + * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages + * to current task + */ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, uint32_t flags) { @@ -1127,6 +1344,9 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, return 0; } +/** + * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object + */ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; @@ -1140,6 +1360,12 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) return gtt->usertask->mm; } +/** + * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies + * inside an address range for the + * current task.
+ * + */ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, unsigned long end) { @@ -1150,10 +1376,16 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, if (gtt == NULL || !gtt->userptr) return false; + /* Return false if no part of the ttm_tt object lies within + * the range + */ size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; if (gtt->userptr > end || gtt->userptr + size <= start) return false; + /* Search the lists of tasks that hold this mapping and see + * if current is one of them. If it is return false. + */ spin_lock(&gtt->guptasklock); list_for_each_entry(entry, &gtt->guptasks, list) { if (entry->task == current) { @@ -1168,6 +1400,10 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, return true; } +/** + * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been + * invalidated? + */ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, int *last_invalidated) { @@ -1178,6 +1414,12 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, return prev_invalidated != *last_invalidated; } +/** + * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this + * ttm_tt object been invalidated + * since the last time they've + * been set? + */ bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; @@ -1188,6 +1430,9 @@ bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm) return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages; } +/** + * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only? + */ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; @@ -1198,6 +1443,12 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); } +/** + * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object + * + * @ttm: The ttm_tt object to compute the flags for + * @mem: The memory registry backing this ttm_tt object + */ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, struct ttm_mem_reg *mem) { @@ -1222,6 +1473,16 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, return flags; } +/** + * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict + * a buffer object. + * + * Return true if eviction is sensible. Called by + * ttm_mem_evict_first() on behalf of ttm_bo_mem_force_space() + * which tries to evict buffer objects until it can find space + * for a new object and by ttm_bo_force_list_clean() which is + * used to clean out a memory space. + */ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place) { @@ -1268,6 +1529,19 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, return ttm_bo_eviction_valuable(bo, place); } +/** + * amdgpu_ttm_access_memory - Read or Write memory that backs a + * buffer object. + * + * @bo: The buffer object to read/write + * @offset: Offset into buffer object + * @buf: Secondary buffer to write/read from + * @len: Length in bytes of access + * @write: true if writing + * + * This is used to access VRAM that backs a buffer object via MMIO + * access for debugging purposes.
+ */ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, unsigned long offset, void *buf, int len, int write) { @@ -1444,13 +1718,22 @@ error_create: adev->fw_vram_usage.reserved_bo = NULL; return r; } - +/** + * amdgpu_ttm_init - Init the memory management (ttm) as well as + * various gtt/vram related fields. + * + * This initializes all of the memory space pools that the TTM layer + * will need such as the GTT space (system memory mapped to the device), + * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which + * can be mapped per VMID. + */ int amdgpu_ttm_init(struct amdgpu_device *adev) { uint64_t gtt_size; int r; u64 vis_vram_limit; + /* initialize global references for vram/gtt */ r = amdgpu_ttm_global_init(adev); if (r) { return r; @@ -1471,6 +1754,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* We opt to avoid OOM on system pages allocations */ adev->mman.bdev.no_retry = true; + /* Initialize VRAM pool with all of VRAM divided into pages */ r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, adev->gmc.real_vram_size >> PAGE_SHIFT); if (r) { @@ -1500,6 +1784,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } + /* allocate memory as required for VGA + * This is used for VGA emulation and pre-OS scanout buffers to + * avoid display artifacts while transitioning between pre-OS + * and driver. */ if (adev->gmc.stolen_size) { r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, @@ -1511,6 +1799,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); + /* Compute GTT size, either based on 3/4th the size of RAM size + * or whatever the user passed on module init */ if (amdgpu_gtt_size == -1) { struct sysinfo si; @@ -1521,6 +1811,8 @@ } else gtt_size = (uint64_t)amdgpu_gtt_size << 20; + + /* Initialize GTT memory pool */ r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT); if (r) { DRM_ERROR("Failed initializing GTT heap.\n"); @@ -1529,6 +1821,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: %uM of GTT memory ready.\n", (unsigned)(gtt_size / (1024 * 1024))); + /* Initialize various on-chip memory pools */ adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT; adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT; adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT; @@ -1568,6 +1861,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) } } + /* Register debugfs entries for amdgpu_ttm */ r = amdgpu_ttm_debugfs_init(adev); if (r) { DRM_ERROR("Failed to init debugfs\n"); @@ -1576,11 +1870,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_ttm_late_init - Handle any late initialization for + * amdgpu_ttm + */ void amdgpu_ttm_late_init(struct amdgpu_device *adev) { + /* return the VGA stolen memory (if any) back to VRAM */ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); } +/** + * amdgpu_ttm_fini - De-initialize the TTM memory pools + */ void amdgpu_ttm_fini(struct amdgpu_device *adev) { if (!adev->mman.initialized) @@ -1908,6 +2210,11 @@ static const struct drm_info_list amdgpu_ttm_debugfs_list[] = { #endif }; +/** + * amdgpu_ttm_vram_read - Linear read access to VRAM + * + * Accesses VRAM via MMIO for debugging purposes.
+ */ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -1947,6 +2254,11 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, return result; } +/** + * amdgpu_ttm_vram_write - Linear write access to VRAM + * + * Accesses VRAM via MMIO for debugging purposes. + */ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { @@ -1995,6 +2307,9 @@ static const struct file_operations amdgpu_ttm_vram_fops = { #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS +/** + * amdgpu_ttm_gtt_read - Linear read access to GTT memory + */ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -2042,6 +2357,13 @@ static const struct file_operations amdgpu_ttm_gtt_fops = { #endif +/** + * amdgpu_iomem_read - Virtual read access to GPU mapped memory + * + * This function is used to read memory that has been mapped to the + * GPU and the known addresses are not physical addresses but instead + * bus addresses (e.g., what you'd put in an IB or ring buffer). + */ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -2050,6 +2372,7 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, ssize_t result = 0; int r; + /* retrieve the IOMMU domain if any for this device */ dom = iommu_get_domain_for_dev(adev->dev); while (size) { @@ -2062,6 +2385,10 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, bytes = bytes < size ? bytes : size; + /* Translate the bus address to a physical address. If + * the domain is NULL it means there is no IOMMU active + * and the address translation is the identity + */ addr = dom ? iommu_iova_to_phys(dom, addr) : addr; pfn = addr >> PAGE_SHIFT; @@ -2086,6 +2413,13 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, return result; } +/** + * amdgpu_iomem_write - Virtual write access to GPU mapped memory + * + * This function is used to write memory that has been mapped to the + * GPU and the known addresses are not physical addresses but instead + * bus addresses (e.g., what you'd put in an IB or ring buffer). 
+ */ static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 75592bd04d6a..f55f72a37ca8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -307,6 +307,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) return AMDGPU_FW_LOAD_DIRECT; else return AMDGPU_FW_LOAD_PSP; + case CHIP_VEGA20: + return AMDGPU_FW_LOAD_DIRECT; default: DRM_ERROR("Unknown firmware load type\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index de4d77af02ae..bcf68f80bbf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -70,12 +70,14 @@ #define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin" #define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin" +#define FIRMWARE_VEGA20 "amdgpu/vega20_uvd.bin" -#define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00) -#define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00) -#define mmUVD_GPCOM_VCPU_CMD_VEGA10 (0x03c3 + 0x7e00) -#define mmUVD_NO_OP_VEGA10 (0x03ff + 0x7e00) -#define mmUVD_ENGINE_CNTL_VEGA10 (0x03c6 + 0x7e00) +/* These are common relative offsets for all asics, from uvd_7_0_offset.h, */ +#define UVD_GPCOM_VCPU_CMD 0x03c3 +#define UVD_GPCOM_VCPU_DATA0 0x03c4 +#define UVD_GPCOM_VCPU_DATA1 0x03c5 +#define UVD_NO_OP 0x03ff +#define UVD_BASE_SI 0x3800 /** * amdgpu_uvd_cs_ctx - Command submission parser context @@ -114,6 +116,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGAM); MODULE_FIRMWARE(FIRMWARE_VEGA10); MODULE_FIRMWARE(FIRMWARE_VEGA12); +MODULE_FIRMWARE(FIRMWARE_VEGA20); static void amdgpu_uvd_idle_work_handler(struct work_struct *work); @@ -125,9 +128,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) const char *fw_name; const struct common_firmware_header *hdr; unsigned version_major, version_minor, family_id; - int i, r; + int i, j, r; - INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); + INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler); switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_CIK @@ -177,6 +180,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) case CHIP_VEGAM: fw_name = FIRMWARE_VEGAM; break; + case CHIP_VEGA20: + fw_name = FIRMWARE_VEGA20; + break; default: return -EINVAL; } @@ -231,28 +237,30 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); - r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo, - &adev->uvd.gpu_addr, &adev->uvd.cpu_addr); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); - return r; - } + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { - ring = &adev->uvd.ring; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity, - rq, NULL); - if (r != 0) { - DRM_ERROR("Failed setting up UVD run queue.\n"); - return r; - } + r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo, + &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); + return r; + } - for (i = 0; i < adev->uvd.max_handles; ++i) { - atomic_set(&adev->uvd.handles[i], 0); - adev->uvd.filp[i] = NULL; - } + ring = &adev->uvd.inst[j].ring; + rq = 
&ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity, + rq, NULL); + if (r != 0) { + DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j); + return r; + } + for (i = 0; i < adev->uvd.max_handles; ++i) { + atomic_set(&adev->uvd.inst[j].handles[i], 0); + adev->uvd.inst[j].filp[i] = NULL; + } + } /* from uvd v5.0 HW addressing capacity increased to 64 bits */ if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) adev->uvd.address_64_bit = true; @@ -279,20 +287,22 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { - int i; - kfree(adev->uvd.saved_bo); + int i, j; - drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { + kfree(adev->uvd.inst[j].saved_bo); - amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo, - &adev->uvd.gpu_addr, - (void **)&adev->uvd.cpu_addr); + drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity); - amdgpu_ring_fini(&adev->uvd.ring); + amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo, + &adev->uvd.inst[j].gpu_addr, + (void **)&adev->uvd.inst[j].cpu_addr); - for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i) - amdgpu_ring_fini(&adev->uvd.ring_enc[i]); + amdgpu_ring_fini(&adev->uvd.inst[j].ring); + for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i) + amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); + } release_firmware(adev->uvd.fw); return 0; @@ -302,32 +312,33 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) { unsigned size; void *ptr; - int i; - - if (adev->uvd.vcpu_bo == NULL) - return 0; + int i, j; - cancel_delayed_work_sync(&adev->uvd.idle_work); + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { + if (adev->uvd.inst[j].vcpu_bo == NULL) + continue; - /* only valid for physical mode */ - if (adev->asic_type < CHIP_POLARIS10) { - for (i = 0; i < adev->uvd.max_handles; ++i) - if (atomic_read(&adev->uvd.handles[i])) - break; + cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work); - if (i == adev->uvd.max_handles) - return 0; - } + /* only valid for physical mode */ + if (adev->asic_type < CHIP_POLARIS10) { + for (i = 0; i < adev->uvd.max_handles; ++i) + if (atomic_read(&adev->uvd.inst[j].handles[i])) + break; - size = amdgpu_bo_size(adev->uvd.vcpu_bo); - ptr = adev->uvd.cpu_addr; + if (i == adev->uvd.max_handles) + continue; + } - adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); - if (!adev->uvd.saved_bo) - return -ENOMEM; + size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo); + ptr = adev->uvd.inst[j].cpu_addr; - memcpy_fromio(adev->uvd.saved_bo, ptr, size); + adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL); + if (!adev->uvd.inst[j].saved_bo) + return -ENOMEM; + memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); + } return 0; } @@ -335,59 +346,65 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) { unsigned size; void *ptr; + int i; - if (adev->uvd.vcpu_bo == NULL) - return -EINVAL; + for (i = 0; i < adev->uvd.num_uvd_inst; i++) { + if (adev->uvd.inst[i].vcpu_bo == NULL) + return -EINVAL; - size = amdgpu_bo_size(adev->uvd.vcpu_bo); - ptr = adev->uvd.cpu_addr; + size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo); + ptr = adev->uvd.inst[i].cpu_addr; - if (adev->uvd.saved_bo != NULL) { - memcpy_toio(ptr, adev->uvd.saved_bo, size); - kfree(adev->uvd.saved_bo); - adev->uvd.saved_bo = NULL; - } else { - const struct common_firmware_header *hdr; - unsigned offset; - - hdr = (const struct common_firmware_header *)adev->uvd.fw->data; - if 
(adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset, - le32_to_cpu(hdr->ucode_size_bytes)); - size -= le32_to_cpu(hdr->ucode_size_bytes); - ptr += le32_to_cpu(hdr->ucode_size_bytes); + if (adev->uvd.inst[i].saved_bo != NULL) { + memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size); + kfree(adev->uvd.inst[i].saved_bo); + adev->uvd.inst[i].saved_bo = NULL; + } else { + const struct common_firmware_header *hdr; + unsigned offset; + + hdr = (const struct common_firmware_header *)adev->uvd.fw->data; + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + offset = le32_to_cpu(hdr->ucode_array_offset_bytes); + memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset, + le32_to_cpu(hdr->ucode_size_bytes)); + size -= le32_to_cpu(hdr->ucode_size_bytes); + ptr += le32_to_cpu(hdr->ucode_size_bytes); + } + memset_io(ptr, 0, size); + /* to restore uvd fence seq */ + amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring); } - memset_io(ptr, 0, size); - /* to restore uvd fence seq */ - amdgpu_fence_driver_force_completion(&adev->uvd.ring); } - return 0; } void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) { - struct amdgpu_ring *ring = &adev->uvd.ring; - int i, r; + struct amdgpu_ring *ring; + int i, j, r; - for (i = 0; i < adev->uvd.max_handles; ++i) { - uint32_t handle = atomic_read(&adev->uvd.handles[i]); - if (handle != 0 && adev->uvd.filp[i] == filp) { - struct dma_fence *fence; - - r = amdgpu_uvd_get_destroy_msg(ring, handle, - false, &fence); - if (r) { - DRM_ERROR("Error destroying UVD (%d)!\n", r); - continue; - } + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + ring = &adev->uvd.inst[j].ring; + + for (i = 0; i < adev->uvd.max_handles; ++i) { + uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]); + if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) { + struct dma_fence *fence; + + r = amdgpu_uvd_get_destroy_msg(ring, handle, + false, &fence); + if (r) { + DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r); + continue; + } - dma_fence_wait(fence, false); - dma_fence_put(fence); + dma_fence_wait(fence, false); + dma_fence_put(fence); - adev->uvd.filp[i] = NULL; - atomic_set(&adev->uvd.handles[i], 0); + adev->uvd.inst[j].filp[i] = NULL; + atomic_set(&adev->uvd.inst[j].handles[i], 0); + } } } } @@ -662,15 +679,16 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, void *ptr; long r; int i; + uint32_t ip_instance = ctx->parser->job->ring->me; if (offset & 0x3F) { - DRM_ERROR("UVD messages must be 64 byte aligned!\n"); + DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance); return -EINVAL; } r = amdgpu_bo_kmap(bo, &ptr); if (r) { - DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r); + DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r); return r; } @@ -680,7 +698,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, handle = msg[2]; if (handle == 0) { - DRM_ERROR("Invalid UVD handle!\n"); + DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance); return -EINVAL; } @@ -691,18 +709,18 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, /* try to alloc a new handle */ for (i = 0; i < adev->uvd.max_handles; ++i) { - if (atomic_read(&adev->uvd.handles[i]) == handle) { - DRM_ERROR("Handle 0x%x already in use!\n", handle); + if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) { + DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle); 
return -EINVAL; } - if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { - adev->uvd.filp[i] = ctx->parser->filp; + if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) { + adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp; return 0; } } - DRM_ERROR("No more free UVD handles!\n"); + DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance); return -ENOSPC; case 1: @@ -714,27 +732,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, /* validate the handle */ for (i = 0; i < adev->uvd.max_handles; ++i) { - if (atomic_read(&adev->uvd.handles[i]) == handle) { - if (adev->uvd.filp[i] != ctx->parser->filp) { - DRM_ERROR("UVD handle collision detected!\n"); + if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) { + if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) { + DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance); return -EINVAL; } return 0; } } - DRM_ERROR("Invalid UVD handle 0x%x!\n", handle); + DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle); return -ENOENT; case 2: /* it's a destroy msg, free the handle */ for (i = 0; i < adev->uvd.max_handles; ++i) - atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); + atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0); amdgpu_bo_kunmap(bo); return 0; default: - DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); + DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type); return -EINVAL; } BUG(); @@ -805,7 +823,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) } if ((cmd == 0 || cmd == 0x3) && - (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) { + (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) { DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", start, end); return -EINVAL; @@ -973,6 +991,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, uint64_t addr; long r; int i; + unsigned offset_idx = 0; + unsigned offset[3] = { UVD_BASE_SI, 0, 0 }; amdgpu_bo_kunmap(bo); amdgpu_bo_unpin(bo); @@ -992,17 +1012,16 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, goto err; if (adev->asic_type >= CHIP_VEGA10) { - data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0_VEGA10, 0); - data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1_VEGA10, 0); - data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD_VEGA10, 0); - data[3] = PACKET0(mmUVD_NO_OP_VEGA10, 0); - } else { - data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0); - data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0); - data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0); - data[3] = PACKET0(mmUVD_NO_OP, 0); + offset_idx = 1 + ring->me; + offset[1] = adev->reg_offset[UVD_HWIP][0][1]; + offset[2] = adev->reg_offset[UVD_HWIP][1][1]; } + data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0); + data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0); + data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0); + data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0); + ib = &job->ibs[0]; addr = amdgpu_bo_gpu_offset(bo); ib->ptr[0] = data[0]; @@ -1038,7 +1057,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (r) goto err_free; - r = amdgpu_job_submit(job, ring, &adev->uvd.entity, + r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity, AMDGPU_FENCE_OWNER_UNDEFINED, &f); if (r) goto err_free; @@ -1126,8 +1145,15 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, static void amdgpu_uvd_idle_work_handler(struct work_struct *work) { struct 
amdgpu_device *adev = - container_of(work, struct amdgpu_device, uvd.idle_work.work); - unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring); + container_of(work, struct amdgpu_device, uvd.inst->idle_work.work); + unsigned fences = 0, i, j; + + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { + fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring); + for (j = 0; j < adev->uvd.num_enc_rings; ++j) { + fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]); + } + } if (fences == 0) { if (adev->pm.dpm_enabled) { @@ -1141,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) AMD_CG_STATE_GATE); } } else { - schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); + schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); } } @@ -1153,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) if (amdgpu_sriov_vf(adev)) return; - set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); + set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work); if (set_clocks) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_uvd(adev, true); @@ -1170,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) { if (!amdgpu_sriov_vf(ring->adev)) - schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); + schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); } /** @@ -1184,27 +1210,28 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence; long r; + uint32_t ip_instance = ring->me; r = amdgpu_uvd_get_create_msg(ring, 1, NULL); if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r); goto error; } r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r); goto error; } r = dma_fence_wait_timeout(fence, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); + DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance); r = -ETIMEDOUT; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r); } else { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx); r = 0; } @@ -1232,7 +1259,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev) * necessarily linear. So we need to count * all non-zero handles. 
*/ - if (atomic_read(&adev->uvd.handles[i])) + if (atomic_read(&adev->uvd.inst->handles[i])) used_handles++; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 32ea20b99e53..b1579fba134c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -31,30 +31,37 @@ #define AMDGPU_UVD_SESSION_SIZE (50*1024) #define AMDGPU_UVD_FIRMWARE_OFFSET 256 +#define AMDGPU_MAX_UVD_INSTANCES 2 + #define AMDGPU_UVD_FIRMWARE_SIZE(adev) \ (AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \ 8) - AMDGPU_UVD_FIRMWARE_OFFSET) -struct amdgpu_uvd { +struct amdgpu_uvd_inst { struct amdgpu_bo *vcpu_bo; void *cpu_addr; uint64_t gpu_addr; - unsigned fw_version; void *saved_bo; - unsigned max_handles; atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; struct delayed_work idle_work; - const struct firmware *fw; /* UVD firmware */ struct amdgpu_ring ring; struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; struct amdgpu_irq_src irq; - bool address_64_bit; - bool use_ctx_buf; struct drm_sched_entity entity; struct drm_sched_entity entity_enc; uint32_t srbm_soft_reset; +}; + +struct amdgpu_uvd { + const struct firmware *fw; /* UVD firmware */ + unsigned fw_version; + unsigned max_handles; unsigned num_enc_rings; + uint8_t num_uvd_inst; + bool address_64_bit; + bool use_ctx_buf; + struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES]; }; int amdgpu_uvd_sw_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index a86322f5164f..23d960ec1cf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -57,6 +57,7 @@ #define FIRMWARE_VEGA10 "amdgpu/vega10_vce.bin" #define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin" +#define FIRMWARE_VEGA20 "amdgpu/vega20_vce.bin" #ifdef CONFIG_DRM_AMDGPU_CIK MODULE_FIRMWARE(FIRMWARE_BONAIRE); @@ -76,6 +77,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGAM); MODULE_FIRMWARE(FIRMWARE_VEGA10); MODULE_FIRMWARE(FIRMWARE_VEGA12); +MODULE_FIRMWARE(FIRMWARE_VEGA20); static void amdgpu_vce_idle_work_handler(struct work_struct *work); @@ -143,6 +145,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) case CHIP_VEGA12: fw_name = FIRMWARE_VEGA12; break; + case CHIP_VEGA20: + fw_name = FIRMWARE_VEGA20; + break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index e5d234cf804f..8851bcdfc260 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -205,13 +205,18 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) struct amdgpu_device *adev = container_of(work, struct amdgpu_device, vcn.idle_work.work); unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec); + unsigned i; + + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]); + } if (fences == 0) { - if (adev->pm.dpm_enabled) { - /* might be used when with pg/cg + if (adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, false); - */ - } + else + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_GATE); } else { schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); } @@ -223,9 +228,11 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) bool set_clocks = 
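/*
 * The header split above keeps firmware-wide state in struct amdgpu_uvd
 * and moves per-engine state (VCPU BO, handles, rings, IRQ source) into
 * struct amdgpu_uvd_inst. Sketch of walking the new layout; use_ring()
 * is a hypothetical placeholder.
 */
static void for_each_uvd_ring(struct amdgpu_device *adev)
{
	unsigned int i, j;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		use_ring(&adev->uvd.inst[i].ring);		  /* decode */
		for (j = 0; j < adev->uvd.num_enc_rings; ++j)
			use_ring(&adev->uvd.inst[i].ring_enc[j]); /* encode */
	}
}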
!cancel_delayed_work_sync(&adev->vcn.idle_work); if (set_clocks && adev->pm.dpm_enabled) { - /* might be used when with pg/cg - amdgpu_dpm_enable_uvd(adev, true); - */ + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, true); + else + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_UNGATE); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 2fd7db891689..181e6afa9847 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -45,6 +45,17 @@ #define VCN_ENC_CMD_REG_WRITE 0x0000000b #define VCN_ENC_CMD_REG_WAIT 0x0000000c +enum engine_status_constants { + UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0, + UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON = 0x00000002, + UVD_STATUS__UVD_BUSY = 0x00000004, + GB_ADDR_CONFIG_DEFAULT = 0x26010011, + UVD_STATUS__IDLE = 0x2, + UVD_STATUS__BUSY = 0x5, + UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF = 0x1, + UVD_STATUS__RBC_BUSY = 0x1, +}; + struct amdgpu_vcn { struct amdgpu_bo *vcpu_bo; void *cpu_addr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 1a8f4e0dd023..ccba88cc8c54 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -119,9 +119,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, * is currently evicted. add the bo to the evicted list to make sure it * is validated on next vm use to avoid fault. * */ - spin_lock(&vm->status_lock); list_move_tail(&base->vm_status, &vm->evicted); - spin_unlock(&vm->status_lock); } /** @@ -226,24 +224,16 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, void *param) { struct ttm_bo_global *glob = adev->mman.bdev.glob; - int r; - - spin_lock(&vm->status_lock); - while (!list_empty(&vm->evicted)) { - struct amdgpu_vm_bo_base *bo_base; - struct amdgpu_bo *bo; + struct amdgpu_vm_bo_base *bo_base, *tmp; + int r = 0; - bo_base = list_first_entry(&vm->evicted, - struct amdgpu_vm_bo_base, - vm_status); - spin_unlock(&vm->status_lock); + list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { + struct amdgpu_bo *bo = bo_base->bo; - bo = bo_base->bo; - BUG_ON(!bo); if (bo->parent) { r = validate(param, bo); if (r) - return r; + break; spin_lock(&glob->lru_lock); ttm_bo_move_to_lru_tail(&bo->tbo); @@ -252,22 +242,29 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, spin_unlock(&glob->lru_lock); } - if (bo->tbo.type == ttm_bo_type_kernel && - vm->use_cpu_for_update) { - r = amdgpu_bo_kmap(bo, NULL); - if (r) - return r; - } - - spin_lock(&vm->status_lock); - if (bo->tbo.type != ttm_bo_type_kernel) + if (bo->tbo.type != ttm_bo_type_kernel) { + spin_lock(&vm->moved_lock); list_move(&bo_base->vm_status, &vm->moved); - else + spin_unlock(&vm->moved_lock); + } else { list_move(&bo_base->vm_status, &vm->relocated); + } } - spin_unlock(&vm->status_lock); - return 0; + spin_lock(&glob->lru_lock); + list_for_each_entry(bo_base, &vm->idle, vm_status) { + struct amdgpu_bo *bo = bo_base->bo; + + if (!bo->parent) + continue; + + ttm_bo_move_to_lru_tail(&bo->tbo); + if (bo->shadow) + ttm_bo_move_to_lru_tail(&bo->shadow->tbo); + } + spin_unlock(&glob->lru_lock); + + return r; } /** @@ -279,13 +276,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, */ bool amdgpu_vm_ready(struct amdgpu_vm *vm) { - bool ready; - - spin_lock(&vm->status_lock); - ready = list_empty(&vm->evicted); - 
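/*
 * Reduced sketch of the list walk that replaces the status_lock dance in
 * amdgpu_vm_validate_pt_bos() above: the _safe variant caches the next
 * node up front, so the body may list_move() the current entry without
 * breaking the traversal; only the shared 'moved' list still needs its
 * own lock after this change.
 */
struct amdgpu_vm_bo_base *bo_base, *tmp;

list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
	if (bo_base->bo->tbo.type != ttm_bo_type_kernel) {
		spin_lock(&vm->moved_lock);
		list_move(&bo_base->vm_status, &vm->moved);
		spin_unlock(&vm->moved_lock);
	} else {
		list_move(&bo_base->vm_status, &vm->relocated);
	}
}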
spin_unlock(&vm->status_lock); - - return ready; + return list_empty(&vm->evicted); } /** @@ -477,9 +468,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, pt->parent = amdgpu_bo_ref(parent->base.bo); amdgpu_vm_bo_base_init(&entry->base, vm, pt); - spin_lock(&vm->status_lock); list_move(&entry->base.vm_status, &vm->relocated); - spin_unlock(&vm->status_lock); } if (level < AMDGPU_VM_PTB) { @@ -926,10 +915,8 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev, if (!entry->base.bo) continue; - spin_lock(&vm->status_lock); - if (list_empty(&entry->base.vm_status)) - list_add(&entry->base.vm_status, &vm->relocated); - spin_unlock(&vm->status_lock); + if (!entry->base.moved) + list_move(&entry->base.vm_status, &vm->relocated); amdgpu_vm_invalidate_level(adev, vm, entry, level + 1); } } @@ -959,6 +946,14 @@ restart: params.adev = adev; if (vm->use_cpu_for_update) { + struct amdgpu_vm_bo_base *bo_base; + + list_for_each_entry(bo_base, &vm->relocated, vm_status) { + r = amdgpu_bo_kmap(bo_base->bo, NULL); + if (unlikely(r)) + return r; + } + r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM); if (unlikely(r)) return r; @@ -974,7 +969,6 @@ restart: params.func = amdgpu_vm_do_set_ptes; } - spin_lock(&vm->status_lock); while (!list_empty(&vm->relocated)) { struct amdgpu_vm_bo_base *bo_base, *parent; struct amdgpu_vm_pt *pt, *entry; @@ -983,14 +977,12 @@ restart: bo_base = list_first_entry(&vm->relocated, struct amdgpu_vm_bo_base, vm_status); - list_del_init(&bo_base->vm_status); - spin_unlock(&vm->status_lock); + bo_base->moved = false; + list_move(&bo_base->vm_status, &vm->idle); bo = bo_base->bo->parent; - if (!bo) { - spin_lock(&vm->status_lock); + if (!bo) continue; - } parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base, bo_list); @@ -999,12 +991,10 @@ restart: amdgpu_vm_update_pde(¶ms, vm, pt, entry); - spin_lock(&vm->status_lock); if (!vm->use_cpu_for_update && (ndw - params.ib->length_dw) < 32) break; } - spin_unlock(&vm->status_lock); if (vm->use_cpu_for_update) { /* Flush HDP */ @@ -1107,9 +1097,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, if (entry->huge) { /* Add the entry to the relocated list to update it. */ entry->huge = false; - spin_lock(&p->vm->status_lock); list_move(&entry->base.vm_status, &p->vm->relocated); - spin_unlock(&p->vm->status_lock); } return; } @@ -1588,18 +1576,22 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, amdgpu_asic_flush_hdp(adev, NULL); } - spin_lock(&vm->status_lock); + spin_lock(&vm->moved_lock); list_del_init(&bo_va->base.vm_status); + spin_unlock(&vm->moved_lock); /* If the BO is not in its preferred location add it back to * the evicted list so that it gets validated again on the * next command submission. 
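/*
 * Sketch of the drain pattern in the directory-update loop above:
 * entries are popped off 'relocated' and parked on the new 'idle' list
 * once their parent directory entry is written; for CPU-based updates,
 * all relocated BOs are now kmapped up front instead of inside the walk.
 */
while (!list_empty(&vm->relocated)) {
	struct amdgpu_vm_bo_base *bo_base =
		list_first_entry(&vm->relocated,
				 struct amdgpu_vm_bo_base, vm_status);

	bo_base->moved = false;
	list_move(&bo_base->vm_status, &vm->idle);

	/* ... update the parent's PDE for this page-table BO ... */
}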
*/ - if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv && - !(bo->preferred_domains & - amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))) - list_add_tail(&bo_va->base.vm_status, &vm->evicted); - spin_unlock(&vm->status_lock); + if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) { + uint32_t mem_type = bo->tbo.mem.mem_type; + + if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type))) + list_add_tail(&bo_va->base.vm_status, &vm->evicted); + else + list_add(&bo_va->base.vm_status, &vm->idle); + } list_splice_init(&bo_va->invalids, &bo_va->valids); bo_va->cleared = clear; @@ -1808,19 +1800,18 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, int amdgpu_vm_handle_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm) { + struct amdgpu_bo_va *bo_va, *tmp; + struct list_head moved; bool clear; - int r = 0; - - spin_lock(&vm->status_lock); - while (!list_empty(&vm->moved)) { - struct amdgpu_bo_va *bo_va; - struct reservation_object *resv; + int r; - bo_va = list_first_entry(&vm->moved, - struct amdgpu_bo_va, base.vm_status); - spin_unlock(&vm->status_lock); + INIT_LIST_HEAD(&moved); + spin_lock(&vm->moved_lock); + list_splice_init(&vm->moved, &moved); + spin_unlock(&vm->moved_lock); - resv = bo_va->base.bo->tbo.resv; + list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) { + struct reservation_object *resv = bo_va->base.bo->tbo.resv; /* Per VM BOs never need to bo cleared in the page tables */ if (resv == vm->root.base.bo->tbo.resv) @@ -1833,17 +1824,19 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, clear = true; r = amdgpu_vm_bo_update(adev, bo_va, clear); - if (r) + if (r) { + spin_lock(&vm->moved_lock); + list_splice(&moved, &vm->moved); + spin_unlock(&vm->moved_lock); return r; + } if (!clear && resv != vm->root.base.bo->tbo.resv) reservation_object_unlock(resv); - spin_lock(&vm->status_lock); } - spin_unlock(&vm->status_lock); - return r; + return 0; } /** @@ -1902,11 +1895,11 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, if (mapping->flags & AMDGPU_PTE_PRT) amdgpu_vm_prt_get(adev); - if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) { - spin_lock(&vm->status_lock); - if (list_empty(&bo_va->base.vm_status)) - list_add(&bo_va->base.vm_status, &vm->moved); - spin_unlock(&vm->status_lock); + if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv && + !bo_va->base.moved) { + spin_lock(&vm->moved_lock); + list_move(&bo_va->base.vm_status, &vm->moved); + spin_unlock(&vm->moved_lock); } trace_amdgpu_vm_bo_map(bo_va, mapping); } @@ -2216,9 +2209,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, list_del(&bo_va->base.bo_list); - spin_lock(&vm->status_lock); + spin_lock(&vm->moved_lock); list_del(&bo_va->base.vm_status); - spin_unlock(&vm->status_lock); + spin_unlock(&vm->moved_lock); list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { list_del(&mapping->list); @@ -2258,31 +2251,28 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, list_for_each_entry(bo_base, &bo->va, bo_list) { struct amdgpu_vm *vm = bo_base->vm; + bool was_moved = bo_base->moved; bo_base->moved = true; if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) { - spin_lock(&bo_base->vm->status_lock); if (bo->tbo.type == ttm_bo_type_kernel) list_move(&bo_base->vm_status, &vm->evicted); else list_move_tail(&bo_base->vm_status, &vm->evicted); - spin_unlock(&bo_base->vm->status_lock); continue; } - if (bo->tbo.type == ttm_bo_type_kernel) { - spin_lock(&bo_base->vm->status_lock); - if (list_empty(&bo_base->vm_status)) - 
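/*
 * Skeleton of the splice idiom amdgpu_vm_handle_moved() uses above:
 * drain the shared list in one shot under moved_lock, process with the
 * lock dropped, and splice the unprocessed remainder back on error so it
 * is retried on the next submission. ('idle', new in this patch, holds
 * validated BOs that sit outside the evicted/relocated/moved machine.)
 */
struct amdgpu_bo_va *bo_va, *tmp;
LIST_HEAD(moved);
int r;

spin_lock(&vm->moved_lock);
list_splice_init(&vm->moved, &moved);	/* take everything, leave empty */
spin_unlock(&vm->moved_lock);

list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
	r = amdgpu_vm_bo_update(adev, bo_va, clear); /* clear as computed above */
	if (r) {
		spin_lock(&vm->moved_lock);
		list_splice(&moved, &vm->moved);
		spin_unlock(&vm->moved_lock);
		return r;
	}
}
return 0;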
list_add(&bo_base->vm_status, &vm->relocated); - spin_unlock(&bo_base->vm->status_lock); + if (was_moved) continue; - } - spin_lock(&bo_base->vm->status_lock); - if (list_empty(&bo_base->vm_status)) - list_add(&bo_base->vm_status, &vm->moved); - spin_unlock(&bo_base->vm->status_lock); + if (bo->tbo.type == ttm_bo_type_kernel) { + list_move(&bo_base->vm_status, &vm->relocated); + } else { + spin_lock(&bo_base->vm->moved_lock); + list_move(&bo_base->vm_status, &vm->moved); + spin_unlock(&bo_base->vm->moved_lock); + } } } @@ -2391,10 +2381,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->va = RB_ROOT_CACHED; for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) vm->reserved_vmid[i] = NULL; - spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->evicted); INIT_LIST_HEAD(&vm->relocated); + spin_lock_init(&vm->moved_lock); INIT_LIST_HEAD(&vm->moved); + INIT_LIST_HEAD(&vm->idle); INIT_LIST_HEAD(&vm->freed); /* create scheduler entity for page table updates */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 4cf678684a12..061b99a18cb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -168,9 +168,6 @@ struct amdgpu_vm { /* tree of virtual addresses mapped */ struct rb_root_cached va; - /* protecting invalidated */ - spinlock_t status_lock; - /* BOs who needs a validation */ struct list_head evicted; @@ -179,6 +176,10 @@ struct amdgpu_vm { /* BOs moved, but not yet updated in the PT */ struct list_head moved; + spinlock_t moved_lock; + + /* All BOs of this VM not currently in the state machine */ + struct list_head idle; /* BO mappings freed, but not yet updated in the PT */ struct list_head freed; @@ -187,9 +188,6 @@ struct amdgpu_vm { struct amdgpu_vm_pt root; struct dma_fence *last_update; - /* protecting freed */ - spinlock_t freed_lock; - /* Scheduler entity for page table updates */ struct drm_sched_entity entity; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index de7be3de0f41..dbf2ccd0c744 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -473,6 +473,7 @@ static int dce_virtual_hw_init(void *handle) break; case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: break; default: DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c index 4ffda996660f..9935371db7ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c @@ -102,6 +102,13 @@ static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev, *flags |= AMD_CG_SUPPORT_DF_MGCG; } +static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev, + bool enable) +{ + WREG32_FIELD15(DF, 0, DF_CS_AON0_CoherentSlaveModeCtrlA0, + ForceParWrRMW, enable); +} + const struct amdgpu_df_funcs df_v1_7_funcs = { .init = df_v1_7_init, .enable_broadcast_mode = df_v1_7_enable_broadcast_mode, @@ -109,4 +116,5 @@ const struct amdgpu_df_funcs df_v1_7_funcs = { .get_hbm_channel_number = df_v1_7_get_hbm_channel_number, .update_medium_grain_clock_gating = df_v1_7_update_medium_grain_clock_gating, .get_clockgating_state = df_v1_7_get_clockgating_state, + .enable_ecc_force_par_wr_rmw = df_v1_7_enable_ecc_force_par_wr_rmw, }; diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c new file mode 100644 index 000000000000..60608b3df881 --- /dev/null 
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -0,0 +1,116 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "df_v3_6.h" + +#include "df/df_3_6_default.h" +#include "df/df_3_6_offset.h" +#include "df/df_3_6_sh_mask.h" + +static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0, + 16, 32, 0, 0, 0, 2, 4, 8}; + +static void df_v3_6_init(struct amdgpu_device *adev) +{ +} + +static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev, + bool enable) +{ + u32 tmp; + + if (enable) { + tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl); + tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK; + WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp); + } else + WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, + mmFabricConfigAccessControl_DEFAULT); +} + +static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0); + tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK; + tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; + + return tmp; +} + +static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev) +{ + int fb_channel_number; + + fb_channel_number = adev->df_funcs->get_fb_channel_number(adev); + if (fb_channel_number > ARRAY_SIZE(df_v3_6_channel_number)) + fb_channel_number = 0; + + return df_v3_6_channel_number[fb_channel_number]; +} + +static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + u32 tmp; + + /* Put DF on broadcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, true); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) { + tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY; + WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); + } else { + tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= DF_V3_6_MGCG_DISABLE; + WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); + } + + /* Exit broadcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, false); +} + +static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev, + u32 *flags) +{ + u32 tmp; + + /* AMD_CG_SUPPORT_DF_MGCG */ + tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); + if (tmp & 
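/*
 * Sketch of the lookup above: the IntLvNumChan field extracted by
 * df_v3_6_get_fb_channel_number() indexes df_v3_6_channel_number[] to
 * yield the HBM channel count. Note the '>=' bound here; the hunk above
 * checks '>', which would still admit an index equal to the array size.
 */
static u32 hbm_channel_number_sketch(struct amdgpu_device *adev)
{
	u32 idx = adev->df_funcs->get_fb_channel_number(adev);

	if (idx >= ARRAY_SIZE(df_v3_6_channel_number))
		idx = 0;	/* out of range: fall back to entry 0 */
	return df_v3_6_channel_number[idx];
}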
DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY) + *flags |= AMD_CG_SUPPORT_DF_MGCG; +} + +const struct amdgpu_df_funcs df_v3_6_funcs = { + .init = df_v3_6_init, + .enable_broadcast_mode = df_v3_6_enable_broadcast_mode, + .get_fb_channel_number = df_v3_6_get_fb_channel_number, + .get_hbm_channel_number = df_v3_6_get_hbm_channel_number, + .update_medium_grain_clock_gating = + df_v3_6_update_medium_grain_clock_gating, + .get_clockgating_state = df_v3_6_get_clockgating_state, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.h b/drivers/gpu/drm/amd/amdgpu/df_v3_6.h new file mode 100644 index 000000000000..e79c58e5efcb --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.h @@ -0,0 +1,40 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __DF_V3_6_H__ +#define __DF_V3_6_H__ + +#include "soc15_common.h" + +enum DF_V3_6_MGCG { + DF_V3_6_MGCG_DISABLE = 0, + DF_V3_6_MGCG_ENABLE_00_CYCLE_DELAY = 1, + DF_V3_6_MGCG_ENABLE_01_CYCLE_DELAY = 2, + DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY = 13, + DF_V3_6_MGCG_ENABLE_31_CYCLE_DELAY = 14, + DF_V3_6_MGCG_ENABLE_63_CYCLE_DELAY = 15 +}; + +extern const struct amdgpu_df_funcs df_v3_6_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index fc1911834ab5..d7530fdfaad5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -27,6 +27,7 @@ #include "amdgpu_gfx.h" #include "soc15.h" #include "soc15d.h" +#include "amdgpu_atomfirmware.h" #include "gc/gc_9_0_offset.h" #include "gc/gc_9_0_sh_mask.h" @@ -63,6 +64,13 @@ MODULE_FIRMWARE("amdgpu/vega12_mec.bin"); MODULE_FIRMWARE("amdgpu/vega12_mec2.bin"); MODULE_FIRMWARE("amdgpu/vega12_rlc.bin"); +MODULE_FIRMWARE("amdgpu/vega20_ce.bin"); +MODULE_FIRMWARE("amdgpu/vega20_pfp.bin"); +MODULE_FIRMWARE("amdgpu/vega20_me.bin"); +MODULE_FIRMWARE("amdgpu/vega20_mec.bin"); +MODULE_FIRMWARE("amdgpu/vega20_mec2.bin"); +MODULE_FIRMWARE("amdgpu/vega20_rlc.bin"); + MODULE_FIRMWARE("amdgpu/raven_ce.bin"); MODULE_FIRMWARE("amdgpu/raven_pfp.bin"); MODULE_FIRMWARE("amdgpu/raven_me.bin"); @@ -72,29 +80,22 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin"); static const struct soc15_reg_golden golden_settings_gc_9_0[] = { - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 
0x08000000, 0x08000080) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff) }; static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = @@ -108,6 +109,20 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800) }; +static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000) +}; + static const struct soc15_reg_golden golden_settings_gc_9_1[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104), @@ -241,6 +256,14 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_9_2_1_vg12, ARRAY_SIZE(golden_settings_gc_9_2_1_vg12)); break; + case CHIP_VEGA20: + soc15_program_register_sequence(adev, + golden_settings_gc_9_0, + ARRAY_SIZE(golden_settings_gc_9_0)); + soc15_program_register_sequence(adev, + golden_settings_gc_9_0_vg20, + ARRAY_SIZE(golden_settings_gc_9_0_vg20)); + break; case CHIP_RAVEN: soc15_program_register_sequence(adev, golden_settings_gc_9_1, @@ -468,6 +491,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) case CHIP_VEGA12: chip_name = "vega12"; break; + case CHIP_VEGA20: + chip_name = "vega20"; + break; case CHIP_RAVEN: chip_name = "raven"; break; @@ -1088,9 +1114,10 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q }; -static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) +static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) { u32 gb_addr_config; + int err; adev->gfx.funcs = &gfx_v9_0_gfx_funcs; @@ -1112,6 +1139,20 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN; DRM_INFO("fix gfx.config for vega12\n"); break; + case CHIP_VEGA20: + adev->gfx.config.max_hw_contexts = 8; + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; + gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG); + gb_addr_config &= ~0xf3e777ff; + gb_addr_config |= 0x22014042; + /* check vbios table if gpu info is not available */ + err = amdgpu_atomfirmware_get_gfx_info(adev); + if (err) + return err; + break; case CHIP_RAVEN: adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; @@ -1161,6 +1202,8 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.gb_addr_config, GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE)); + + return 0; } static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev, @@ -1394,6 +1437,7 @@ static int gfx_v9_0_sw_init(void *handle) switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case 
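/*
 * The vega20 early-init hunk above applies by hand the same mask/value
 * pair (0xf3e777ff / 0x22014042) that golden_settings_gc_9_0_vg20 lists
 * for mmGB_ADDR_CONFIG. Roughly, soc15_program_register_sequence()
 * applies each SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask)
 * entry as this read-modify-write (sketch; special cases omitted):
 */
u32 tmp = RREG32(reg);

tmp &= ~and_mask;	/* clear the bits the golden setting owns */
tmp |= or_mask;		/* program their golden values */
WREG32(reg, tmp);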
CHIP_VEGA20: case CHIP_RAVEN: adev->gfx.mec.num_mec = 2; break; @@ -1521,7 +1565,9 @@ static int gfx_v9_0_sw_init(void *handle) adev->gfx.ce_ram_size = 0x8000; - gfx_v9_0_gpu_early_init(adev); + r = gfx_v9_0_gpu_early_init(adev); + if (r) + return r; r = gfx_v9_0_ngg_init(adev); if (r) @@ -3688,6 +3734,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle, switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: case CHIP_RAVEN: gfx_v9_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); @@ -4680,6 +4727,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: case CHIP_RAVEN: adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs; break; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 6cccf0e0acd7..3c0a85d4e4ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -675,6 +675,7 @@ static int gmc_v9_0_late_init(void *handle) DRM_INFO("ECC is active.\n"); } else if (r == 0) { DRM_INFO("ECC is not present.\n"); + adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false); } else { DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r); return r; @@ -693,10 +694,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, amdgpu_device_vram_location(adev, &adev->gmc, base); amdgpu_device_gart_location(adev, mc); /* base offset of vram pages */ - if (adev->flags & AMD_IS_APU) - adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev); - else - adev->vm_manager.vram_base_offset = 0; + adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev); } /** @@ -755,6 +753,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: /* all engines support GPUVM */ case CHIP_VEGA12: /* all engines support GPUVM */ + case CHIP_VEGA20: default: adev->gmc.gart_size = 512ULL << 20; break; @@ -860,6 +859,7 @@ static int gmc_v9_0_sw_init(void *handle) break; case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: /* * To fulfill 4-level page support, * vm size is 256TB (48bit), maximum size of Vega10, @@ -977,6 +977,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: + case CHIP_VEGA20: soc15_program_register_sequence(adev, golden_settings_mmhub_1_0_0, ARRAY_SIZE(golden_settings_mmhub_1_0_0)); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 43f925773b57..3d53c4413f13 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -734,6 +734,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: case CHIP_RAVEN: mmhub_v1_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
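/*
 * A cautionary aside on the gmc_v9_0_late_init() hunk above: it invokes
 * adev->df_funcs->enable_ecc_force_par_wr_rmw() whenever ECC is found
 * absent, yet the df_v3_6 funcs table earlier in this patch does not
 * populate that callback, so a DF 3.6 part taking this path would call
 * through a NULL pointer. A defensive caller would guard it (sketch):
 */
if (adev->df_funcs && adev->df_funcs->enable_ecc_force_par_wr_rmw)
	adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);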
true : false); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index df34dc79d444..365517c0121e 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -34,10 +34,19 @@ #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 +/* vega20 */ +#define mmRCC_DEV0_EPF0_STRAP0_VG20 0x0011 +#define mmRCC_DEV0_EPF0_STRAP0_VG20_BASE_IDX 2 + static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev) { u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); + if (adev->asic_type == CHIP_VEGA20) + tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0_VG20); + else + tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); + tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; @@ -75,10 +84,14 @@ static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instan SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE); u32 doorbell_range = RREG32(reg); + u32 range = 2; + + if (adev->asic_type == CHIP_VEGA20) + range = 8; if (use_doorbell) { doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index); - doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2); + doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, range); } else doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0); @@ -133,6 +146,9 @@ static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *ade { uint32_t def, data; + if (adev->asic_type == CHIP_VEGA20) + return; + /* NBIF_MGCG_CTRL_LCLK */ def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 196e75def1f2..0c768e388ace 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -41,6 +41,9 @@ MODULE_FIRMWARE("amdgpu/vega10_sos.bin"); MODULE_FIRMWARE("amdgpu/vega10_asd.bin"); MODULE_FIRMWARE("amdgpu/vega12_sos.bin"); MODULE_FIRMWARE("amdgpu/vega12_asd.bin"); +MODULE_FIRMWARE("amdgpu/vega20_sos.bin"); +MODULE_FIRMWARE("amdgpu/vega20_asd.bin"); + #define smnMP1_FIRMWARE_FLAGS 0x3010028 diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 03a36cbe7557..ca53b3fba422 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -42,6 +42,8 @@ MODULE_FIRMWARE("amdgpu/vega10_sdma.bin"); MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin"); MODULE_FIRMWARE("amdgpu/vega12_sdma.bin"); MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/vega20_sdma.bin"); +MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin"); MODULE_FIRMWARE("amdgpu/raven_sdma.bin"); #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L @@ -107,6 +109,28 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] = SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0) }; +static const struct soc15_reg_golden golden_settings_sdma_4_2[] = +{ + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + 
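/*
 * The doorbell hunk above widens the per-engine SDMA doorbell SIZE field
 * from 2 to 8 pages on vega20. REG_SET_FIELD(), used throughout, is
 * approximately this shift-and-mask helper (sketched from its usage, not
 * the exact macro text):
 */
new_val = (orig & ~BIF_SDMA0_DOORBELL_RANGE__SIZE_MASK) |
	  ((size << BIF_SDMA0_DOORBELL_RANGE__SIZE__SHIFT) &
	   BIF_SDMA0_DOORBELL_RANGE__SIZE_MASK);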
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0) +}; + static const struct soc15_reg_golden golden_settings_sdma_rv1[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002), @@ -139,6 +163,11 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_vg12, ARRAY_SIZE(golden_settings_sdma_vg12)); break; + case CHIP_VEGA20: + soc15_program_register_sequence(adev, + golden_settings_sdma_4_2, + ARRAY_SIZE(golden_settings_sdma_4_2)); + break; case CHIP_RAVEN: soc15_program_register_sequence(adev, golden_settings_sdma_4_1, @@ -182,6 +211,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) case CHIP_VEGA12: chip_name = "vega12"; break; + case CHIP_VEGA20: + chip_name = "vega20"; + break; case CHIP_RAVEN: chip_name = "raven"; break; @@ -1516,6 +1548,7 @@ static int sdma_v4_0_set_clockgating_state(void *handle, switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: case CHIP_RAVEN: sdma_v4_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 90065766fffb..68b4a22a8892 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -41,8 +41,6 @@ #include "sdma1/sdma1_4_0_offset.h" #include "hdp/hdp_4_0_offset.h" #include "hdp/hdp_4_0_sh_mask.h" -#include "mp/mp_9_0_offset.h" -#include "mp/mp_9_0_sh_mask.h" #include "smuio/smuio_9_0_offset.h" #include "smuio/smuio_9_0_sh_mask.h" @@ -53,6 +51,7 @@ #include "gfxhub_v1_0.h" #include "mmhub_v1_0.h" #include "df_v1_7.h" +#include "df_v3_6.h" #include "vega10_ih.h" #include "sdma_v4_0.h" #include "uvd_v7_0.h" @@ -489,16 +488,24 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) case CHIP_RAVEN: vega10_reg_base_init(adev); break; + case CHIP_VEGA20: + vega20_reg_base_init(adev); + break; default: return -EINVAL; } if (adev->flags & AMD_IS_APU) adev->nbio_funcs = &nbio_v7_0_funcs; + else if (adev->asic_type == CHIP_VEGA20) + adev->nbio_funcs = &nbio_v7_0_funcs; else adev->nbio_funcs = &nbio_v6_1_funcs; - adev->df_funcs = &df_v1_7_funcs; + if (adev->asic_type == CHIP_VEGA20) + adev->df_funcs = &df_v3_6_funcs; + else + adev->df_funcs = &df_v1_7_funcs; adev->nbio_funcs->detect_hw_virt(adev); if (amdgpu_sriov_vf(adev)) @@ -507,12 +514,15 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); - if (!amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); + if (adev->asic_type != CHIP_VEGA20) { + amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); + } if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if defined(CONFIG_DRM_AMD_DC) @@ -660,6 +670,27 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x14; break; + case CHIP_VEGA20: + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_MGCG | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_ROM_MGCG | + AMD_CG_SUPPORT_VCE_MGCG | + AMD_CG_SUPPORT_UVD_MGCG; + adev->pg_flags = 0; + adev->external_rev_id = adev->rev_id + 0x28; + break; case CHIP_RAVEN: adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | @@ -679,8 +710,10 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_MC_MGCG | AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_SDMA_LS; - adev->pg_flags = AMD_PG_SUPPORT_SDMA; + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_VCN_MGCG; + + adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | @@ -872,6 +905,7 @@ static int soc15_common_set_clockgating_state(void *handle, switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: adev->nbio_funcs->update_medium_grain_clock_gating(adev, state == 
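/*
 * Condensed view of the callback selection in soc15_set_ip_blocks()
 * above: vega20 shares the APU's nbio_v7_0 callbacks and is the first
 * user of df_v3_6, while the PSP and powerplay IP blocks are not yet
 * added for it.
 */
if ((adev->flags & AMD_IS_APU) || adev->asic_type == CHIP_VEGA20)
	adev->nbio_funcs = &nbio_v7_0_funcs;
else
	adev->nbio_funcs = &nbio_v6_1_funcs;

adev->df_funcs = (adev->asic_type == CHIP_VEGA20) ?
		 &df_v3_6_funcs : &df_v1_7_funcs;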
AMD_CG_STATE_GATE ? true : false); adev->nbio_funcs->update_medium_grain_light_sleep(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index f70da8a29f86..1f714b7af520 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -55,5 +55,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev, const u32 array_size); int vega10_reg_base_init(struct amdgpu_device *adev); +int vega20_reg_base_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index def865067edd..0942f492d2e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -47,6 +47,21 @@ #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \ WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value) +#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \ + do { \ + uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ + uint32_t loop = adev->usec_timeout; \ + while ((tmp_ & (mask)) != (expected_value)) { \ + udelay(2); \ + tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ + loop--; \ + if (!loop) { \ + ret = -ETIMEDOUT; \ + break; \ + } \ + } \ + } while (0) + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 87cbb142dd0b..6fed3d7797a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -93,6 +93,7 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring) static int uvd_v4_2_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->uvd.num_uvd_inst = 1; uvd_v4_2_set_ring_funcs(adev); uvd_v4_2_set_irq_funcs(adev); @@ -107,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle) int r; /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); if (r) return r; @@ -119,9 +120,9 @@ static int uvd_v4_2_sw_init(void *handle) if (r) return r; - ring = &adev->uvd.ring; + ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); return r; } @@ -150,7 +151,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, static int uvd_v4_2_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int r; @@ -208,7 +209,7 @@ done: static int uvd_v4_2_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; if (RREG32(mmUVD_STATUS) != 0) uvd_v4_2_stop(adev); @@ -251,7 +252,7 @@ static int uvd_v4_2_resume(void *handle) */ static int uvd_v4_2_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t rb_bufsz; int i, j, r; u32 tmp; @@ -523,6 +524,18 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } +static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +{ + int i; + + WARN_ON(ring->wptr % 2 || count % 2); + + for (i = 0; i < count / 
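/*
 * Hypothetical call site for the new SOC15_WAIT_ON_RREG() helper above:
 * it polls every 2 us, for at most adev->usec_timeout iterations, until
 * (value & mask) == expected_value. It only assigns ret on timeout, so
 * ret must be pre-initialized; register and mask below are chosen purely
 * for illustration.
 */
int ret = 0;

SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, ret);
if (ret == -ETIMEDOUT)
	DRM_ERROR("UVD did not become idle\n");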
2; i++) { + amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0)); + amdgpu_ring_write(ring, 0); + } +} + /** * uvd_v4_2_mc_resume - memory controller programming * @@ -536,7 +549,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) uint32_t size; /* programm the VCPU memory controller bits 0-27 */ - addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; + addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3; WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr); WREG32(mmUVD_VCPU_CACHE_SIZE0, size); @@ -553,11 +566,11 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) WREG32(mmUVD_VCPU_CACHE_SIZE2, size); /* bits 28-31 */ - addr = (adev->uvd.gpu_addr >> 28) & 0xF; + addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF; WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); /* bits 32-39 */ - addr = (adev->uvd.gpu_addr >> 32) & 0xFF; + addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF; WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); @@ -664,7 +677,7 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { DRM_DEBUG("IH: UVD TRAP\n"); - amdgpu_fence_process(&adev->uvd.ring); + amdgpu_fence_process(&adev->uvd.inst->ring); return 0; } @@ -732,7 +745,6 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = { static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { .type = AMDGPU_RING_TYPE_UVD, .align_mask = 0xf, - .nop = PACKET0(mmUVD_NO_OP, 0), .support_64bit_ptrs = false, .get_rptr = uvd_v4_2_ring_get_rptr, .get_wptr = uvd_v4_2_ring_get_wptr, @@ -745,7 +757,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { .emit_fence = uvd_v4_2_ring_emit_fence, .test_ring = uvd_v4_2_ring_test_ring, .test_ib = amdgpu_uvd_ring_test_ib, - .insert_nop = amdgpu_ring_insert_nop, + .insert_nop = uvd_v4_2_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .begin_use = amdgpu_uvd_ring_begin_use, .end_use = amdgpu_uvd_ring_end_use, @@ -753,7 +765,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) { - adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs; + adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs; } static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = { @@ -763,8 +775,8 @@ static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = { static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev) { - adev->uvd.irq.num_types = 1; - adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs; + adev->uvd.inst->irq.num_types = 1; + adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs; } const struct amdgpu_ip_block_version uvd_v4_2_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 6445d55e7d5a..341ee6d55ce8 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -89,6 +89,7 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring) static int uvd_v5_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->uvd.num_uvd_inst = 1; uvd_v5_0_set_ring_funcs(adev); uvd_v5_0_set_irq_funcs(adev); @@ -103,7 +104,7 @@ static int uvd_v5_0_sw_init(void *handle) int r; /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); if (r) return r; @@ -115,9 +116,9 @@ static int uvd_v5_0_sw_init(void 
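/*
 * Why the per-ring insert_nop() above pads in pairs: on UVD a NOP is a
 * PACKET0 write to mmUVD_NO_OP plus its data dword, i.e. two dwords, so
 * the requested count must be even (hence the WARN_ON on odd counts or a
 * misaligned wptr). The pad count comes from the ring commit path,
 * roughly:
 */
count = ring->funcs->align_mask + 1 -
	(ring->wptr & ring->funcs->align_mask);
count %= ring->funcs->align_mask + 1;	/* 0..align_mask dwords of padding */
ring->funcs->insert_nop(ring, count);	/* even, as long as wptr is even */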
*handle) if (r) return r; - ring = &adev->uvd.ring; + ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); return r; } @@ -144,7 +145,7 @@ static int uvd_v5_0_sw_fini(void *handle) static int uvd_v5_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int r; @@ -204,7 +205,7 @@ done: static int uvd_v5_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; if (RREG32(mmUVD_STATUS) != 0) uvd_v5_0_stop(adev); @@ -253,9 +254,9 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) /* programm memory controller bits 0-27 */ WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.gpu_addr)); + lower_32_bits(adev->uvd.inst->gpu_addr)); WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.gpu_addr)); + upper_32_bits(adev->uvd.inst->gpu_addr)); offset = AMDGPU_UVD_FIRMWARE_OFFSET; size = AMDGPU_UVD_FIRMWARE_SIZE(adev); @@ -287,7 +288,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) */ static int uvd_v5_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; uint32_t mp_swap_cntl; @@ -540,6 +541,18 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } +static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +{ + int i; + + WARN_ON(ring->wptr % 2 || count % 2); + + for (i = 0; i < count / 2; i++) { + amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0)); + amdgpu_ring_write(ring, 0); + } +} + static bool uvd_v5_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -586,7 +599,7 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { DRM_DEBUG("IH: UVD TRAP\n"); - amdgpu_fence_process(&adev->uvd.ring); + amdgpu_fence_process(&adev->uvd.inst->ring); return 0; } @@ -840,7 +853,6 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { .type = AMDGPU_RING_TYPE_UVD, .align_mask = 0xf, - .nop = PACKET0(mmUVD_NO_OP, 0), .support_64bit_ptrs = false, .get_rptr = uvd_v5_0_ring_get_rptr, .get_wptr = uvd_v5_0_ring_get_wptr, @@ -853,7 +865,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { .emit_fence = uvd_v5_0_ring_emit_fence, .test_ring = uvd_v5_0_ring_test_ring, .test_ib = amdgpu_uvd_ring_test_ib, - .insert_nop = amdgpu_ring_insert_nop, + .insert_nop = uvd_v5_0_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .begin_use = amdgpu_uvd_ring_begin_use, .end_use = amdgpu_uvd_ring_end_use, @@ -861,7 +873,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) { - adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs; + adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs; } static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { @@ -871,8 +883,8 @@ static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->uvd.irq.num_types = 1; - adev->uvd.irq.funcs = 
&uvd_v5_0_irq_funcs; + adev->uvd.inst->irq.num_types = 1; + adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs; } const struct amdgpu_ip_block_version uvd_v5_0_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index ca6ab56357b5..bfddf97dd13e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -91,7 +91,7 @@ static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->uvd.ring_enc[0]) + if (ring == &adev->uvd.inst->ring_enc[0]) return RREG32(mmUVD_RB_RPTR); else return RREG32(mmUVD_RB_RPTR2); @@ -121,7 +121,7 @@ static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->uvd.ring_enc[0]) + if (ring == &adev->uvd.inst->ring_enc[0]) return RREG32(mmUVD_RB_WPTR); else return RREG32(mmUVD_RB_WPTR2); @@ -152,7 +152,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->uvd.ring_enc[0]) + if (ring == &adev->uvd.inst->ring_enc[0]) WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); else @@ -375,6 +375,7 @@ error: static int uvd_v6_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->uvd.num_uvd_inst = 1; if (!(adev->flags & AMD_IS_APU) && (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK)) @@ -399,14 +400,14 @@ static int uvd_v6_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); if (r) return r; /* UVD ENC TRAP */ if (uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq); if (r) return r; } @@ -418,17 +419,17 @@ static int uvd_v6_0_sw_init(void *handle) if (!uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) - adev->uvd.ring_enc[i].funcs = NULL; + adev->uvd.inst->ring_enc[i].funcs = NULL; - adev->uvd.irq.num_types = 1; + adev->uvd.inst->irq.num_types = 1; adev->uvd.num_enc_rings = 0; DRM_INFO("UVD ENC is disabled\n"); } else { struct drm_sched_rq *rq; - ring = &adev->uvd.ring_enc[0]; + ring = &adev->uvd.inst->ring_enc[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, + r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc, rq, NULL); if (r) { DRM_ERROR("Failed setting up UVD ENC run queue.\n"); @@ -440,17 +441,17 @@ static int uvd_v6_0_sw_init(void *handle) if (r) return r; - ring = &adev->uvd.ring; + ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; if (uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.ring_enc[i]; + ring = &adev->uvd.inst->ring_enc[i]; sprintf(ring->name, "uvd_enc%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; } @@ -469,10 +470,10 @@ static int uvd_v6_0_sw_fini(void *handle) return r; if 
(uvd_v6_0_enc_support(adev)) { - drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); + drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc); for (i = 0; i < adev->uvd.num_enc_rings; ++i) - amdgpu_ring_fini(&adev->uvd.ring_enc[i]); + amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); } return amdgpu_uvd_sw_fini(adev); @@ -488,7 +489,7 @@ static int uvd_v6_0_sw_fini(void *handle) static int uvd_v6_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int i, r; @@ -532,7 +533,7 @@ static int uvd_v6_0_hw_init(void *handle) if (uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.ring_enc[i]; + ring = &adev->uvd.inst->ring_enc[i]; ring->ready = true; r = amdgpu_ring_test_ring(ring); if (r) { @@ -563,7 +564,7 @@ done: static int uvd_v6_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; if (RREG32(mmUVD_STATUS) != 0) uvd_v6_0_stop(adev); @@ -611,9 +612,9 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev) /* programm memory controller bits 0-27 */ WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.gpu_addr)); + lower_32_bits(adev->uvd.inst->gpu_addr)); WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.gpu_addr)); + upper_32_bits(adev->uvd.inst->gpu_addr)); offset = AMDGPU_UVD_FIRMWARE_OFFSET; size = AMDGPU_UVD_FIRMWARE_SIZE(adev); @@ -726,7 +727,7 @@ static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev, */ static int uvd_v6_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; uint32_t mp_swap_cntl; @@ -866,14 +867,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0); if (uvd_v6_0_enc_support(adev)) { - ring = &adev->uvd.ring_enc[0]; + ring = &adev->uvd.inst->ring_enc[0]; WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32(mmUVD_RB_SIZE, ring->ring_size / 4); - ring = &adev->uvd.ring_enc[1]; + ring = &adev->uvd.inst->ring_enc[1]; WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr); @@ -1099,6 +1100,18 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0xE); } +static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +{ + int i; + + WARN_ON(ring->wptr % 2 || count % 2); + + for (i = 0; i < count / 2; i++) { + amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0)); + amdgpu_ring_write(ring, 0); + } +} + static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring) { uint32_t seq = ring->fence_drv.sync_seq; @@ -1158,10 +1171,10 @@ static bool uvd_v6_0_check_soft_reset(void *handle) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); if (srbm_soft_reset) { - adev->uvd.srbm_soft_reset = srbm_soft_reset; + adev->uvd.inst->srbm_soft_reset = srbm_soft_reset; return true; } else { - adev->uvd.srbm_soft_reset = 0; + 
adev->uvd.inst->srbm_soft_reset = 0; return false; } } @@ -1170,7 +1183,7 @@ static int uvd_v6_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst->srbm_soft_reset) return 0; uvd_v6_0_stop(adev); @@ -1182,9 +1195,9 @@ static int uvd_v6_0_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst->srbm_soft_reset) return 0; - srbm_soft_reset = adev->uvd.srbm_soft_reset; + srbm_soft_reset = adev->uvd.inst->srbm_soft_reset; if (srbm_soft_reset) { u32 tmp; @@ -1212,7 +1225,7 @@ static int uvd_v6_0_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst->srbm_soft_reset) return 0; mdelay(5); @@ -1238,17 +1251,17 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev, switch (entry->src_id) { case 124: - amdgpu_fence_process(&adev->uvd.ring); + amdgpu_fence_process(&adev->uvd.inst->ring); break; case 119: if (likely(uvd_v6_0_enc_support(adev))) - amdgpu_fence_process(&adev->uvd.ring_enc[0]); + amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]); else int_handled = false; break; case 120: if (likely(uvd_v6_0_enc_support(adev))) - amdgpu_fence_process(&adev->uvd.ring_enc[1]); + amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]); else int_handled = false; break; @@ -1531,7 +1544,6 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { .type = AMDGPU_RING_TYPE_UVD, .align_mask = 0xf, - .nop = PACKET0(mmUVD_NO_OP, 0), .support_64bit_ptrs = false, .get_rptr = uvd_v6_0_ring_get_rptr, .get_wptr = uvd_v6_0_ring_get_wptr, @@ -1547,7 +1559,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, .test_ring = uvd_v6_0_ring_test_ring, .test_ib = amdgpu_uvd_ring_test_ib, - .insert_nop = amdgpu_ring_insert_nop, + .insert_nop = uvd_v6_0_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .begin_use = amdgpu_uvd_ring_begin_use, .end_use = amdgpu_uvd_ring_end_use, @@ -1612,10 +1624,10 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = { static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) { if (adev->asic_type >= CHIP_POLARIS10) { - adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs; + adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs; DRM_INFO("UVD is enabled in VM mode\n"); } else { - adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs; + adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs; DRM_INFO("UVD is enabled in physical mode\n"); } } @@ -1625,7 +1637,7 @@ static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->uvd.num_enc_rings; ++i) - adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs; + adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs; DRM_INFO("UVD ENC is enabled in VM mode\n"); } @@ -1638,11 +1650,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = { static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev) { if (uvd_v6_0_enc_support(adev)) - adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1; + adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1; else - adev->uvd.irq.num_types = 1; + adev->uvd.inst->irq.num_types = 1; - adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs; + adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs; } const struct 
amdgpu_ip_block_version uvd_v6_0_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 0ca63d588670..57d32f21b3a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -40,6 +40,8 @@ #include "mmhub/mmhub_1_0_offset.h" #include "mmhub/mmhub_1_0_sh_mask.h" +#define UVD7_MAX_HW_INSTANCES_VEGA20 2 + static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev); static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev); static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev); @@ -47,6 +49,11 @@ static int uvd_v7_0_start(struct amdgpu_device *adev); static void uvd_v7_0_stop(struct amdgpu_device *adev); static int uvd_v7_0_sriov_start(struct amdgpu_device *adev); +static int amdgpu_ih_clientid_uvds[] = { + SOC15_IH_CLIENTID_UVD, + SOC15_IH_CLIENTID_UVD1 +}; + /** * uvd_v7_0_ring_get_rptr - get read pointer * @@ -58,7 +65,7 @@ static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); + return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR); } /** @@ -72,10 +79,10 @@ static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->uvd.ring_enc[0]) - return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR); + if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR); else - return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2); + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2); } /** @@ -89,7 +96,7 @@ static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR); + return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR); } /** @@ -106,10 +113,10 @@ static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; - if (ring == &adev->uvd.ring_enc[0]) - return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); + if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR); else - return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2); } /** @@ -123,7 +130,7 @@ static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); } /** @@ -144,11 +151,11 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring) return; } - if (ring == &adev->uvd.ring_enc[0]) - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, + if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) + WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); else - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, + WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); } @@ -170,8 +177,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) r = amdgpu_ring_alloc(ring, 16); if (r) { - DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n", - ring->idx, r); + DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n", + ring->me, ring->idx, r); return r; } amdgpu_ring_write(ring, HEVC_ENC_CMD_END); @@ -184,11 +191,11 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); + 
DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n", + ring->me, ring->idx, i); } else { - DRM_ERROR("amdgpu: ring %d test failed\n", - ring->idx); + DRM_ERROR("amdgpu: (%d)ring %d test failed\n", + ring->me, ring->idx); r = -ETIMEDOUT; } @@ -342,24 +349,24 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL); if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r); goto error; } r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence); if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r); goto error; } r = dma_fence_wait_timeout(fence, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); + DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me); r = -ETIMEDOUT; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r); } else { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx); r = 0; } error: @@ -370,6 +377,10 @@ error: static int uvd_v7_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->asic_type == CHIP_VEGA20) + adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20; + else + adev->uvd.num_uvd_inst = 1; if (amdgpu_sriov_vf(adev)) adev->uvd.num_enc_rings = 1; @@ -386,19 +397,21 @@ static int uvd_v7_0_sw_init(void *handle) { struct amdgpu_ring *ring; struct drm_sched_rq *rq; - int i, r; + int i, j, r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.irq); - if (r) - return r; - - /* UVD ENC TRAP */ - for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq); + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + /* UVD TRAP */ + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], 124, &adev->uvd.inst[j].irq); if (r) return r; + + /* UVD ENC TRAP */ + for (i = 0; i < adev->uvd.num_enc_rings; ++i) { + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + 119, &adev->uvd.inst[j].irq); + if (r) + return r; + } } r = amdgpu_uvd_sw_init(adev); @@ -415,43 +428,48 @@ static int uvd_v7_0_sw_init(void *handle) DRM_INFO("PSP loading UVD firmware\n"); } - ring = &adev->uvd.ring_enc[0]; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, - rq, NULL); - if (r) { - DRM_ERROR("Failed setting up UVD ENC run queue.\n"); - return r; + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + ring = &adev->uvd.inst[j].ring_enc[0]; + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc, + rq, NULL); + if (r) { + DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j); + return r; + } } r = amdgpu_uvd_resume(adev); if (r) return r; - if (!amdgpu_sriov_vf(adev)) { - ring = &adev->uvd.ring; - sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); - if (r) - return r; - } - for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.ring_enc[i]; - sprintf(ring->name, "uvd_enc%d", i); - if (amdgpu_sriov_vf(adev)) { - ring->use_doorbell = true; - - /* currently only use the first 
enconding ring for - * sriov, so set unused location for other unused rings. - */ - if (i == 0) - ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2; - else - ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1; + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + if (!amdgpu_sriov_vf(adev)) { + ring = &adev->uvd.inst[j].ring; + sprintf(ring->name, "uvd<%d>", j); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); + if (r) + return r; + } + + for (i = 0; i < adev->uvd.num_enc_rings; ++i) { + ring = &adev->uvd.inst[j].ring_enc[i]; + sprintf(ring->name, "uvd_enc%d<%d>", i, j); + if (amdgpu_sriov_vf(adev)) { + ring->use_doorbell = true; + + /* currently only use the first enconding ring for + * sriov, so set unused location for other unused rings. + */ + if (i == 0) + ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2; + else + ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1; + } + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); + if (r) + return r; } - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); - if (r) - return r; } r = amdgpu_virt_alloc_mm_table(adev); @@ -463,7 +481,7 @@ static int uvd_v7_0_sw_init(void *handle) static int uvd_v7_0_sw_fini(void *handle) { - int i, r; + int i, j, r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_virt_free_mm_table(adev); @@ -472,11 +490,12 @@ static int uvd_v7_0_sw_fini(void *handle) if (r) return r; - drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); - - for (i = 0; i < adev->uvd.num_enc_rings; ++i) - amdgpu_ring_fini(&adev->uvd.ring_enc[i]); + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { + drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc); + for (i = 0; i < adev->uvd.num_enc_rings; ++i) + amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); + } return amdgpu_uvd_sw_fini(adev); } @@ -490,9 +509,9 @@ static int uvd_v7_0_sw_fini(void *handle) static int uvd_v7_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring; uint32_t tmp; - int i, r; + int i, j, r; if (amdgpu_sriov_vf(adev)) r = uvd_v7_0_sriov_start(adev); @@ -501,57 +520,60 @@ static int uvd_v7_0_hw_init(void *handle) if (r) goto done; - if (!amdgpu_sriov_vf(adev)) { - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; - goto done; - } - - r = amdgpu_ring_alloc(ring, 10); - if (r) { - DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); - goto done; + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { + ring = &adev->uvd.inst[j].ring; + + if (!amdgpu_sriov_vf(adev)) { + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + goto done; + } + + r = amdgpu_ring_alloc(ring, 10); + if (r) { + DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r); + goto done; + } + + tmp = PACKET0(SOC15_REG_OFFSET(UVD, j, + mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + tmp = PACKET0(SOC15_REG_OFFSET(UVD, j, + mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + tmp = PACKET0(SOC15_REG_OFFSET(UVD, j, + mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + /* Clear timeout status bits */ + amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j, + 
mmUVD_SEMA_TIMEOUT_STATUS), 0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j, + mmUVD_SEMA_CNTL), 0)); + amdgpu_ring_write(ring, 3); + + amdgpu_ring_commit(ring); } - tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0, - mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0); - amdgpu_ring_write(ring, tmp); - amdgpu_ring_write(ring, 0xFFFFF); - - tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0, - mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0); - amdgpu_ring_write(ring, tmp); - amdgpu_ring_write(ring, 0xFFFFF); - - tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0, - mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0); - amdgpu_ring_write(ring, tmp); - amdgpu_ring_write(ring, 0xFFFFF); - - /* Clear timeout status bits */ - amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, - mmUVD_SEMA_TIMEOUT_STATUS), 0)); - amdgpu_ring_write(ring, 0x8); - - amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, - mmUVD_SEMA_CNTL), 0)); - amdgpu_ring_write(ring, 3); - - amdgpu_ring_commit(ring); - } - - for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.ring_enc[i]; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; - goto done; + for (i = 0; i < adev->uvd.num_enc_rings; ++i) { + ring = &adev->uvd.inst[j].ring_enc[i]; + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + goto done; + } } } - done: if (!r) DRM_INFO("UVD and UVD ENC initialized successfully.\n"); @@ -569,7 +591,7 @@ done: static int uvd_v7_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + int i; if (!amdgpu_sriov_vf(adev)) uvd_v7_0_stop(adev); @@ -578,7 +600,8 @@ static int uvd_v7_0_hw_fini(void *handle) DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); } - ring->ready = false; + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) + adev->uvd.inst[i].ring.ready = false; return 0; } @@ -618,48 +641,51 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev) { uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev); uint32_t offset; + int i; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); - offset = 0; - } else { - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.gpu_addr)); - offset = size; - } + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); + offset = 0; + } else { + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->uvd.inst[i].gpu_addr)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->uvd.inst[i].gpu_addr)); + offset = size; + } - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, - AMDGPU_UVD_FIRMWARE_OFFSET >> 3); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); - - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.gpu_addr + offset)); - WREG32_SOC15(UVD, 0, 
mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.gpu_addr + offset)); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21)); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE); - - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21)); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, - AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); - - WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - - WREG32_SOC15(UVD, 0, mmUVD_GP_SCRATCH4, adev->uvd.max_handles); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size); + + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + lower_32_bits(adev->uvd.inst[i].gpu_addr + offset)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + upper_32_bits(adev->uvd.inst[i].gpu_addr + offset)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE); + + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, + AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); + + WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles); + } } static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, @@ -669,6 +695,7 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, uint64_t addr = table->gpu_addr; struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr; uint32_t size; + int i; size = header->header_size + header->vce_table_size + header->uvd_table_size; @@ -688,11 +715,12 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, /* 4, set resp to zero */ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0); - WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0); - adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0; - adev->uvd.ring_enc[0].wptr = 0; - adev->uvd.ring_enc[0].wptr_old = 0; - + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { + WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0); + adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0; + adev->uvd.inst[i].ring_enc[0].wptr = 0; + adev->uvd.inst[i].ring_enc[0].wptr_old = 0; + } /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001); @@ -725,6 +753,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) struct mmsch_v1_0_cmd_end end = { {0} }; uint32_t *init_table = 
adev->virt.mm_table.cpu_addr; struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table; + uint8_t i = 0; direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; @@ -742,120 +771,121 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) init_table += header->uvd_table_offset; - ring = &adev->uvd.ring; - ring->wptr = 0; - size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); - - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), - 0xFFFFFFFF, 0x00000004); - /* mc resume*/ - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), - lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), - upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); - offset = 0; - } else { - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), - lower_32_bits(adev->uvd.gpu_addr)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), - upper_32_bits(adev->uvd.gpu_addr)); - offset = size; + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { + ring = &adev->uvd.inst[i].ring; + ring->wptr = 0; + size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); + + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), + 0xFFFFFFFF, 0x00000004); + /* mc resume*/ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); + offset = 0; + } else { + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->uvd.inst[i].gpu_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->uvd.inst[i].gpu_addr)); + offset = size; + } + + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size); + + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->uvd.inst[i].gpu_addr + offset)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->uvd.inst[i].gpu_addr + offset)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE); + + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2), + AMDGPU_UVD_STACK_SIZE + 
(AMDGPU_UVD_SESSION_SIZE * 40)); + + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles); + /* mc resume end*/ + + /* disable clock gating */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL), + ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0); + + /* disable interupt */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), + ~UVD_MASTINT_EN__VCPU_EN_MASK, 0); + + /* stall UMC and register bus before resetting VCPU */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, + UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + + /* put LMI, VCPU, RBC etc... into reset */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), + (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | + UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | + UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | + UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | + UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | + UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | + UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK)); + + /* initialize UVD memory controller */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL), + (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + 0x00100000L)); + + /* take all subblocks out of reset, except VCPU */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + + /* enable VCPU clock */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), + UVD_VCPU_CNTL__CLK_EN_MASK); + + /* enable master interrupt */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), + ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), + (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); + + /* clear the bit 4 of UVD_STATUS */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0); + + /* force RBC into idle state */ + size = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp); + + ring = &adev->uvd.inst[i].ring_enc[0]; + ring->wptr = 0; + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4); + + /* boot up the VCPU */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0); + + /* enable UMC */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0); + + MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02); } - - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), - AMDGPU_UVD_FIRMWARE_OFFSET >> 3); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size); - - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), - lower_32_bits(adev->uvd.gpu_addr + offset)); - 
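
The loop that just closed replaces the old fixed instance-0 MMSCH programming with writes indexed by the instance counter i, so the same init-table entries are emitted once per UVD instance on Vega20. A minimal standalone sketch of that per-instance offset lookup, with made-up base addresses standing in for the driver's real per-IP reg_offset tables:

#include <stdio.h>
#include <stdint.h>

#define MAX_UVD_INST 2

/* illustrative apertures only; the driver resolves these through its
 * per-IP, per-instance offset tables, not a hard-coded array like this */
static const uint32_t uvd_inst_base[MAX_UVD_INST] = { 0x7800, 0x7e00 };

static uint32_t uvd_reg(int inst, uint32_t offset)
{
	return uvd_inst_base[inst] + offset;
}

int main(void)
{
	const uint32_t mmUVD_STATUS = 0x05;	/* made-up register offset */
	int i;

	for (i = 0; i < MAX_UVD_INST; i++)
		printf("instance %d: UVD_STATUS at 0x%04x\n",
		       i, uvd_reg(i, mmUVD_STATUS));
	return 0;
}

The point of routing every access through SOC15_REG_OFFSET(UVD, i, ...) is that none of the programming sequences change shape for the second instance; only the index does.
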
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), - upper_32_bits(adev->uvd.gpu_addr + offset)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE); - - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), - lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), - upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2), - AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); - - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles); - /* mc resume end*/ - - /* disable clock gating */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), - ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0); - - /* disable interupt */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), - ~UVD_MASTINT_EN__VCPU_EN_MASK, 0); - - /* stall UMC and register bus before resetting VCPU */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, - UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - - /* put LMI, VCPU, RBC etc... into reset */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), - (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | - UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | - UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | - UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | - UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | - UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | - UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK)); - - /* initialize UVD memory controller */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL), - (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | - UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | - UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__REQ_MODE_MASK | - 0x00100000L)); - - /* take all subblocks out of reset, except VCPU */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - - /* enable VCPU clock */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), - UVD_VCPU_CNTL__CLK_EN_MASK); - - /* enable master interrupt */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), - ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), - (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); - - /* clear the bit 4 of UVD_STATUS */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), - ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0); - - /* force RBC into idle state */ - size = order_base_2(ring->ring_size); - tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp); - - ring = &adev->uvd.ring_enc[0]; - ring->wptr = 0; - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), 
upper_32_bits(ring->gpu_addr)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4); - - /* boot up the VCPU */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0); - - /* enable UMC */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0); - - MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02); - /* add end packet */ memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; @@ -874,15 +904,17 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) */ static int uvd_v7_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; uint32_t mp_swap_cntl; - int i, j, r; + int i, j, k, r; - /* disable DPG */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0, - ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + for (k = 0; k < adev->uvd.num_uvd_inst; ++k) { + /* disable DPG */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + } /* disable byte swapping */ lmi_swap_cntl = 0; @@ -890,157 +922,159 @@ static int uvd_v7_0_start(struct amdgpu_device *adev) uvd_v7_0_mc_resume(adev); - /* disable clock gating */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), 0, - ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK); - - /* disable interupt */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0, - ~UVD_MASTINT_EN__VCPU_EN_MASK); - - /* stall UMC and register bus before resetting VCPU */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - mdelay(1); - - /* put LMI, VCPU, RBC etc... into reset */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, - UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | - UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | - UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | - UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | - UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | - UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | - UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); - mdelay(5); + for (k = 0; k < adev->uvd.num_uvd_inst; ++k) { + ring = &adev->uvd.inst[k].ring; + /* disable clock gating */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0, + ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK); - /* initialize UVD memory controller */ - WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, - (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | - UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | - UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__REQ_MODE_MASK | - 0x00100000L); + /* disable interupt */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* stall UMC and register bus before resetting VCPU */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), + UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + mdelay(1); + + /* put LMI, VCPU, RBC etc... 
into reset */ + WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, + UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | + UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | + UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | + UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | + UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | + UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | + UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); + mdelay(5); + + /* initialize UVD memory controller */ + WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL, + (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + 0x00100000L); #ifdef __BIG_ENDIAN - /* swap (8 in 32) RB and IB */ - lmi_swap_cntl = 0xa; - mp_swap_cntl = 0; + /* swap (8 in 32) RB and IB */ + lmi_swap_cntl = 0xa; + mp_swap_cntl = 0; #endif - WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); - WREG32_SOC15(UVD, 0, mmUVD_MP_SWAP_CNTL, mp_swap_cntl); - - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88); - - /* take all subblocks out of reset, except VCPU */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - mdelay(5); + WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); + WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl); - /* enable VCPU clock */ - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, - UVD_VCPU_CNTL__CLK_EN_MASK); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88); - /* enable UMC */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + /* take all subblocks out of reset, except VCPU */ + WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(5); + + /* enable VCPU clock */ + WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL, + UVD_VCPU_CNTL__CLK_EN_MASK); + + /* enable UMC */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - /* boot up the VCPU */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0); - mdelay(10); + /* boot up the VCPU */ + WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0); + mdelay(10); - for (i = 0; i < 10; ++i) { - uint32_t status; + for (i = 0; i < 10; ++i) { + uint32_t status; - for (j = 0; j < 100; ++j) { - status = RREG32_SOC15(UVD, 0, mmUVD_STATUS); + for (j = 0; j < 100; ++j) { + status = RREG32_SOC15(UVD, k, mmUVD_STATUS); + if (status & 2) + break; + mdelay(10); + } + r = 0; if (status & 2) break; + + DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k); + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, + ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); mdelay(10); + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0, + ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(10); + r = -1; } - r = 0; - if (status & 2) - break; - - DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, - ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - 
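
A note on the VCPU bring-up in this hunk: the driver polls UVD_STATUS for the ready bit (0x2) and, if the firmware never reports ready, pulses VCPU_SOFT_RESET and tries again, up to ten attempts before giving up. The shape of that bounded poll-and-retry, modelled in plain C with stand-in helpers (names here are illustrative, not the driver's):

#define UVD_READY_BIT 0x2

extern unsigned int read_status(void);	/* stand-in for RREG32_SOC15() */
extern void pulse_vcpu_reset(void);	/* stand-in for the soft-reset toggle */
extern void sleep_ms(unsigned int ms);	/* stand-in for mdelay() */

static int wait_for_vcpu(void)
{
	int attempt, poll;

	for (attempt = 0; attempt < 10; attempt++) {
		for (poll = 0; poll < 100; poll++) {
			if (read_status() & UVD_READY_BIT)
				return 0;	/* VCPU came up */
			sleep_ms(10);
		}
		pulse_vcpu_reset();		/* recovery action, then retry */
		sleep_ms(20);
	}
	return -1;				/* give up, as the driver does */
}

With multiple instances the same sequence simply runs once per k, which is why the error messages in this hunk gained the instance index.
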
mdelay(10); - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0, - ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - mdelay(10); - r = -1; - } - if (r) { - DRM_ERROR("UVD not responding, giving up!!!\n"); - return r; - } - /* enable master interrupt */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), - (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), - ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); - - /* clear the bit 4 of UVD_STATUS */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0, - ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); - - /* force RBC into idle state */ - rb_bufsz = order_base_2(ring->ring_size); - tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); - - /* set the write pointer delay */ - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0); - - /* set the wb address */ - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR, - (upper_32_bits(ring->gpu_addr) >> 2)); - - /* programm the RB_BASE for ring buffer */ - WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, - upper_32_bits(ring->gpu_addr)); - - /* Initialize the ring buffer's read and write pointers */ - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0); - - ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, - lower_32_bits(ring->wptr)); - - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, - ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); - - ring = &adev->uvd.ring_enc[0]; - WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); + if (r) { + DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k); + return r; + } + /* enable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), + (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), + ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); - ring = &adev->uvd.ring_enc[1]; - WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); + /* clear the bit 4 of UVD_STATUS */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0, + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + /* force RBC into idle state */ + rb_bufsz = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp); + + /* set the write pointer delay */ + WREG32_SOC15(UVD, k, 
mmUVD_RBC_RB_WPTR_CNTL, 0); + + /* set the wb address */ + WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR, + (upper_32_bits(ring->gpu_addr) >> 2)); + + /* programm the RB_BASE for ring buffer */ + WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0); + + ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR); + WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR, + lower_32_bits(ring->wptr)); + + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0, + ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); + + ring = &adev->uvd.inst[k].ring_enc[0]; + WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4); + + ring = &adev->uvd.inst[k].ring_enc[1]; + WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr); + WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4); + } return 0; } @@ -1053,26 +1087,30 @@ static int uvd_v7_0_start(struct amdgpu_device *adev) */ static void uvd_v7_0_stop(struct amdgpu_device *adev) { - /* force RBC into idle state */ - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101); - - /* Stall UMC and register bus before resetting VCPU */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - mdelay(1); - - /* put VCPU into reset */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - mdelay(5); + uint8_t i = 0; - /* disable VCPU clock */ - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0); + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { + /* force RBC into idle state */ + WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101); + + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), + UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + mdelay(1); + + /* put VCPU into reset */ + WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET, + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(5); - /* Unstall UMC and register bus */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + /* disable VCPU clock */ + WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0); + + /* Unstall UMC and register bus */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + } } /** @@ -1091,26 +1129,26 @@ static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0)); amdgpu_ring_write(ring, seq); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0)); amdgpu_ring_write(ring, addr & 0xffffffff); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); + 
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0)); amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0)); amdgpu_ring_write(ring, 2); } @@ -1159,30 +1197,30 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; - WREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID, 0xCAFEDEAD); + WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) { - DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", - ring->idx, r); + DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n", + ring->me, ring->idx, r); return r; } amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0)); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID); + tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); } if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); + DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n", + ring->me, ring->idx, i); } else { - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", - ring->idx, tmp); + DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n", + ring->me, ring->idx, tmp); r = -EINVAL; } return r; @@ -1203,17 +1241,17 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0)); amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0)); amdgpu_ring_write(ring, ib->length_dw); } @@ -1241,13 +1279,13 @@ static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0)); amdgpu_ring_write(ring, reg << 2); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0)); 
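
The emit helpers in these hunks all share one framing rule: a register write on the UVD decode ring is a type-0 packet header naming the register, followed by exactly one payload dword, so each write costs two dwords. That is also why the reworked insert_nop emits count/2 header+payload pairs and warns when count is odd. A standalone sketch of the framing (the bit layout mirrors the usual PACKET0 convention but is shown for illustration; soc15d.h has the authoritative macro):

#include <stdint.h>
#include <stdio.h>

#define PKT_TYPE0	0u
#define MY_PACKET0(reg, n) \
	((PKT_TYPE0 << 30) | ((reg) & 0xffff) | (((n) & 0x3fff) << 16))

static void emit_reg_write(uint32_t *ring, int *w, uint32_t reg, uint32_t val)
{
	ring[(*w)++] = MY_PACKET0(reg, 0);	/* header: one data dword follows */
	ring[(*w)++] = val;			/* payload */
}

int main(void)
{
	uint32_t ring[4];
	int w = 0;

	emit_reg_write(ring, &w, 0x1234, 0xcafedead);	/* made-up register */
	printf("header=0x%08x payload=0x%08x (%d dwords)\n",
	       ring[0], ring[1], w);
	return 0;
}
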
amdgpu_ring_write(ring, val); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0)); amdgpu_ring_write(ring, 8); } @@ -1257,16 +1295,16 @@ static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, struct amdgpu_device *adev = ring->adev; amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0)); amdgpu_ring_write(ring, reg << 2); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0)); amdgpu_ring_write(ring, val); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0)); amdgpu_ring_write(ring, mask); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0)); amdgpu_ring_write(ring, 12); } @@ -1287,12 +1325,15 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) { - int i; struct amdgpu_device *adev = ring->adev; + int i; - for (i = 0; i < count; i++) - amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0)); + WARN_ON(ring->wptr % 2 || count % 2); + for (i = 0; i < count / 2; i++) { + amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0)); + amdgpu_ring_write(ring, 0); + } } static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring) @@ -1359,16 +1400,16 @@ static bool uvd_v7_0_check_soft_reset(void *handle) if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) || REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) || - (RREG32_SOC15(UVD, 0, mmUVD_STATUS) & + (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK)) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); if (srbm_soft_reset) { - adev->uvd.srbm_soft_reset = srbm_soft_reset; + adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset; return true; } else { - adev->uvd.srbm_soft_reset = 0; + adev->uvd.inst[ring->me].srbm_soft_reset = 0; return false; } } @@ -1377,7 +1418,7 @@ static int uvd_v7_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst[ring->me].srbm_soft_reset) return 0; uvd_v7_0_stop(adev); @@ -1389,9 +1430,9 @@ static int uvd_v7_0_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst[ring->me].srbm_soft_reset) return 0; - srbm_soft_reset = adev->uvd.srbm_soft_reset; + srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset; if (srbm_soft_reset) { u32 tmp; @@ -1419,7 +1460,7 @@ static int uvd_v7_0_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst[ring->me].srbm_soft_reset) return 0; mdelay(5); @@ -1441,17 +1482,32 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + uint32_t ip_instance; + + switch (entry->client_id) { + case SOC15_IH_CLIENTID_UVD: + ip_instance = 0; + break; + case SOC15_IH_CLIENTID_UVD1: + ip_instance = 1; + break; + 
default: + DRM_ERROR("Unhandled client id: %d\n", entry->client_id); + return 0; + } + DRM_DEBUG("IH: UVD TRAP\n"); + switch (entry->src_id) { case 124: - amdgpu_fence_process(&adev->uvd.ring); + amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring); break; case 119: - amdgpu_fence_process(&adev->uvd.ring_enc[0]); + amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]); break; case 120: if (!amdgpu_sriov_vf(adev)) - amdgpu_fence_process(&adev->uvd.ring_enc[1]); + amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", @@ -1467,9 +1523,9 @@ static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev) { uint32_t data, data1, data2, suvd_flags; - data = RREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL); - data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE); - data2 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL); + data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL); + data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE); + data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL); data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); @@ -1513,18 +1569,18 @@ static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev) UVD_SUVD_CGC_CTRL__SDB_MODE_MASK); data1 |= suvd_flags; - WREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL, data); - WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, 0); - WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1); - WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL, data2); + WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data); + WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0); + WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1); + WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2); } static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev) { uint32_t data, data1, cgc_flags, suvd_flags; - data = RREG32_SOC15(UVD, 0, mmUVD_CGC_GATE); - data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE); + data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE); + data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE); cgc_flags = UVD_CGC_GATE__SYS_MASK | UVD_CGC_GATE__UDEC_MASK | @@ -1556,8 +1612,8 @@ static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev) data |= cgc_flags; data1 |= suvd_flags; - WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, data); - WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1); + WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data); + WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1); } static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) @@ -1616,7 +1672,7 @@ static int uvd_v7_0_set_powergating_state(void *handle, if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) return 0; - WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); + WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); if (state == AMD_PG_STATE_GATE) { uvd_v7_0_stop(adev); @@ -1657,7 +1713,6 @@ const struct amd_ip_funcs uvd_v7_0_ip_funcs = { static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_UVD, .align_mask = 0xf, - .nop = PACKET0(0x81ff, 0), .support_64bit_ptrs = false, .vmhub = AMDGPU_MMHUB, .get_rptr = uvd_v7_0_ring_get_rptr, @@ -1719,18 +1774,27 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = { static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev) { - adev->uvd.ring.funcs = &uvd_v7_0_ring_vm_funcs; - DRM_INFO("UVD is enabled in VM mode\n"); + int i; + + for (i = 0; i < adev->uvd.num_uvd_inst; i++) { + adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs; + 
adev->uvd.inst[i].ring.me = i; + DRM_INFO("UVD(%d) is enabled in VM mode\n", i); + } } static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev) { - int i; + int i, j; - for (i = 0; i < adev->uvd.num_enc_rings; ++i) - adev->uvd.ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs; + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + for (i = 0; i < adev->uvd.num_enc_rings; ++i) { + adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs; + adev->uvd.inst[j].ring_enc[i].me = j; + } - DRM_INFO("UVD ENC is enabled in VM mode\n"); + DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j); + } } static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = { @@ -1740,8 +1804,12 @@ static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = { static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1; - adev->uvd.irq.funcs = &uvd_v7_0_irq_funcs; + int i; + + for (i = 0; i < adev->uvd.num_uvd_inst; i++) { + adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1; + adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs; + } } const struct amdgpu_ip_block_version uvd_v7_0_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 0501746b6c2c..110b294ebed3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -35,7 +35,6 @@ #include "mmhub/mmhub_9_1_offset.h" #include "mmhub/mmhub_9_1_sh_mask.h" -static int vcn_v1_0_start(struct amdgpu_device *adev); static int vcn_v1_0_stop(struct amdgpu_device *adev); static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev); @@ -146,10 +145,6 @@ static int vcn_v1_0_hw_init(void *handle) struct amdgpu_ring *ring = &adev->vcn.ring_dec; int i, r; - r = vcn_v1_0_start(adev); - if (r) - goto done; - ring->ready = true; r = amdgpu_ring_test_ring(ring); if (r) { @@ -185,11 +180,9 @@ static int vcn_v1_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->vcn.ring_dec; - int r; - r = vcn_v1_0_stop(adev); - if (r) - return r; + if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) + vcn_v1_0_stop(adev); ring->ready = false; @@ -288,14 +281,14 @@ static void vcn_v1_0_mc_resume(struct amdgpu_device *adev) * * Disable clock gating for VCN block */ -static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw) +static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev) { uint32_t data; /* JPEG disable CGC */ data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); - if (sw) + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; else data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK; @@ -310,7 +303,7 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw) /* UVD disable CGC */ data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); - if (sw) + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; else data &= ~ UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; @@ -415,13 +408,13 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw) * * Enable clock gating for VCN block */ -static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw) +static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev) { uint32_t data = 0; /* enable JPEG CGC */ data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); - if (sw) + if (adev->cg_flags & 
 		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 	else
 		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
@@ -435,7 +428,7 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw)
 
 	/* enable UVD CGC */
 	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
-	if (sw)
+	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
 		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 	else
 		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
@@ -480,6 +473,94 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw)
 	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
 }
 
+static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
+{
+	uint32_t data = 0;
+	int ret;
+
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
+
+		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
+		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
+	} else {
+		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
+		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
+		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret);
+	}
+
+	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS and UVDU_PWR_STATUS are 0 (power on) */
+
+	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
+	data &= ~0x103;
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
+		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;
+
+	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
+}
+
+static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
+{
+	uint32_t data = 0;
+	int ret;
+
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+		/* Before power off, this indicator has to be turned on */
+		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
+		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
+		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
+
+		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
+
+		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
+
+		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
+		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
+	}
+}
+
 /**
  * vcn_v1_0_start - start VCN block
  *
@@ -499,8 +580,9 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
 
 	vcn_v1_0_mc_resume(adev);
 
+	vcn_1_0_disable_static_power_gating(adev);
 	/* disable clock gating */
-	vcn_v1_0_disable_clock_gating(adev, true);
+	vcn_v1_0_disable_clock_gating(adev);
 
 	/* disable interrupt */
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
@@ -680,16 +762,45 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
 		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 
-	/* enable clock gating */
-	vcn_v1_0_enable_clock_gating(adev, true);
+	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);
+	vcn_v1_0_enable_clock_gating(adev);
+	vcn_1_0_enable_static_power_gating(adev);
 
 	return 0;
 }
 
+static bool vcn_v1_0_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2);
+}
+
+static int vcn_v1_0_wait_for_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret = 0;
+
+	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret);
+
+	return ret;
+}
+
 static int vcn_v1_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
-	/* needed for driver unload*/
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+
+	if (enable) {
+		/* wait for STATUS to clear */
+		if (!vcn_v1_0_is_idle(handle))
+			return -EBUSY;
+		vcn_v1_0_enable_clock_gating(adev);
+	} else {
+		/* disable HW gating and enable SW gating */
+		vcn_v1_0_disable_clock_gating(adev);
+	}
 	return 0;
 }
 
@@ -1048,16 +1159,36 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
 	return 0;
 }
 
-static void vcn_v1_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	int i;
 	struct amdgpu_device *adev = ring->adev;
+	int i;
 
-	for (i = 0; i < count; i++)
-		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
+	WARN_ON(ring->wptr % 2 || count % 2);
+
+	for (i = 0; i < count / 2; i++) {
+		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
+		amdgpu_ring_write(ring, 0);
+	}
 }
 
+static int vcn_v1_0_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	/* This doesn't actually powergate the VCN block.
+	 * That's done in the dpm code via the SMC. This
+	 * just re-inits the block as necessary. The actual
+	 * gating still happens in the dpm code. We should
+	 * revisit this when there is a cleaner line between
+	 * the smc and the hw blocks
+	 */
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (state == AMD_PG_STATE_GATE)
+		return vcn_v1_0_stop(adev);
+	else
+		return vcn_v1_0_start(adev);
+}
 
 static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
 	.name = "vcn_v1_0",
@@ -1069,20 +1200,19 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
 	.hw_fini = vcn_v1_0_hw_fini,
 	.suspend = vcn_v1_0_suspend,
 	.resume = vcn_v1_0_resume,
-	.is_idle = NULL /* vcn_v1_0_is_idle */,
-	.wait_for_idle = NULL /* vcn_v1_0_wait_for_idle */,
+	.is_idle = vcn_v1_0_is_idle,
+	.wait_for_idle = vcn_v1_0_wait_for_idle,
 	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
 	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
 	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
 	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
 	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
-	.set_powergating_state = NULL /* vcn_v1_0_set_powergating_state */,
+	.set_powergating_state = vcn_v1_0_set_powergating_state,
 };
 
 static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_DEC,
 	.align_mask = 0xf,
-	.nop = PACKET0(0x81ff, 0),
 	.support_64bit_ptrs = false,
 	.vmhub = AMDGPU_MMHUB,
 	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
@@ -1101,7 +1231,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
 	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
 	.test_ring = amdgpu_vcn_dec_ring_test_ring,
 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
-	.insert_nop = vcn_v1_0_ring_insert_nop,
+	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
 	.insert_start = vcn_v1_0_dec_ring_insert_start,
 	.insert_end = vcn_v1_0_dec_ring_insert_end,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
new file mode 100644
index 000000000000..52778de93ab0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "soc15_hw_ip.h"
+#include "vega20_ip_offset.h"
+
+int vega20_reg_base_init(struct amdgpu_device *adev)
+{
+	/* HW has more IP blocks; only initialize the blocks used by our driver */
+	uint32_t i;
+
+	for (i = 0; i < MAX_INSTANCE; ++i) {
+		adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+		adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+		adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+		adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+		adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+		adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+		adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
+		adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
+		adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+		adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCE_BASE.instance[i]));
+		adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+		adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
+		adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
+		adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+	}
+	return 0;
+}
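
A note on the SOC15 addressing used throughout this series: vega20_reg_base_init() only fills the adev->reg_offset[hwip][instance] tables; the actual register address is computed by the RREG32_SOC15()/WREG32_SOC15() macros from soc15_common.h. The following is a minimal illustrative sketch of that lookup, not driver code: the sketch_* names, the array bounds, and the stand-in types are assumptions made for the example.

#include <stdint.h>

#define SKETCH_MAX_HWIP		32	/* assumption for the sketch */
#define SKETCH_MAX_INSTANCE	 6	/* assumption for the sketch */

struct sketch_adev {
	/* per-IP, per-instance segment base tables, filled the same way
	 * vega20_reg_base_init() fills adev->reg_offset[...][...] */
	const uint32_t *reg_offset[SKETCH_MAX_HWIP][SKETCH_MAX_INSTANCE];
};

/* A SOC15 register address is "segment base + register offset"; the
 * instance argument (ring->me or ip_instance in the UVD 7.2 hunks above)
 * selects which copy of the IP block is addressed. */
static uint32_t sketch_reg_addr(const struct sketch_adev *adev,
				int hwip, int inst, int seg, uint32_t offset)
{
	return adev->reg_offset[hwip][inst][seg] + offset;
}

With two UVD instances on Vega20, the same mmUVD_CGC_CTRL offset therefore resolves to two different MMIO addresses depending on whether the instance index is 0 or 1, which is why this series threads the instance index through every register access and fence-processing path.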
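The static power-gating helpers added to vcn_v1_0.c program mmUVD_PGFSM_CONFIG and then poll mmUVD_PGFSM_STATUS via SOC15_WAIT_ON_RREG() until the power FSM reports the requested state. Roughly, that macro behaves like the sketch below; the retry budget, the delay, and the stubbed mmio_read()/delay_us() helpers are assumptions standing in for the real register read and udelay.

#include <stdint.h>
#include <errno.h>

/* Stand-ins for MMIO access and a microsecond delay; assumptions only. */
static uint32_t mmio_read(uint32_t reg) { (void)reg; return 0; }
static void delay_us(unsigned int us) { (void)us; }

/* Poll until (reg & mask) == expected, or give up after a bounded number
 * of retries -- the pattern SOC15_WAIT_ON_RREG() applies to
 * mmUVD_PGFSM_STATUS in vcn_1_0_{disable,enable}_static_power_gating(). */
static int sketch_wait_on_reg(uint32_t reg, uint32_t expected, uint32_t mask)
{
	unsigned int tries = 100000;	/* assumed retry budget */

	while ((mmio_read(reg) & mask) != expected) {
		delay_us(1);
		if (--tries == 0)
			return -ETIMEDOUT;
	}
	return 0;
}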
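Finally, on why vcn_v1_0_dec_ring_insert_nop() now pads in two-dword units: a register-write packet on the decode ring is a PACKET0 header followed by one payload dword, so a lone header would leave the ring misaligned, hence the WARN_ON on an odd write pointer or count. A sketch of the invariant follows; the simplified ring layout and the packet0_hdr() helper are assumptions, not the driver's actual encoding.

#include <assert.h>
#include <stdint.h>

/* Simplified ring buffer; the real state lives in struct amdgpu_ring. */
struct sketch_ring {
	uint32_t buf[256];
	uint32_t wptr;
};

static void ring_write(struct sketch_ring *r, uint32_t v)
{
	r->buf[r->wptr++ % 256] = v;
}

/* Assumed opaque "write register <reg>" packet header. */
static uint32_t packet0_hdr(uint32_t reg) { return reg; }

/* Pad with NOP packets two dwords at a time (header + payload), mirroring
 * vcn_v1_0_dec_ring_insert_nop(); an odd count would split a packet, hence
 * the assert (a WARN_ON in the kernel). */
static void sketch_insert_nop(struct sketch_ring *r, uint32_t count,
			      uint32_t nop_reg)
{
	uint32_t i;

	assert(!(r->wptr % 2) && !(count % 2));
	for (i = 0; i < count / 2; i++) {
		ring_write(r, packet0_hdr(nop_reg));
		ring_write(r, 0);
	}
}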