Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
111 files changed, 5643 insertions, 1669 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index a82d36ea88e2..12adca8c7819 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -3,6 +3,7 @@ config DRM_AMDGPU tristate "AMD GPU" depends on DRM && PCI && MMU + depends on !UML select FW_LOADER select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HDMI_HELPER @@ -19,6 +20,7 @@ config DRM_AMDGPU select BACKLIGHT_CLASS_DEVICE select INTERVAL_TREE select DRM_BUDDY + select DRM_SUBALLOC_HELPER # amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work # ACPI_VIDEO's dependencies must also be selected. select INPUT if ACPI diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 1d72cbc85348..415a7fa395c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \ amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \ - amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o \ + amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o amdgpu_mmhub.o amdgpu_hdp.o \ amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \ amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \ amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \ @@ -77,7 +77,8 @@ amdgpu-y += \ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \ nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \ - sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o + sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \ + nbio_v7_9.o # add DF block amdgpu-y += \ @@ -92,7 +93,7 @@ amdgpu-y += \ gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o mmhub_v9_4.o \ gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o \ mmhub_v1_7.o gfxhub_v3_0.o mmhub_v3_0.o mmhub_v3_0_2.o gmc_v11_0.o \ - mmhub_v3_0_1.o gfxhub_v3_0_3.o + mmhub_v3_0_1.o gfxhub_v3_0_3.o gfxhub_v1_2.o mmhub_v1_8.o # add UMC block amdgpu-y += \ @@ -135,6 +136,7 @@ amdgpu-y += \ gfx_v9_0.o \ gfx_v9_4.o \ gfx_v9_4_2.o \ + gfx_v9_4_3.o \ gfx_v10_0.o \ imu_v11_0.o \ gfx_v11_0.o \ @@ -148,6 +150,7 @@ amdgpu-y += \ sdma_v3_0.o \ sdma_v4_0.o \ sdma_v4_4.o \ + sdma_v4_4_2.o \ sdma_v5_0.o \ sdma_v5_2.o \ sdma_v6_0.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 39018f784f9c..02b827785e39 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -50,7 +50,6 @@ #include <linux/hashtable.h> #include <linux/dma-fence.h> #include <linux/pci.h> -#include <linux/aer.h> #include <drm/ttm/ttm_bo.h> #include <drm/ttm/ttm_placement.h> @@ -186,7 +185,6 @@ extern char *amdgpu_disable_cu; extern char *amdgpu_virtual_display; extern uint amdgpu_pp_feature_mask; extern uint amdgpu_force_long_training; -extern int amdgpu_job_hang_limit; extern int amdgpu_lbpw; extern int amdgpu_compute_multipipe; extern int amdgpu_gpu_recovery; @@ -424,29 +422,11 @@ struct amdgpu_clock { * alignment). 
*/ -#define AMDGPU_SA_NUM_FENCE_LISTS 32 - struct amdgpu_sa_manager { - wait_queue_head_t wq; - struct amdgpu_bo *bo; - struct list_head *hole; - struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS]; - struct list_head olist; - unsigned size; - uint64_t gpu_addr; - void *cpu_ptr; - uint32_t domain; - uint32_t align; -}; - -/* sub-allocation buffer */ -struct amdgpu_sa_bo { - struct list_head olist; - struct list_head flist; - struct amdgpu_sa_manager *manager; - unsigned soffset; - unsigned eoffset; - struct dma_fence *fence; + struct drm_suballoc_manager base; + struct amdgpu_bo *bo; + uint64_t gpu_addr; + void *cpu_ptr; }; int amdgpu_fence_slab_init(void); @@ -490,7 +470,7 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv); /* * Writeback */ -#define AMDGPU_MAX_WB 256 /* Reserve at most 256 WB slots for amdgpu-owned rings. */ +#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */ struct amdgpu_wb { struct amdgpu_bo *wb_obj; @@ -1023,7 +1003,6 @@ struct amdgpu_device { bool in_runpm; bool has_pr3; - bool pm_sysfs_en; bool ucode_sysfs_en; bool psp_sysfs_en; @@ -1113,18 +1092,14 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset); u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev, - u32 pcie_index, u32 pcie_data, u32 reg_addr); u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev, - u32 pcie_index, u32 pcie_data, u32 reg_addr); void amdgpu_device_indirect_wreg(struct amdgpu_device *adev, - u32 pcie_index, u32 pcie_data, u32 reg_addr, u32 reg_data); void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev, - u32 pcie_index, u32 pcie_data, u32 reg_addr, u64 reg_data); - +u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev); bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type); bool amdgpu_device_has_dc_support(struct amdgpu_device *adev); @@ -1246,7 +1221,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev); ((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r))) #define amdgpu_asic_invalidate_hdp(adev, r) \ ((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \ - ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : 0)) + ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0)) #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev)) #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1))) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index f99d4873bf22..0385f7f69278 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -96,7 +96,7 @@ static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, size_t *start_offset) { /* - * The first num_doorbells are used by amdgpu. + * The first num_kernel_doorbells are used by amdgpu. * amdkfd takes whatever's left in the aperture. 
*/ if (adev->enable_mes) { @@ -109,11 +109,11 @@ static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, *aperture_base = adev->doorbell.base; *aperture_size = 0; *start_offset = 0; - } else if (adev->doorbell.size > adev->doorbell.num_doorbells * + } else if (adev->doorbell.size > adev->doorbell.num_kernel_doorbells * sizeof(u32)) { *aperture_base = adev->doorbell.base; *aperture_size = adev->doorbell.size; - *start_offset = adev->doorbell.num_doorbells * sizeof(u32); + *start_offset = adev->doorbell.num_kernel_doorbells * sizeof(u32); } else { *aperture_base = 0; *aperture_size = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 333780491867..01ba3589b60a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -308,6 +308,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, uint64_t va, void *drm_priv, struct kgd_mem **mem, uint64_t *size, uint64_t *mmap_offset); +int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem, + struct dma_buf **dmabuf); int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, struct tile_config *config); void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index d6320c836251..83a83ced2439 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -82,6 +82,25 @@ static bool kfd_mem_is_attached(struct amdgpu_vm *avm, return false; } +/** + * reuse_dmamap() - Check whether adev can share the original + * userptr BO + * + * If both adev and bo_adev are in direct mapping or + * in the same iommu group, they can share the original BO. + * + * @adev: Device to which can or cannot share the original BO + * @bo_adev: Device to which allocated BO belongs to + * + * Return: returns true if adev can share original userptr BO, + * false otherwise. + */ +static bool reuse_dmamap(struct amdgpu_device *adev, struct amdgpu_device *bo_adev) +{ + return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) || + (adev->dev->iommu_group == bo_adev->dev->iommu_group); +} + /* Set memory usage limits. 
Current, limits are * System (TTM + userptr) memory - 15/16th System RAM * TTM memory - 3/8th System RAM @@ -253,15 +272,19 @@ create_dmamap_sg_bo(struct amdgpu_device *adev, struct kgd_mem *mem, struct amdgpu_bo **bo_out) { struct drm_gem_object *gem_obj; - int ret, align; + int ret; + uint64_t flags = 0; ret = amdgpu_bo_reserve(mem->bo, false); if (ret) return ret; - align = 1; - ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, align, - AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE, + if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) + flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT | + AMDGPU_GEM_CREATE_UNCACHED); + + ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1, + AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags, ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj); amdgpu_bo_unreserve(mem->bo); @@ -481,9 +504,6 @@ kfd_mem_dmamap_userptr(struct kgd_mem *mem, if (unlikely(ret)) goto release_sg; - drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address, - ttm->num_pages); - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); if (ret) @@ -711,6 +731,21 @@ kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, } } +static int kfd_mem_export_dmabuf(struct kgd_mem *mem) +{ + if (!mem->dmabuf) { + struct dma_buf *ret = amdgpu_gem_prime_export( + &mem->bo->tbo.base, + mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? + DRM_RDWR : 0); + if (IS_ERR(ret)) + return PTR_ERR(ret); + mem->dmabuf = ret; + } + + return 0; +} + static int kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, struct amdgpu_bo **bo) @@ -718,16 +753,9 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, struct drm_gem_object *gobj; int ret; - if (!mem->dmabuf) { - mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base, - mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 
- DRM_RDWR : 0); - if (IS_ERR(mem->dmabuf)) { - ret = PTR_ERR(mem->dmabuf); - mem->dmabuf = NULL; - return ret; - } - } + ret = kfd_mem_export_dmabuf(mem); + if (ret) + return ret; gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf); if (IS_ERR(gobj)) @@ -797,11 +825,11 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, va + bo_size, vm); if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) || - (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && adev->ram_is_direct_mapped) || - same_hive) { + (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) || + same_hive) { /* Mappings on the local GPU, or VRAM mappings in the - * local hive, or userptr mapping IOMMU direct map mode - * share the original BO + * local hive, or userptr mapping can reuse dma map + * address space share the original BO */ attachment[i]->type = KFD_MEM_ATT_SHARED; bo[i] = mem->bo; @@ -1575,7 +1603,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev) { uint64_t reserved_for_pt = ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); - size_t available; + ssize_t available; spin_lock(&kfd_mem_limit.mem_limit_lock); available = adev->gmc.real_vram_size @@ -1584,6 +1612,9 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev) - reserved_for_pt; spin_unlock(&kfd_mem_limit.mem_limit_lock); + if (available < 0) + available = 0; + return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN); } @@ -2210,30 +2241,27 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, struct amdgpu_bo *bo; int ret; - if (dma_buf->ops != &amdgpu_dmabuf_ops) - /* Can't handle non-graphics buffers */ - return -EINVAL; - - obj = dma_buf->priv; - if (drm_to_adev(obj->dev) != adev) - /* Can't handle buffers from other devices */ - return -EINVAL; + obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf); + if (IS_ERR(obj)) + return PTR_ERR(obj); bo = gem_to_amdgpu_bo(obj); if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT))) + AMDGPU_GEM_DOMAIN_GTT))) { /* Only VRAM and GTT BOs are supported */ - return -EINVAL; + ret = -EINVAL; + goto err_put_obj; + } *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); - if (!*mem) - return -ENOMEM; + if (!*mem) { + ret = -ENOMEM; + goto err_put_obj; + } ret = drm_vma_node_allow(&obj->vma_node, drm_priv); - if (ret) { - kfree(*mem); - return ret; - } + if (ret) + goto err_free_mem; if (size) *size = amdgpu_bo_size(bo); @@ -2250,7 +2278,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; - drm_gem_object_get(&bo->tbo.base); + get_dma_buf(dma_buf); + (*mem)->dmabuf = dma_buf; (*mem)->bo = bo; (*mem)->va = va; (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? 
@@ -2262,6 +2291,29 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, (*mem)->is_imported = true; return 0; + +err_free_mem: + kfree(*mem); +err_put_obj: + drm_gem_object_put(obj); + return ret; +} + +int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem, + struct dma_buf **dma_buf) +{ + int ret; + + mutex_lock(&mem->lock); + ret = kfd_mem_export_dmabuf(mem); + if (ret) + goto out; + + get_dma_buf(mem->dmabuf); + *dma_buf = mem->dmabuf; +out: + mutex_unlock(&mem->lock); + return ret; } /* Evict a userptr BO by stopping the queues if necessary diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 08eced097bd8..2eb2c66843a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1276,7 +1276,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, r = drm_sched_job_add_dependency(&leader->base, fence); if (r) { dma_fence_put(fence); - goto error_cleanup; + return r; } } @@ -1303,7 +1303,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, } if (r) { r = -EAGAIN; - goto error_unlock; + mutex_unlock(&p->adev->notifier_lock); + return r; } p->fence = dma_fence_get(&leader->base.s_fence->finished); @@ -1350,14 +1351,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, mutex_unlock(&p->adev->notifier_lock); mutex_unlock(&p->bo_list->bo_list_mutex); return 0; - -error_unlock: - mutex_unlock(&p->adev->notifier_lock); - -error_cleanup: - for (i = 0; i < p->gang_size; ++i) - drm_sched_job_cleanup(&p->jobs[i]->base); - return r; } /* Cleanup the parser structure */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3d98fc2ad36b..981a9cfb63b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -35,6 +35,7 @@ #include <linux/devcoredump.h> #include <generated/utsrelease.h> #include <linux/pci-p2pdma.h> +#include <linux/apple-gmux.h> #include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> @@ -601,7 +602,7 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) if (amdgpu_device_skip_hw_access(adev)) return 0; - if (index < adev->doorbell.num_doorbells) { + if (index < adev->doorbell.num_kernel_doorbells) { return readl(adev->doorbell.ptr + index); } else { DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); @@ -624,7 +625,7 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) if (amdgpu_device_skip_hw_access(adev)) return; - if (index < adev->doorbell.num_doorbells) { + if (index < adev->doorbell.num_kernel_doorbells) { writel(v, adev->doorbell.ptr + index); } else { DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); @@ -645,7 +646,7 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index) if (amdgpu_device_skip_hw_access(adev)) return 0; - if (index < adev->doorbell.num_doorbells) { + if (index < adev->doorbell.num_kernel_doorbells) { return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index)); } else { DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); @@ -668,7 +669,7 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) if (amdgpu_device_skip_hw_access(adev)) return; - if (index < adev->doorbell.num_doorbells) { + if (index < adev->doorbell.num_kernel_doorbells) { atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v); } else { DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); @@ -679,20 +680,20 @@ void 
amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) * amdgpu_device_indirect_rreg - read an indirect register * * @adev: amdgpu_device pointer - * @pcie_index: mmio register offset - * @pcie_data: mmio register offset * @reg_addr: indirect register address to read from * * Returns the value of indirect register @reg_addr */ u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev, - u32 pcie_index, u32 pcie_data, u32 reg_addr) { - unsigned long flags; - u32 r; + unsigned long flags, pcie_index, pcie_data; void __iomem *pcie_index_offset; void __iomem *pcie_data_offset; + u32 r; + + pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); + pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; @@ -710,20 +711,20 @@ u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev, * amdgpu_device_indirect_rreg64 - read a 64bits indirect register * * @adev: amdgpu_device pointer - * @pcie_index: mmio register offset - * @pcie_data: mmio register offset * @reg_addr: indirect register address to read from * * Returns the value of indirect register @reg_addr */ u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev, - u32 pcie_index, u32 pcie_data, u32 reg_addr) { - unsigned long flags; - u64 r; + unsigned long flags, pcie_index, pcie_data; void __iomem *pcie_index_offset; void __iomem *pcie_data_offset; + u64 r; + + pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); + pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; @@ -753,13 +754,15 @@ u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev, * */ void amdgpu_device_indirect_wreg(struct amdgpu_device *adev, - u32 pcie_index, u32 pcie_data, u32 reg_addr, u32 reg_data) { - unsigned long flags; + unsigned long flags, pcie_index, pcie_data; void __iomem *pcie_index_offset; void __iomem *pcie_data_offset; + pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); + pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); + spin_lock_irqsave(&adev->pcie_idx_lock, flags); pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; @@ -782,13 +785,15 @@ void amdgpu_device_indirect_wreg(struct amdgpu_device *adev, * */ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev, - u32 pcie_index, u32 pcie_data, u32 reg_addr, u64 reg_data) { - unsigned long flags; + unsigned long flags, pcie_index, pcie_data; void __iomem *pcie_index_offset; void __iomem *pcie_data_offset; + pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); + pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); + spin_lock_irqsave(&adev->pcie_idx_lock, flags); pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; @@ -807,6 +812,18 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev, } /** + * amdgpu_device_get_rev_id - query device rev_id + * + * @adev: amdgpu_device pointer + * + * Return device rev_id + */ +u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev) +{ + return adev->nbio.funcs->get_rev_id(adev); +} + +/** * amdgpu_invalid_rreg - dummy reg read function * * @adev: amdgpu_device pointer @@ -1043,7 +1060,7 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) if (adev->asic_type < CHIP_BONAIRE) { 
adev->doorbell.base = 0; adev->doorbell.size = 0; - adev->doorbell.num_doorbells = 0; + adev->doorbell.num_kernel_doorbells = 0; adev->doorbell.ptr = NULL; return 0; } @@ -1058,27 +1075,27 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) adev->doorbell.size = pci_resource_len(adev->pdev, 2); if (adev->enable_mes) { - adev->doorbell.num_doorbells = + adev->doorbell.num_kernel_doorbells = adev->doorbell.size / sizeof(u32); } else { - adev->doorbell.num_doorbells = + adev->doorbell.num_kernel_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32), adev->doorbell_index.max_assignment+1); - if (adev->doorbell.num_doorbells == 0) + if (adev->doorbell.num_kernel_doorbells == 0) return -EINVAL; /* For Vega, reserve and map two pages on doorbell BAR since SDMA * paging queue doorbell use the second page. The * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the * doorbells are in the first page. So with paging queue enabled, - * the max num_doorbells should + 1 page (0x400 in dword) + * the max num_kernel_doorbells should + 1 page (0x400 in dword) */ if (adev->asic_type >= CHIP_VEGA10) - adev->doorbell.num_doorbells += 0x400; + adev->doorbell.num_kernel_doorbells += 0x400; } adev->doorbell.ptr = ioremap(adev->doorbell.base, - adev->doorbell.num_doorbells * + adev->doorbell.num_kernel_doorbells * sizeof(u32)); if (adev->doorbell.ptr == NULL) return -ENOMEM; @@ -2167,7 +2184,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) adev->has_pr3 = parent ? pci_pr3_present(parent) : false; } - amdgpu_amdkfd_device_probe(adev); adev->pm.pp_feature = amdgpu_pp_feature_mask; if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS) @@ -2223,6 +2239,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) if (!total) return -ENODEV; + amdgpu_amdkfd_device_probe(adev); adev->cg_flags &= amdgpu_cg_mask; adev->pg_flags &= amdgpu_pg_mask; @@ -2348,7 +2365,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) } r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, - ring->num_hw_submission, amdgpu_job_hang_limit, + ring->num_hw_submission, 0, timeout, adev->reset_domain->wq, ring->sched_score, ring->name, adev->dev); @@ -2522,8 +2539,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) amdgpu_fru_get_product_info(adev); init_failed: - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_release_full_gpu(adev, true); return r; } @@ -3164,9 +3179,11 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_DCE, AMD_IP_BLOCK_TYPE_GFX, AMD_IP_BLOCK_TYPE_SDMA, + AMD_IP_BLOCK_TYPE_MES, AMD_IP_BLOCK_TYPE_UVD, AMD_IP_BLOCK_TYPE_VCE, - AMD_IP_BLOCK_TYPE_VCN + AMD_IP_BLOCK_TYPE_VCN, + AMD_IP_BLOCK_TYPE_JPEG }; for (i = 0; i < ARRAY_SIZE(ip_order); i++) { @@ -3286,9 +3303,11 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev) { int r; - r = amdgpu_amdkfd_resume_iommu(adev); - if (r) - return r; + if (!adev->in_s0ix) { + r = amdgpu_amdkfd_resume_iommu(adev); + if (r) + return r; + } r = amdgpu_device_ip_resume_phase1(adev); if (r) @@ -3559,6 +3578,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, int r, i; bool px = false; u32 max_MBps; + int tmp; adev->shutdown = false; adev->flags = flags; @@ -3780,7 +3800,13 @@ int amdgpu_device_init(struct amdgpu_device *adev, } } } else { + tmp = amdgpu_reset_method; + /* It should do a default reset when loading or reloading the driver, + * regardless of the module parameter reset_method. 
+ */ + amdgpu_reset_method = AMD_RESET_METHOD_NONE; r = amdgpu_asic_reset(adev); + amdgpu_reset_method = tmp; if (r) { dev_err(adev->dev, "asic reset on init failed\n"); goto failed; @@ -3788,8 +3814,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, } } - pci_enable_pcie_error_reporting(adev->pdev); - /* Post card if necessary */ if (amdgpu_device_need_post(adev)) { if (!adev->bios) { @@ -3840,18 +3864,6 @@ fence_driver_init: r = amdgpu_device_ip_init(adev); if (r) { - /* failed in exclusive mode due to timeout */ - if (amdgpu_sriov_vf(adev) && - !amdgpu_sriov_runtime(adev) && - amdgpu_virt_mmio_blocked(adev) && - !amdgpu_virt_wait_reset(adev)) { - dev_err(adev->dev, "VF exclusive mode timeout\n"); - /* Don't send request since VF is inactive. */ - adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; - adev->virt.ops = NULL; - r = -EAGAIN; - goto release_ras_con; - } dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); goto release_ras_con; @@ -3879,11 +3891,8 @@ fence_driver_init: adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); r = amdgpu_pm_sysfs_init(adev); - if (r) { - adev->pm_sysfs_en = false; - DRM_ERROR("registering pm debugfs failed (%d).\n", r); - } else - adev->pm_sysfs_en = true; + if (r) + DRM_ERROR("registering pm sysfs failed (%d).\n", r); r = amdgpu_ucode_sysfs_init(adev); if (r) { @@ -3923,8 +3932,10 @@ fence_driver_init: msecs_to_jiffies(AMDGPU_RESUME_MS)); } - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev)) { + amdgpu_virt_release_full_gpu(adev, true); flush_delayed_work(&adev->delayed_init_work); + } r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes); if (r) @@ -3945,12 +3956,15 @@ fence_driver_init: if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); - if (amdgpu_device_supports_px(ddev)) { - px = true; + px = amdgpu_device_supports_px(ddev); + + if (px || (!pci_is_thunderbolt_attached(adev->pdev) && + apple_gmux_detect(NULL, NULL))) vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, px); + + if (px) vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); - } if (adev->gmc.xgmi.pending_reset) queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work, @@ -3961,6 +3975,20 @@ fence_driver_init: return 0; release_ras_con: + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_release_full_gpu(adev, true); + + /* failed in exclusive mode due to timeout */ + if (amdgpu_sriov_vf(adev) && + !amdgpu_sriov_runtime(adev) && + amdgpu_virt_mmio_blocked(adev) && + !amdgpu_virt_wait_reset(adev)) { + dev_err(adev->dev, "VF exclusive mode timeout\n"); + /* Don't send request since VF is inactive. 
*/ + adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; + adev->virt.ops = NULL; + r = -EAGAIN; + } amdgpu_release_ras_context(adev); failed: @@ -4026,7 +4054,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) if (adev->mman.initialized) drain_workqueue(adev->mman.bdev.wq); - if (adev->pm_sysfs_en) + if (adev->pm.sysfs_initialized) amdgpu_pm_sysfs_fini(adev); if (adev->ucode_sysfs_en) amdgpu_ucode_sysfs_fini(adev); @@ -4054,6 +4082,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) void amdgpu_device_fini_sw(struct amdgpu_device *adev) { int idx; + bool px; amdgpu_fence_driver_sw_fini(adev); amdgpu_device_ip_fini(adev); @@ -4072,10 +4101,16 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) kfree(adev->bios); adev->bios = NULL; - if (amdgpu_device_supports_px(adev_to_drm(adev))) { + + px = amdgpu_device_supports_px(adev_to_drm(adev)); + + if (px || (!pci_is_thunderbolt_attached(adev->pdev) && + apple_gmux_detect(NULL, NULL))) vga_switcheroo_unregister_client(adev->pdev); + + if (px) vga_switcheroo_fini_domain_pm_ops(adev->dev); - } + if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) vga_client_unregister(adev->pdev); @@ -5154,6 +5189,7 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev) * * @adev: amdgpu_device pointer * @job: which job trigger hang + * @reset_context: amdgpu reset context pointer * * Attempt to reset the GPU if it has hung (all asics). * Attempt to do soft-reset or full-reset and reinitialize Asic @@ -5323,8 +5359,9 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ if (r) adev->asic_reset_res = r; - /* Aldebaran supports ras in SRIOV, so need resume ras during reset */ - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + /* Aldebaran and gfx_11_0_3 support ras in SRIOV, so need resume ras during reset */ + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) amdgpu_ras_resume(adev); } else { r = amdgpu_do_asic_reset(device_list_handle, reset_context); @@ -5593,7 +5630,7 @@ int amdgpu_device_baco_enter(struct drm_device *dev) struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - if (!amdgpu_device_supports_baco(adev_to_drm(adev))) + if (!amdgpu_device_supports_baco(dev)) return -ENOTSUPP; if (ras && adev->ras_enabled && @@ -5609,7 +5646,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev) struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); int ret = 0; - if (!amdgpu_device_supports_baco(adev_to_drm(adev))) + if (!amdgpu_device_supports_baco(dev)) return -ENOTSUPP; ret = amdgpu_dpm_baco_exit(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 1a3cb53d2e0d..0ecce0b92b82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -37,10 +37,12 @@ #include "nbio_v6_1.h" #include "nbio_v7_0.h" #include "nbio_v7_4.h" +#include "nbio_v7_9.h" #include "hdp_v4_0.h" #include "vega10_ih.h" #include "vega20_ih.h" #include "sdma_v4_0.h" +#include "sdma_v4_4_2.h" #include "uvd_v7_0.h" #include "vce_v4_0.h" #include "vcn_v1_0.h" @@ -711,7 +713,7 @@ static void ip_hw_instance_release(struct kobject *kobj) kfree(ip_hw_instance); } -static struct kobj_type ip_hw_instance_ktype = { +static const struct kobj_type ip_hw_instance_ktype = { .release = ip_hw_instance_release, .sysfs_ops = &ip_hw_instance_sysfs_ops, .default_groups = ip_hw_instance_groups, @@ -730,7 +732,7 @@ static 
void ip_hw_id_release(struct kobject *kobj) kfree(ip_hw_id); } -static struct kobj_type ip_hw_id_ktype = { +static const struct kobj_type ip_hw_id_ktype = { .release = ip_hw_id_release, .sysfs_ops = &kobj_sysfs_ops, }; @@ -793,18 +795,18 @@ static const struct sysfs_ops ip_die_entry_sysfs_ops = { .show = ip_die_entry_attr_show, }; -static struct kobj_type ip_die_entry_ktype = { +static const struct kobj_type ip_die_entry_ktype = { .release = ip_die_entry_release, .sysfs_ops = &ip_die_entry_sysfs_ops, .default_groups = ip_die_entry_groups, }; -static struct kobj_type die_kobj_ktype = { +static const struct kobj_type die_kobj_ktype = { .release = die_kobj_release, .sysfs_ops = &kobj_sysfs_ops, }; -static struct kobj_type ip_discovery_ktype = { +static const struct kobj_type ip_discovery_ktype = { .release = ip_disc_release, .sysfs_ops = &kobj_sysfs_ops, }; @@ -1500,6 +1502,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(9, 4, 0): case IP_VERSION(9, 4, 1): case IP_VERSION(9, 4, 2): + case IP_VERSION(9, 4, 3): amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); break; case IP_VERSION(10, 1, 10): @@ -1545,6 +1548,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(9, 4, 0): case IP_VERSION(9, 4, 1): case IP_VERSION(9, 4, 2): + case IP_VERSION(9, 4, 3): amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); break; case IP_VERSION(10, 1, 10): @@ -1591,6 +1595,7 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(4, 2, 0): case IP_VERSION(4, 2, 1): case IP_VERSION(4, 4, 0): + case IP_VERSION(4, 4, 2): amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); break; case IP_VERSION(5, 0, 0): @@ -1649,6 +1654,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(13, 0, 2): case IP_VERSION(13, 0, 3): case IP_VERSION(13, 0, 5): + case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 7): case IP_VERSION(13, 0, 8): case IP_VERSION(13, 0, 10): @@ -1842,6 +1848,9 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(4, 4, 0): amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); break; + case IP_VERSION(4, 4, 2): + amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block); + break; case IP_VERSION(5, 0, 0): case IP_VERSION(5, 0, 1): case IP_VERSION(5, 0, 2): @@ -1942,9 +1951,8 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(4, 0, 2): case IP_VERSION(4, 0, 4): amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block); - if (!amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block); - break; + amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block); + return 0; default: dev_err(adev->dev, "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", @@ -2175,6 +2183,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(9, 4, 0): case IP_VERSION(9, 4, 1): case IP_VERSION(9, 4, 2): + case IP_VERSION(9, 4, 3): adev->family = AMDGPU_FAMILY_AI; break; case IP_VERSION(9, 1, 0): @@ -2259,6 +2268,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->nbio.funcs = &nbio_v7_4_funcs; adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; break; + case IP_VERSION(7, 9, 0): + adev->nbio.funcs = &nbio_v7_9_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg; + break; case IP_VERSION(7, 2, 0): case IP_VERSION(7, 2, 1): case IP_VERSION(7, 3, 0): @@ -2304,6 +2317,7 @@ int amdgpu_discovery_set_ip_blocks(struct 
amdgpu_device *adev) case IP_VERSION(4, 2, 0): case IP_VERSION(4, 2, 1): case IP_VERSION(4, 4, 0): + case IP_VERSION(4, 4, 2): adev->hdp.funcs = &hdp_v4_0_funcs; break; case IP_VERSION(5, 0, 0): diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h index 7199b6b0be81..8fd11497faba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h @@ -21,6 +21,9 @@ * */ +#ifndef AMDGPU_DOORBELL_H +#define AMDGPU_DOORBELL_H + /* * GPU doorbell structures, functions & helpers */ @@ -29,7 +32,9 @@ struct amdgpu_doorbell { resource_size_t base; resource_size_t size; u32 __iomem *ptr; - u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */ + + /* Number of doorbells reserved for amdgpu kernel driver */ + u32 num_kernel_doorbells; }; /* Reserved doorbells for amdgpu (including multimedia). @@ -306,3 +311,4 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v); #define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index)) #define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v)) +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index ba5def374368..b1ca1ab6d6ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -148,8 +148,8 @@ uint amdgpu_pcie_lane_cap; u64 amdgpu_cg_mask = 0xffffffffffffffff; uint amdgpu_pg_mask = 0xffffffff; uint amdgpu_sdma_phase_quantum = 32; -char *amdgpu_disable_cu = NULL; -char *amdgpu_virtual_display = NULL; +char *amdgpu_disable_cu; +char *amdgpu_virtual_display; /* * OverDrive(bit 14) disabled by default @@ -157,7 +157,6 @@ char *amdgpu_virtual_display = NULL; */ uint amdgpu_pp_feature_mask = 0xfff7bfff; uint amdgpu_force_long_training; -int amdgpu_job_hang_limit; int amdgpu_lbpw = -1; int amdgpu_compute_multipipe = -1; int amdgpu_gpu_recovery = -1; /* auto */ @@ -521,13 +520,6 @@ MODULE_PARM_DESC(virtual_display, module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444); /** - * DOC: job_hang_limit (int) - * Set how much time allow a job hang and not drop it. The default is 0. - */ -MODULE_PARM_DESC(job_hang_limit, "how much time allow a job hang and not drop it (default 0)"); -module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444); - -/** * DOC: lbpw (int) * Override Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable). The default is -1 (auto, enabled). */ @@ -822,7 +814,7 @@ MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = defa * DOC: no_queue_eviction_on_vm_fault (int) * If set, process queues will not be evicted on gpuvm fault. This is to keep the wavefront context for debugging (0 = queue eviction, 1 = no queue eviction). The default is 0 (queue eviction). 
*/ -int amdgpu_no_queue_eviction_on_vm_fault = 0; +int amdgpu_no_queue_eviction_on_vm_fault; MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault (0 = queue eviction, 1 = no queue eviction)"); module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c index 99a7855ab1bc..c57252f004e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -60,12 +60,13 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f) struct amdgpu_fpriv *fpriv = file->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; - uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0; + struct amdgpu_mem_stats stats; ktime_t usage[AMDGPU_HW_IP_NUM]; uint32_t bus, dev, fn, domain; unsigned int hw_ip; int ret; + memset(&stats, 0, sizeof(stats)); bus = adev->pdev->bus->number; domain = pci_domain_nr(adev->pdev->bus); dev = PCI_SLOT(adev->pdev->devfn); @@ -75,7 +76,7 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f) if (ret) return; - amdgpu_vm_get_memory(vm, &vram_mem, &gtt_mem, &cpu_mem); + amdgpu_vm_get_memory(vm, &stats); amdgpu_bo_unreserve(vm->root.bo); amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage); @@ -90,9 +91,22 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f) seq_printf(m, "drm-driver:\t%s\n", file->minor->dev->driver->name); seq_printf(m, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn); seq_printf(m, "drm-client-id:\t%Lu\n", vm->immediate.fence_context); - seq_printf(m, "drm-memory-vram:\t%llu KiB\n", vram_mem/1024UL); - seq_printf(m, "drm-memory-gtt: \t%llu KiB\n", gtt_mem/1024UL); - seq_printf(m, "drm-memory-cpu: \t%llu KiB\n", cpu_mem/1024UL); + seq_printf(m, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL); + seq_printf(m, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL); + seq_printf(m, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL); + seq_printf(m, "amd-memory-visible-vram:\t%llu KiB\n", + stats.visible_vram/1024UL); + seq_printf(m, "amd-evicted-vram:\t%llu KiB\n", + stats.evicted_vram/1024UL); + seq_printf(m, "amd-evicted-visible-vram:\t%llu KiB\n", + stats.evicted_visible_vram/1024UL); + seq_printf(m, "amd-requested-vram:\t%llu KiB\n", + stats.requested_vram/1024UL); + seq_printf(m, "amd-requested-visible-vram:\t%llu KiB\n", + stats.requested_visible_vram/1024UL); + seq_printf(m, "amd-requested-gtt:\t%llu KiB\n", + stats.requested_gtt/1024UL); + for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) { if (!usage[hw_ip]) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index d8e683688daa..863cb668e000 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -969,7 +969,7 @@ static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused) * Therefore, we need to protect this ->comm access using RCU. */ rcu_read_lock(); - task = pid_task(file->pid, PIDTYPE_PID); + task = pid_task(file->pid, PIDTYPE_TGID); seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid), task ?
task->comm : "<unknown>"); rcu_read_unlock(); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 35ed46b9249c..9d3a0542c996 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -305,6 +305,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, ring->ring_obj = NULL; ring->use_doorbell = true; ring->doorbell_index = adev->doorbell_index.kiq; + ring->vm_hub = AMDGPU_GFXHUB_0; r = amdgpu_gfx_kiq_acquire(adev, ring); if (r) @@ -725,7 +726,7 @@ int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev) /* If not define special ras_late_init function, use gfx default ras_late_init */ if (!ras->ras_block.ras_late_init) - ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; + ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init; /* If not defined special ras_cb function, use default ras_cb */ if (!ras->ras_block.ras_cb) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index de9e7a00bb15..bfabea76d166 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -42,6 +42,8 @@ #define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L #define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L +#define AMDGPU_MAX_GC_INSTANCES 8 + #define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES @@ -53,6 +55,15 @@ enum amdgpu_gfx_pipe_priority { #define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0 #define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15 +enum amdgpu_gfx_partition { + AMDGPU_SPX_PARTITION_MODE = 0, + AMDGPU_DPX_PARTITION_MODE = 1, + AMDGPU_TPX_PARTITION_MODE = 2, + AMDGPU_QPX_PARTITION_MODE = 3, + AMDGPU_CPX_PARTITION_MODE = 4, + AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, +}; + struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; @@ -323,7 +334,7 @@ struct amdgpu_gfx { bool cp_fw_write_wait; struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; unsigned num_gfx_rings; - struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; + struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES]; unsigned num_compute_rings; struct amdgpu_irq_src eop_irq; struct amdgpu_irq_src priv_reg_irq; @@ -364,6 +375,10 @@ struct amdgpu_gfx { struct amdgpu_ring sw_gfx_ring[AMDGPU_MAX_SW_GFX_RINGS]; struct amdgpu_ring_mux muxer; + + enum amdgpu_gfx_partition partition_mode; + uint32_t num_xcd; + uint32_t num_xcc_per_xcp; }; #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 12a6826caef4..4e2531758866 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -395,8 +395,21 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, while (fault->timestamp >= stamp) { uint64_t tmp; - if (atomic64_read(&fault->key) == key) - return true; + if (atomic64_read(&fault->key) == key) { + /* + * if we get a fault which is already present in + * the fault_ring and the timestamp of + * the fault is after the expired timestamp, + * then this is a new fault that needs to be added + * into the fault ring. 
+ */ + if (fault->timestamp_expiry != 0 && + amdgpu_ih_ts_after(fault->timestamp_expiry, + timestamp)) + break; + else + return true; + } tmp = fault->timestamp; fault = &gmc->fault_ring[fault->next]; @@ -432,28 +445,74 @@ void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr, { struct amdgpu_gmc *gmc = &adev->gmc; uint64_t key = amdgpu_gmc_fault_key(addr, pasid); + struct amdgpu_ih_ring *ih; struct amdgpu_gmc_fault *fault; + uint32_t last_wptr; + uint64_t last_ts; uint32_t hash; uint64_t tmp; + ih = adev->irq.retry_cam_enabled ? &adev->irq.ih_soft : &adev->irq.ih1; + /* Get the WPTR of the last entry in IH ring */ + last_wptr = amdgpu_ih_get_wptr(adev, ih); + /* Order wptr with ring data. */ + rmb(); + /* Get the timetamp of the last entry in IH ring */ + last_ts = amdgpu_ih_decode_iv_ts(adev, ih, last_wptr, -1); + hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER); fault = &gmc->fault_ring[gmc->fault_hash[hash].idx]; do { - if (atomic64_cmpxchg(&fault->key, key, 0) == key) + if (atomic64_read(&fault->key) == key) { + /* + * Update the timestamp when this fault + * expired. + */ + fault->timestamp_expiry = last_ts; break; + } tmp = fault->timestamp; fault = &gmc->fault_ring[fault->next]; } while (fault->timestamp < tmp); } -int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev) +int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev) { - if (!adev->gmc.xgmi.connected_to_cpu) { - adev->gmc.xgmi.ras = &xgmi_ras; - amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block); - adev->gmc.xgmi.ras_if = &adev->gmc.xgmi.ras->ras_block.ras_comm; - } + int r; + + /* umc ras block */ + r = amdgpu_umc_ras_sw_init(adev); + if (r) + return r; + + /* mmhub ras block */ + r = amdgpu_mmhub_ras_sw_init(adev); + if (r) + return r; + + /* hdp ras block */ + r = amdgpu_hdp_ras_sw_init(adev); + if (r) + return r; + + /* mca.x ras block */ + r = amdgpu_mca_mp0_ras_sw_init(adev); + if (r) + return r; + + r = amdgpu_mca_mp1_ras_sw_init(adev); + if (r) + return r; + + r = amdgpu_mca_mpio_ras_sw_init(adev); + if (r) + return r; + + /* xgmi ras block */ + r = amdgpu_xgmi_ras_sw_init(adev); + if (r) + return r; return 0; } @@ -495,7 +554,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev) for (i = 0; i < adev->num_rings; ++i) { ring = adev->rings[i]; - vmhub = ring->funcs->vmhub; + vmhub = ring->vm_hub; if (ring == &adev->mes.ring) continue; @@ -511,7 +570,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev) vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng); dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", - ring->name, ring->vm_inv_eng, ring->funcs->vmhub); + ring->name, ring->vm_inv_eng, ring->vm_hub); } return 0; @@ -595,6 +654,7 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev) gc_ver == IP_VERSION(9, 4, 0) || gc_ver == IP_VERSION(9, 4, 1) || gc_ver == IP_VERSION(9, 4, 2) || + gc_ver == IP_VERSION(9, 4, 3) || gc_ver >= IP_VERSION(10, 3, 0)); gmc->noretry = (amdgpu_noretry == -1) ? 
noretry_default : amdgpu_noretry; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 0305b660cd17..6d105d7fb98b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -70,6 +70,7 @@ struct amdgpu_gmc_fault { uint64_t timestamp:48; uint64_t next:AMDGPU_GMC_FAULT_RING_ORDER; atomic64_t key; + uint64_t timestamp_expiry:48; }; /* @@ -104,6 +105,8 @@ struct amdgpu_vmhub { uint32_t vm_cntx_cntl_vm_fault; uint32_t vm_l2_bank_select_reserved_cid2; + uint32_t vm_contexts_disable; + const struct amdgpu_vmhub_funcs *vmhub_funcs; }; @@ -351,7 +354,7 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint16_t pasid, uint64_t timestamp); void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr, uint16_t pasid); -int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev); +int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev); int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev); void amdgpu_gmc_ras_fini(struct amdgpu_device *adev); int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c new file mode 100644 index 000000000000..b6cf801939aa --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c @@ -0,0 +1,48 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "amdgpu_ras.h" + +int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev) +{ + int err; + struct amdgpu_hdp_ras *ras; + + if (!adev->hdp.ras) + return 0; + + ras = adev->hdp.ras; + err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); + if (err) { + dev_err(adev->dev, "Failed to register hdp ras block!\n"); + return err; + } + + strcpy(ras->ras_block.ras_comm.name, "hdp"); + ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__HDP; + ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->hdp.ras_if = &ras->ras_block.ras_comm; + + /* hdp ras follows amdgpu_ras_block_late_init_default for late init */ + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h index ac5c61d3de2b..7b8a6152dc8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h @@ -43,5 +43,5 @@ struct amdgpu_hdp { struct amdgpu_hdp_ras *ras; }; -int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); +int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev); #endif /* __AMDGPU_HDP_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index bcccc348dbe2..4ff348e10e4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -69,7 +69,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (size) { r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type], - &ib->sa_bo, size, 256); + &ib->sa_bo, size); if (r) { dev_err(adev->dev, "failed to get a new IB (%d)\n", r); return r; @@ -267,7 +267,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); if (job && job->vmid) - amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid); + amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid); amdgpu_ring_undo(ring); return r; } @@ -309,8 +309,7 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) { r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i], - AMDGPU_IB_POOL_SIZE, - AMDGPU_GPU_PAGE_SIZE, + AMDGPU_IB_POOL_SIZE, 256, AMDGPU_GEM_DOMAIN_GTT); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 3f07b1a2ce47..c991ca0b7a1c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -202,7 +202,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; - unsigned vmhub = ring->funcs->vmhub; + unsigned vmhub = ring->vm_hub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct dma_fence **fences; unsigned i; @@ -277,7 +277,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; - unsigned vmhub = ring->funcs->vmhub; + unsigned vmhub = ring->vm_hub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; uint64_t fence_context = adev->fence_context + ring->idx; bool needs_flush = vm->use_cpu_for_update; @@ -338,7 +338,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; - unsigned vmhub = ring->funcs->vmhub; + unsigned vmhub = ring->vm_hub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; uint64_t fence_context = adev->fence_context + ring->idx; uint64_t updates = 
amdgpu_vm_tlb_seq(vm); @@ -398,7 +398,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_job *job, struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; - unsigned vmhub = ring->funcs->vmhub; + unsigned vmhub = ring->vm_hub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct amdgpu_vmid *idle = NULL; struct amdgpu_vmid *id = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index d0a1cc88832c..fafebec5b7b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -596,6 +596,9 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, if (!src->enabled_types || !src->funcs->set) return -EINVAL; + if (WARN_ON(!amdgpu_irq_enabled(adev, src, type))) + return -EINVAL; + if (atomic_dec_and_test(&src->enabled_types[type])) return amdgpu_irq_update(adev, src, type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index e9f2c11ea416..be243adf3e65 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -98,6 +98,8 @@ struct amdgpu_irq { struct irq_domain *domain; /* GPU irq controller domain */ unsigned virq[AMDGPU_MAX_IRQ_SRC_ID]; uint32_t srbm_soft_reset; + u32 retry_cam_doorbell_index; + bool retry_cam_enabled; }; void amdgpu_irq_disable_all(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index 6f81ed4fb0d9..b07c000fc8ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -118,6 +118,10 @@ int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; + /* JPEG in SRIOV does not support direct register read/write */ + if (amdgpu_sriov_vf(adev)) + return 0; + WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) @@ -202,17 +206,18 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) } else { r = 0; } - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch); - if (tmp == 0xDEADBEEF) - break; - udelay(1); + if (!amdgpu_sriov_vf(adev)) { + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch); + if (tmp == 0xDEADBEEF) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; } - if (i >= adev->usec_timeout) - r = -ETIMEDOUT; - dma_fence_put(fence); error: return r; @@ -236,19 +241,28 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev, return 0; } -void jpeg_set_ras_funcs(struct amdgpu_device *adev) +int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev) { + int err; + struct amdgpu_jpeg_ras *ras; + if (!adev->jpeg.ras) - return; + return 0; + + ras = adev->jpeg.ras; + err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); + if (err) { + dev_err(adev->dev, "Failed to register jpeg ras block!\n"); + return err; + } - amdgpu_ras_register_ras_block(adev, &adev->jpeg.ras->ras_block); + strcpy(ras->ras_block.ras_comm.name, "jpeg"); + ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG; + ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON; + adev->jpeg.ras_if = &ras->ras_block.ras_comm; - strcpy(adev->jpeg.ras->ras_block.ras_comm.name, "jpeg"); - adev->jpeg.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG; - adev->jpeg.ras->ras_block.ras_comm.type = 
AMDGPU_RAS_ERROR__POISON; - adev->jpeg.ras_if = &adev->jpeg.ras->ras_block.ras_comm; + if (!ras->ras_block.ras_late_init) + ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; - /* If don't define special ras_late_init function, use default ras_late_init */ - if (!adev->jpeg.ras->ras_block.ras_late_init) - adev->jpeg.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index e8ca3e32ad52..0ca76f0f23e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -72,6 +72,6 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout); int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); -void jpeg_set_ras_funcs(struct amdgpu_device *adev); +int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev); #endif /*__AMDGPU_JPEG_H__*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index 51c2a82e2fa4..8d9ff9e151de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -70,3 +70,75 @@ void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev, amdgpu_mca_reset_error_count(adev, mc_status_addr); } + +int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev) +{ + int err; + struct amdgpu_mca_ras_block *ras; + + if (!adev->mca.mp0.ras) + return 0; + + ras = adev->mca.mp0.ras; + + err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); + if (err) { + dev_err(adev->dev, "Failed to register mca.mp0 ras block!\n"); + return err; + } + + strcpy(ras->ras_block.ras_comm.name, "mca.mp0"); + ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA; + ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->mca.mp0.ras_if = &ras->ras_block.ras_comm; + + return 0; +} + +int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev) +{ + int err; + struct amdgpu_mca_ras_block *ras; + + if (!adev->mca.mp1.ras) + return 0; + + ras = adev->mca.mp1.ras; + + err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); + if (err) { + dev_err(adev->dev, "Failed to register mca.mp1 ras block!\n"); + return err; + } + + strcpy(ras->ras_block.ras_comm.name, "mca.mp1"); + ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA; + ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->mca.mp1.ras_if = &ras->ras_block.ras_comm; + + return 0; +} + +int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev) +{ + int err; + struct amdgpu_mca_ras_block *ras; + + if (!adev->mca.mpio.ras) + return 0; + + ras = adev->mca.mpio.ras; + + err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); + if (err) { + dev_err(adev->dev, "Failed to register mca.mpio ras block!\n"); + return err; + } + + strcpy(ras->ras_block.ras_comm.name, "mca.mpio"); + ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA; + ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->mca.mpio.ras_if = &ras->ras_block.ras_comm; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h index 7ce16d16e34b..997a073e2409 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h @@ -30,12 +30,7 @@ struct amdgpu_mca_ras { struct amdgpu_mca_ras_block *ras; }; -struct amdgpu_mca_funcs { - void (*init)(struct amdgpu_device *adev); -}; - struct amdgpu_mca { - const struct 
amdgpu_mca_funcs *funcs; struct amdgpu_mca_ras mp0; struct amdgpu_mca_ras mp1; struct amdgpu_mca_ras mpio; @@ -55,5 +50,7 @@ void amdgpu_mca_reset_error_count(struct amdgpu_device *adev, void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev, uint64_t mc_status_addr, void *ras_error_status); - +int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev); +int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev); +int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 82e27bd4f038..f0f00466b59f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -1104,6 +1104,11 @@ int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev, &ctx_data->meta_data_obj, &ctx_data->meta_data_mc_addr, &ctx_data->meta_data_ptr); + if (r) { + dev_warn(adev->dev, "(%d) create CTX bo failed\n", r); + return r; + } + if (!ctx_data->meta_data_obj) return -ENOMEM; @@ -1328,12 +1333,9 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev) struct amdgpu_mes_ctx_data ctx_data = {0}; struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL }; int gang_ids[3] = {0}; - int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, - AMDGPU_MES_CTX_MAX_GFX_RINGS}, - { AMDGPU_RING_TYPE_COMPUTE, - AMDGPU_MES_CTX_MAX_COMPUTE_RINGS}, - { AMDGPU_RING_TYPE_SDMA, - AMDGPU_MES_CTX_MAX_SDMA_RINGS } }; + int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 }, + { AMDGPU_RING_TYPE_COMPUTE, 1 }, + { AMDGPU_RING_TYPE_SDMA, 1} }; int i, r, pasid, k = 0; pasid = amdgpu_pasid_alloc(16); @@ -1432,13 +1434,31 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) struct amdgpu_firmware_info *info; char ucode_prefix[30]; char fw_name[40]; + bool need_retry = false; int r; - amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", - ucode_prefix, - pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1"); + amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, + sizeof(ucode_prefix)); + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", + ucode_prefix, + pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1"); + need_retry = true; + } else { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", + ucode_prefix, + pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1"); + } + r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name); + if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin", + ucode_prefix); + DRM_INFO("try to fall back to %s\n", fw_name); + r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], + fw_name); + } + if (r) goto out; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c new file mode 100644 index 000000000000..0f6b1021fef3 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#include "amdgpu.h" +#include "amdgpu_ras.h" + +int amdgpu_mmhub_ras_sw_init(struct amdgpu_device *adev) +{ + int err; + struct amdgpu_mmhub_ras *ras; + + if (!adev->mmhub.ras) + return 0; + + ras = adev->mmhub.ras; + err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); + if (err) { + dev_err(adev->dev, "Failed to register mmhub ras block!\n"); + return err; + } + + strcpy(ras->ras_block.ras_comm.name, "mmhub"); + ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB; + ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->mmhub.ras_if = &ras->ras_block.ras_comm; + + /* mmhub ras follows amdgpu_ras_block_late_init_default for late init */ + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index 93430d3823c9..d21bb6dae56e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -48,5 +48,7 @@ struct amdgpu_mmhub { struct amdgpu_mmhub_ras *ras; }; +int amdgpu_mmhub_ras_sw_init(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c index 37d779b8e4a6..a3bc00577a7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -22,6 +22,29 @@ #include "amdgpu.h" #include "amdgpu_ras.h" +int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev) +{ + int err; + struct amdgpu_nbio_ras *ras; + + if (!adev->nbio.ras) + return 0; + + ras = adev->nbio.ras; + err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); + if (err) { + dev_err(adev->dev, "Failed to register pcie_bif ras block!\n"); + return err; + } + + strcpy(ras->ras_block.ras_comm.name, "pcie_bif"); + ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__PCIE_BIF; + ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->nbio.ras_if = &ras->ras_block.ras_comm; + + return 0; +} + int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { int r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index a240336bbc6b..c686ff4bcc39 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -106,5 +106,6 @@ struct amdgpu_nbio { struct amdgpu_nbio_ras *ras; }; +int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev); int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, 
struct ras_common_if *ras_block); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 6c7d672412b2..2bd1a54ee866 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -600,7 +600,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && bo->tbo.resource->mem_type == TTM_PL_VRAM && - bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT) + amdgpu_bo_in_cpu_visible_vram(bo)) amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, ctx.bytes_moved); else @@ -1265,24 +1265,41 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type); } -void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem, - uint64_t *gtt_mem, uint64_t *cpu_mem) +void amdgpu_bo_get_memory(struct amdgpu_bo *bo, + struct amdgpu_mem_stats *stats) { unsigned int domain; + uint64_t size = amdgpu_bo_size(bo); domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); switch (domain) { case AMDGPU_GEM_DOMAIN_VRAM: - *vram_mem += amdgpu_bo_size(bo); + stats->vram += size; + if (amdgpu_bo_in_cpu_visible_vram(bo)) + stats->visible_vram += size; break; case AMDGPU_GEM_DOMAIN_GTT: - *gtt_mem += amdgpu_bo_size(bo); + stats->gtt += size; break; case AMDGPU_GEM_DOMAIN_CPU: default: - *cpu_mem += amdgpu_bo_size(bo); + stats->cpu += size; break; } + + if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) { + stats->requested_vram += size; + if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) + stats->requested_visible_vram += size; + + if (domain != AMDGPU_GEM_DOMAIN_VRAM) { + stats->evicted_vram += size; + if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) + stats->evicted_visible_vram += size; + } + } else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) { + stats->requested_gtt += size; + } } /** @@ -1346,7 +1363,6 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_operation_ctx ctx = { false, false }; struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); - unsigned long offset; int r; /* Remember that this BO was accessed by the CPU */ @@ -1355,8 +1371,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) if (bo->resource->mem_type != TTM_PL_VRAM) return 0; - offset = bo->resource->start << PAGE_SHIFT; - if ((offset + bo->base.size) <= adev->gmc.visible_vram_size) + if (amdgpu_bo_in_cpu_visible_vram(abo)) return 0; /* Can't move a pinned BO to visible VRAM */ @@ -1378,10 +1393,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) else if (unlikely(r)) return VM_FAULT_SIGBUS; - offset = bo->resource->start << PAGE_SHIFT; /* this should never happen */ if (bo->resource->mem_type == TTM_PL_VRAM && - (offset + bo->base.size) > adev->gmc.visible_vram_size) + !amdgpu_bo_in_cpu_visible_vram(abo)) return VM_FAULT_SIGBUS; ttm_bo_move_to_lru_tail_unlocked(bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 93207badf83f..35b8106816a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -126,6 +126,27 @@ struct amdgpu_bo_vm { struct amdgpu_vm_bo_base entries[]; }; +struct amdgpu_mem_stats { + /* current VRAM usage, includes visible VRAM */ + uint64_t vram; + /* current visible VRAM usage */ + uint64_t visible_vram; + /* current GTT usage */ + uint64_t 
gtt; + /* current system memory usage */ + uint64_t cpu; + /* sum of evicted buffers, includes visible VRAM */ + uint64_t evicted_vram; + /* sum of evicted buffers due to CPU access */ + uint64_t evicted_visible_vram; + /* how much userspace asked for, includes vis.VRAM */ + uint64_t requested_vram; + /* how much userspace asked for */ + uint64_t requested_visible_vram; + /* how much userspace asked for */ + uint64_t requested_gtt; +}; + static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo) { return container_of(tbo, struct amdgpu_bo, tbo); @@ -325,8 +346,8 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv, int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo); -void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem, - uint64_t *gtt_mem, uint64_t *cpu_mem); +void amdgpu_bo_get_memory(struct amdgpu_bo *bo, + struct amdgpu_mem_stats *stats); void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo); int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence); @@ -336,15 +357,22 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, /* * sub allocation */ +static inline struct amdgpu_sa_manager * +to_amdgpu_sa_manager(struct drm_suballoc_manager *manager) +{ + return container_of(manager, struct amdgpu_sa_manager, base); +} -static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo) +static inline uint64_t amdgpu_sa_bo_gpu_addr(struct drm_suballoc *sa_bo) { - return sa_bo->manager->gpu_addr + sa_bo->soffset; + return to_amdgpu_sa_manager(sa_bo->manager)->gpu_addr + + drm_suballoc_soffset(sa_bo); } -static inline void * amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo) +static inline void *amdgpu_sa_bo_cpu_addr(struct drm_suballoc *sa_bo) { - return sa_bo->manager->cpu_ptr + sa_bo->soffset; + return to_amdgpu_sa_manager(sa_bo->manager)->cpu_ptr + + drm_suballoc_soffset(sa_bo); } int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, @@ -355,11 +383,11 @@ void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev, int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, struct amdgpu_sa_manager *sa_manager); int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, - struct amdgpu_sa_bo **sa_bo, - unsigned size, unsigned align); + struct drm_suballoc **sa_bo, + unsigned int size); void amdgpu_sa_bo_free(struct amdgpu_device *adev, - struct amdgpu_sa_bo **sa_bo, - struct dma_fence *fence); + struct drm_suballoc **sa_bo, + struct dma_fence *fence); #if defined(CONFIG_DEBUG_FS) void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, struct seq_file *m); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 3f5d13035aff..9d7e6e0e73ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -148,6 +148,7 @@ static int psp_init_sriov_microcode(struct psp_context *psp) break; case IP_VERSION(13, 0, 10): adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA; + ret = psp_init_cap_microcode(psp, ucode_prefix); break; default: return -EINVAL; @@ -191,6 +192,7 @@ static int psp_early_init(void *handle) psp_v12_0_set_psp_funcs(psp); break; case IP_VERSION(13, 0, 2): + case IP_VERSION(13, 0, 6): psp_v13_0_set_psp_funcs(psp); break; case IP_VERSION(13, 0, 1): @@ -837,7 +839,15 @@ static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp, static 
int psp_tmr_unload(struct psp_context *psp) { int ret; - struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); + struct psp_gfx_cmd_resp *cmd; + + /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV, + * as TMR is not loaded at all + */ + if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) + return 0; + + cmd = acquire_psp_cmd_buf(psp); psp_prep_tmr_unload_cmd_buf(psp, cmd); dev_dbg(psp->adev->dev, "free PSP TMR buffer\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 63dfcc98152d..3ab8a88789c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -34,6 +34,7 @@ #include "amdgpu_atomfirmware.h" #include "amdgpu_xgmi.h" #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" +#include "nbio_v4_3.h" #include "atom.h" #include "amdgpu_reset.h" @@ -2428,6 +2429,13 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) else adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN | 1 << AMDGPU_RAS_BLOCK__JPEG); + + /* + * XGMI RAS is not supported if xgmi num physical nodes + * is zero + */ + if (!adev->gmc.xgmi.num_physical_nodes) + adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL); } else { dev_info(adev->dev, "SRAM ECC is not presented.\n"); } @@ -2554,21 +2562,34 @@ int amdgpu_ras_init(struct amdgpu_device *adev) /* initialize nbio ras function ahead of any other * ras functions so hardware fatal error interrupt * can be enabled as early as possible */ - switch (adev->asic_type) { - case CHIP_VEGA20: - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: - if (!adev->gmc.xgmi.connected_to_cpu) { + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(7, 4, 0): + case IP_VERSION(7, 4, 1): + case IP_VERSION(7, 4, 4): + if (!adev->gmc.xgmi.connected_to_cpu) adev->nbio.ras = &nbio_v7_4_ras; - amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block); - adev->nbio.ras_if = &adev->nbio.ras->ras_block.ras_comm; - } + break; + case IP_VERSION(4, 3, 0): + if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF)) + /* unlike other generation of nbio ras, + * nbio v4_3 only support fatal error interrupt + * to inform software that DF is freezed due to + * system fatal error event. driver should not + * enable nbio ras in such case. 
Instead, + * check DF RAS */ + adev->nbio.ras = &nbio_v4_3_ras; break; default: /* nbio ras is not available */ break; } + /* nbio ras block needs to be enabled ahead of other ras blocks + * to handle fatal error */ + r = amdgpu_nbio_ras_sw_init(adev); + if (r) + return r; + if (adev->nbio.ras && adev->nbio.ras->init_ras_controller_interrupt) { r = adev->nbio.ras->init_ras_controller_interrupt(adev); @@ -3073,9 +3094,6 @@ int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, if (!adev || !ras_block_obj) return -EINVAL; - if (!amdgpu_ras_asic_supported(adev)) - return 0; - ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL); if (!ras_node) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index ef38f4c93df0..17b3d1992e80 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -583,6 +583,10 @@ amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) { return TA_RAS_BLOCK__FUSE; case AMDGPU_RAS_BLOCK__MCA: return TA_RAS_BLOCK__MCA; + case AMDGPU_RAS_BLOCK__VCN: + return TA_RAS_BLOCK__VCN; + case AMDGPU_RAS_BLOCK__JPEG: + return TA_RAS_BLOCK__JPEG; default: WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block); return TA_RAS_BLOCK__UMC; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 2e08fce87521..c2c2a7718613 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -107,47 +107,12 @@ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev) { - if (adev->asic_type == CHIP_IP_DISCOVERY) { - switch (adev->ip_versions[MP1_HWIP][0]) { - case IP_VERSION(13, 0, 0): - case IP_VERSION(13, 0, 10): - return true; - default: - return false; - } - } - - return adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_ARCTURUS || - adev->asic_type == CHIP_SIENNA_CICHLID || - adev->asic_type == CHIP_ALDEBARAN; -} - -static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev, - struct amdgpu_ras_eeprom_control *control) -{ - struct atom_context *atom_ctx = adev->mode_info.atom_context; - - if (!control || !atom_ctx) - return false; - - if (strnstr(atom_ctx->vbios_version, - "D342", - sizeof(atom_ctx->vbios_version))) - control->i2c_address = EEPROM_I2C_MADDR_0; - else - control->i2c_address = EEPROM_I2C_MADDR_4; - - return true; -} - -static bool __get_eeprom_i2c_addr_ip_discovery(struct amdgpu_device *adev, - struct amdgpu_ras_eeprom_control *control) -{ switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 2): /* VEGA20 and ARCTURUS */ + case IP_VERSION(11, 0, 7): /* Sienna cichlid */ case IP_VERSION(13, 0, 0): + case IP_VERSION(13, 0, 2): /* Aldebaran */ case IP_VERSION(13, 0, 10): - control->i2c_address = EEPROM_I2C_MADDR_4; return true; default: return false; @@ -178,43 +143,35 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev, return true; } - switch (adev->asic_type) { - case CHIP_VEGA20: - control->i2c_address = EEPROM_I2C_MADDR_0; - break; - - case CHIP_ARCTURUS: - return __get_eeprom_i2c_addr_arct(adev, control); - - case CHIP_SIENNA_CICHLID: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 2): + /* VEGA20 and ARCTURUS */ + if (adev->asic_type == CHIP_VEGA20) + control->i2c_address = EEPROM_I2C_MADDR_0; + else if (strnstr(atom_ctx->vbios_version, + "D342", + sizeof(atom_ctx->vbios_version))) + control->i2c_address = EEPROM_I2C_MADDR_0; + else + control->i2c_address = EEPROM_I2C_MADDR_4; + return 
true; + case IP_VERSION(11, 0, 7): control->i2c_address = EEPROM_I2C_MADDR_0; - break; - - case CHIP_ALDEBARAN: + return true; + case IP_VERSION(13, 0, 2): if (strnstr(atom_ctx->vbios_version, "D673", sizeof(atom_ctx->vbios_version))) control->i2c_address = EEPROM_I2C_MADDR_4; else control->i2c_address = EEPROM_I2C_MADDR_0; - break; - - case CHIP_IP_DISCOVERY: - return __get_eeprom_i2c_addr_ip_discovery(adev, control); - - default: - return false; - } - - switch (adev->ip_versions[MP1_HWIP][0]) { + return true; case IP_VERSION(13, 0, 0): + case IP_VERSION(13, 0, 10): control->i2c_address = EEPROM_I2C_MADDR_4; - break; - + return true; default: - break; + return false; } - - return true; } static void diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 3989e755a5b4..d8749444b689 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -27,6 +27,7 @@ #include <drm/amdgpu_drm.h> #include <drm/gpu_scheduler.h> #include <drm/drm_print.h> +#include <drm/drm_suballoc.h> struct amdgpu_device; struct amdgpu_ring; @@ -92,7 +93,7 @@ enum amdgpu_ib_pool_type { }; struct amdgpu_ib { - struct amdgpu_sa_bo *sa_bo; + struct drm_suballoc *sa_bo; uint32_t length_dw; uint64_t gpu_addr; uint32_t *ptr; @@ -164,7 +165,6 @@ struct amdgpu_ring_funcs { bool support_64bit_ptrs; bool no_user_fence; bool secure_submission_supported; - unsigned vmhub; unsigned extra_dw; /* ring read/write ptr handling */ @@ -249,6 +249,7 @@ struct amdgpu_ring { uint64_t ptr_mask; uint32_t buf_mask; u32 idx; + u32 xcc_id; u32 me; u32 pipe; u32 queue; @@ -274,6 +275,7 @@ struct amdgpu_ring { unsigned cond_exe_offs; u64 cond_exe_gpu_addr; volatile u32 *cond_exe_cpu_addr; + unsigned vm_hub; unsigned vm_inv_eng; struct dma_fence *vmid_wait; bool has_compute_vm_bug; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 524d10b21041..c6b4337eb20c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -44,327 +44,63 @@ #include "amdgpu.h" -static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo); -static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager); - int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, struct amdgpu_sa_manager *sa_manager, - unsigned size, u32 align, u32 domain) + unsigned int size, u32 suballoc_align, u32 domain) { - int i, r; - - init_waitqueue_head(&sa_manager->wq); - sa_manager->bo = NULL; - sa_manager->size = size; - sa_manager->domain = domain; - sa_manager->align = align; - sa_manager->hole = &sa_manager->olist; - INIT_LIST_HEAD(&sa_manager->olist); - for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) - INIT_LIST_HEAD(&sa_manager->flist[i]); + int r; - r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo, - &sa_manager->gpu_addr, &sa_manager->cpu_ptr); + r = amdgpu_bo_create_kernel(adev, size, AMDGPU_GPU_PAGE_SIZE, domain, + &sa_manager->bo, &sa_manager->gpu_addr, + &sa_manager->cpu_ptr); if (r) { dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); return r; } - memset(sa_manager->cpu_ptr, 0, sa_manager->size); + memset(sa_manager->cpu_ptr, 0, size); + drm_suballoc_manager_init(&sa_manager->base, size, suballoc_align); return r; } void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev, struct amdgpu_sa_manager *sa_manager) { - struct amdgpu_sa_bo *sa_bo, *tmp; - if (sa_manager->bo == NULL) { dev_err(adev->dev, "no bo for sa manager\n"); return; } - if 
(!list_empty(&sa_manager->olist)) { - sa_manager->hole = &sa_manager->olist, - amdgpu_sa_bo_try_free(sa_manager); - if (!list_empty(&sa_manager->olist)) { - dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n"); - } - } - list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) { - amdgpu_sa_bo_remove_locked(sa_bo); - } + drm_suballoc_manager_fini(&sa_manager->base); amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr); - sa_manager->size = 0; } -static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo) -{ - struct amdgpu_sa_manager *sa_manager = sa_bo->manager; - if (sa_manager->hole == &sa_bo->olist) { - sa_manager->hole = sa_bo->olist.prev; - } - list_del_init(&sa_bo->olist); - list_del_init(&sa_bo->flist); - dma_fence_put(sa_bo->fence); - kfree(sa_bo); -} - -static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager) +int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, + struct drm_suballoc **sa_bo, + unsigned int size) { - struct amdgpu_sa_bo *sa_bo, *tmp; + struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size, + GFP_KERNEL, true, 0); - if (sa_manager->hole->next == &sa_manager->olist) - return; + if (IS_ERR(sa)) { + *sa_bo = NULL; - sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist); - list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) { - if (sa_bo->fence == NULL || - !dma_fence_is_signaled(sa_bo->fence)) { - return; - } - amdgpu_sa_bo_remove_locked(sa_bo); + return PTR_ERR(sa); } -} -static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager) -{ - struct list_head *hole = sa_manager->hole; - - if (hole != &sa_manager->olist) { - return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset; - } + *sa_bo = sa; return 0; } -static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager) -{ - struct list_head *hole = sa_manager->hole; - - if (hole->next != &sa_manager->olist) { - return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset; - } - return sa_manager->size; -} - -static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager, - struct amdgpu_sa_bo *sa_bo, - unsigned size, unsigned align) -{ - unsigned soffset, eoffset, wasted; - - soffset = amdgpu_sa_bo_hole_soffset(sa_manager); - eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager); - wasted = (align - (soffset % align)) % align; - - if ((eoffset - soffset) >= (size + wasted)) { - soffset += wasted; - - sa_bo->manager = sa_manager; - sa_bo->soffset = soffset; - sa_bo->eoffset = soffset + size; - list_add(&sa_bo->olist, sa_manager->hole); - INIT_LIST_HEAD(&sa_bo->flist); - sa_manager->hole = &sa_bo->olist; - return true; - } - return false; -} - -/** - * amdgpu_sa_event - Check if we can stop waiting - * - * @sa_manager: pointer to the sa_manager - * @size: number of bytes we want to allocate - * @align: alignment we need to match - * - * Check if either there is a fence we can wait for or - * enough free memory to satisfy the allocation directly - */ -static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager, - unsigned size, unsigned align) -{ - unsigned soffset, eoffset, wasted; - int i; - - for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) - if (!list_empty(&sa_manager->flist[i])) - return true; - - soffset = amdgpu_sa_bo_hole_soffset(sa_manager); - eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager); - wasted = (align - (soffset % align)) % align; - - if ((eoffset - soffset) >= (size + wasted)) { - return true; - } - - return 
false; -} - -static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, - struct dma_fence **fences, - unsigned *tries) -{ - struct amdgpu_sa_bo *best_bo = NULL; - unsigned i, soffset, best, tmp; - - /* if hole points to the end of the buffer */ - if (sa_manager->hole->next == &sa_manager->olist) { - /* try again with its beginning */ - sa_manager->hole = &sa_manager->olist; - return true; - } - - soffset = amdgpu_sa_bo_hole_soffset(sa_manager); - /* to handle wrap around we add sa_manager->size */ - best = sa_manager->size * 2; - /* go over all fence list and try to find the closest sa_bo - * of the current last - */ - for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) { - struct amdgpu_sa_bo *sa_bo; - - fences[i] = NULL; - - if (list_empty(&sa_manager->flist[i])) - continue; - - sa_bo = list_first_entry(&sa_manager->flist[i], - struct amdgpu_sa_bo, flist); - - if (!dma_fence_is_signaled(sa_bo->fence)) { - fences[i] = sa_bo->fence; - continue; - } - - /* limit the number of tries each ring gets */ - if (tries[i] > 2) { - continue; - } - - tmp = sa_bo->soffset; - if (tmp < soffset) { - /* wrap around, pretend it's after */ - tmp += sa_manager->size; - } - tmp -= soffset; - if (tmp < best) { - /* this sa bo is the closest one */ - best = tmp; - best_bo = sa_bo; - } - } - - if (best_bo) { - uint32_t idx = best_bo->fence->context; - - idx %= AMDGPU_SA_NUM_FENCE_LISTS; - ++tries[idx]; - sa_manager->hole = best_bo->olist.prev; - - /* we knew that this one is signaled, - so it's save to remote it */ - amdgpu_sa_bo_remove_locked(best_bo); - return true; - } - return false; -} - -int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, - struct amdgpu_sa_bo **sa_bo, - unsigned size, unsigned align) -{ - struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS]; - unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS]; - unsigned count; - int i, r; - signed long t; - - if (WARN_ON_ONCE(align > sa_manager->align)) - return -EINVAL; - - if (WARN_ON_ONCE(size > sa_manager->size)) - return -EINVAL; - - *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL); - if (!(*sa_bo)) - return -ENOMEM; - (*sa_bo)->manager = sa_manager; - (*sa_bo)->fence = NULL; - INIT_LIST_HEAD(&(*sa_bo)->olist); - INIT_LIST_HEAD(&(*sa_bo)->flist); - - spin_lock(&sa_manager->wq.lock); - do { - for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) - tries[i] = 0; - - do { - amdgpu_sa_bo_try_free(sa_manager); - - if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo, - size, align)) { - spin_unlock(&sa_manager->wq.lock); - return 0; - } - - /* see if we can skip over some allocations */ - } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries)); - - for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) - if (fences[i]) - fences[count++] = dma_fence_get(fences[i]); - - if (count) { - spin_unlock(&sa_manager->wq.lock); - t = dma_fence_wait_any_timeout(fences, count, false, - MAX_SCHEDULE_TIMEOUT, - NULL); - for (i = 0; i < count; ++i) - dma_fence_put(fences[i]); - - r = (t > 0) ? 
0 : t; - spin_lock(&sa_manager->wq.lock); - } else { - /* if we have nothing to wait for block */ - r = wait_event_interruptible_locked( - sa_manager->wq, - amdgpu_sa_event(sa_manager, size, align) - ); - } - - } while (!r); - - spin_unlock(&sa_manager->wq.lock); - kfree(*sa_bo); - *sa_bo = NULL; - return r; -} - -void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, +void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct drm_suballoc **sa_bo, struct dma_fence *fence) { - struct amdgpu_sa_manager *sa_manager; - if (sa_bo == NULL || *sa_bo == NULL) { return; } - sa_manager = (*sa_bo)->manager; - spin_lock(&sa_manager->wq.lock); - if (fence && !dma_fence_is_signaled(fence)) { - uint32_t idx; - - (*sa_bo)->fence = dma_fence_get(fence); - idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS; - list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]); - } else { - amdgpu_sa_bo_remove_locked(*sa_bo); - } - wake_up_all_locked(&sa_manager->wq); - spin_unlock(&sa_manager->wq.lock); + drm_suballoc_free(*sa_bo, fence); *sa_bo = NULL; } @@ -373,26 +109,8 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, struct seq_file *m) { - struct amdgpu_sa_bo *i; - - spin_lock(&sa_manager->wq.lock); - list_for_each_entry(i, &sa_manager->olist, olist) { - uint64_t soffset = i->soffset + sa_manager->gpu_addr; - uint64_t eoffset = i->eoffset + sa_manager->gpu_addr; - if (&i->olist == sa_manager->hole) { - seq_printf(m, ">"); - } else { - seq_printf(m, " "); - } - seq_printf(m, "[0x%010llx 0x%010llx] size %8lld", - soffset, eoffset, eoffset - soffset); + struct drm_printer p = drm_seq_file_printer(m); - if (i->fence) - seq_printf(m, " protected by 0x%016llx on context %llu", - i->fence->seqno, i->fence->context); - - seq_printf(m, "\n"); - } - spin_unlock(&sa_manager->wq.lock); + drm_suballoc_dump_debug_info(&sa_manager->base, &p, sa_manager->gpu_addr); } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index e9b45089a28a..863b2a34b2d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -38,6 +38,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, { struct fd f = fdget(fd); struct amdgpu_fpriv *fpriv; + struct amdgpu_ctx_mgr *mgr; struct amdgpu_ctx *ctx; uint32_t id; int r; @@ -51,8 +52,11 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, return r; } - idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id) + mgr = &fpriv->ctx_mgr; + mutex_lock(&mgr->lock); + idr_for_each_entry(&mgr->ctx_handles, ctx, id) amdgpu_ctx_priority_override(ctx, priority); + mutex_unlock(&mgr->lock); fdput(f); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 98d91ebf5c26..525dffbe046a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -233,7 +233,7 @@ TRACE_EVENT(amdgpu_vm_grab_id, __entry->pasid = vm->pasid; __assign_str(ring, ring->name); __entry->vmid = job->vmid; - __entry->vm_hub = ring->funcs->vmhub, + __entry->vm_hub = ring->vm_hub, __entry->pd_addr = job->vm_pd_addr; __entry->needs_flush = job->vm_needs_flush; ), @@ -427,7 +427,7 @@ TRACE_EVENT(amdgpu_vm_flush, TP_fast_assign( __assign_str(ring, ring->name); __entry->vmid = vmid; - __entry->vm_hub = ring->funcs->vmhub; + __entry->vm_hub = ring->vm_hub; 
__entry->pd_addr = pd_addr; ), TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c5ef7f7bdc15..2cd081cbf706 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -466,11 +466,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, return r; } - /* Can't move a pinned BO */ abo = ttm_to_amdgpu_bo(bo); - if (WARN_ON_ONCE(abo->tbo.pin_count > 0)) - return -EINVAL; - adev = amdgpu_ttm_adev(bo->bdev); if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM && diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 380b89114341..f76b1cb8baf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -126,19 +126,6 @@ void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr) } } -void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr) -{ - uint16_t version_major = le16_to_cpu(hdr->header_version_major); - uint16_t version_minor = le16_to_cpu(hdr->header_version_minor); - - DRM_DEBUG("IMU\n"); - amdgpu_ucode_print_common_hdr(hdr); - - if (version_major != 1) { - DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor); - } -} - void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr) { uint16_t version_major = le16_to_cpu(hdr->header_version_major); @@ -472,6 +459,12 @@ void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr) DRM_DEBUG("psp_dbg_drv_size_bytes: %u\n", le32_to_cpu(desc->size_bytes)); break; + case PSP_FW_TYPE_PSP_RAS_DRV: + DRM_DEBUG("psp_ras_drv_version: %u\n", + le32_to_cpu(desc->fw_version)); + DRM_DEBUG("psp_ras_drv_size_bytes: %u\n", + le32_to_cpu(desc->size_bytes)); + break; default: DRM_DEBUG("Unsupported PSP fw type: %d\n", desc->fw_type); break; @@ -669,6 +662,8 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id) return "VCN1_RAM"; case AMDGPU_UCODE_ID_DMCUB: return "DMCUB"; + case AMDGPU_UCODE_ID_CAP: + return "CAP"; default: return "UNKNOWN UCODE"; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 1b8574bc4463..1edf8e6aeb16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -208,6 +208,36 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true); } +int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev) +{ + int err; + struct amdgpu_umc_ras *ras; + + if (!adev->umc.ras) + return 0; + + ras = adev->umc.ras; + + err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); + if (err) { + dev_err(adev->dev, "Failed to register umc ras block!\n"); + return err; + } + + strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc"); + ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC; + ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->umc.ras_if = &ras->ras_block.ras_comm; + + if (!ras->ras_block.ras_late_init) + ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init; + + if (!ras->ras_block.ras_cb) + ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb; + + return 0; +} + int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { int r; @@ -272,3 +302,34 @@ void amdgpu_umc_fill_error_record(struct ras_err_data *err_data, err_data->err_addr_cnt++; } + +int 
amdgpu_umc_loop_channels(struct amdgpu_device *adev, + umc_func func, void *data) +{ + uint32_t node_inst = 0; + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + int ret = 0; + + if (adev->umc.node_inst_num) { + LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) { + ret = func(adev, node_inst, umc_inst, ch_inst, data); + if (ret) { + dev_err(adev->dev, "Node %d umc %d ch %d func returns %d\n", + node_inst, umc_inst, ch_inst, ret); + return ret; + } + } + } else { + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + ret = func(adev, 0, umc_inst, ch_inst, data); + if (ret) { + dev_err(adev->dev, "Umc %d ch %d func returns %d\n", + umc_inst, ch_inst, ret); + return ret; + } + } + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 36e19336f3b3..86133f77a9a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -47,6 +47,10 @@ #define LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) \ LOOP_UMC_NODE_INST((node_inst)) LOOP_UMC_INST_AND_CH((umc_inst), (ch_inst)) + +typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst, + uint32_t umc_inst, uint32_t ch_inst, void *data); + struct amdgpu_umc_ras { struct amdgpu_ras_block_object ras_block; void (*err_cnt_init)(struct amdgpu_device *adev); @@ -87,6 +91,7 @@ struct amdgpu_umc { unsigned long active_mask; }; +int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev); int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset); int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev, @@ -103,4 +108,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry); int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev, uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst); + +int amdgpu_umc_loop_channels(struct amdgpu_device *adev, + umc_func func, void *data); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 632a6ded5735..6887109abb13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1118,14 +1118,11 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, { struct amdgpu_device *adev = ring->adev; struct dma_fence *f = NULL; + uint32_t offset, data[4]; struct amdgpu_job *job; struct amdgpu_ib *ib; - uint32_t data[4]; uint64_t addr; - long r; - int i; - unsigned offset_idx = 0; - unsigned offset[3] = { UVD_BASE_SI, 0, 0 }; + int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity, AMDGPU_FENCE_OWNER_UNDEFINED, @@ -1134,16 +1131,15 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (r) return r; - if (adev->asic_type >= CHIP_VEGA10) { - offset_idx = 1 + ring->me; - offset[1] = adev->reg_offset[UVD_HWIP][0][1]; - offset[2] = adev->reg_offset[UVD_HWIP][1][1]; - } + if (adev->asic_type >= CHIP_VEGA10) + offset = adev->reg_offset[UVD_HWIP][ring->me][1]; + else + offset = UVD_BASE_SI; - data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0); - data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0); - data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0); - data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0); + data[0] = PACKET0(offset + UVD_GPCOM_VCPU_DATA0, 0); + data[1] = PACKET0(offset + UVD_GPCOM_VCPU_DATA1, 0); + data[2] = PACKET0(offset + UVD_GPCOM_VCPU_CMD, 
0); + data[3] = PACKET0(offset + UVD_NO_OP, 0); ib = &job->ibs[0]; addr = amdgpu_bo_gpu_offset(bo); @@ -1160,14 +1156,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->length_dw = 16; if (direct) { - r = dma_resv_wait_timeout(bo->tbo.base.resv, - DMA_RESV_USAGE_KERNEL, false, - msecs_to_jiffies(10)); - if (r == 0) - r = -ETIMEDOUT; - if (r < 0) - goto err_free; - r = amdgpu_job_submit_direct(job, ring, &f); if (r) goto err_free; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 2fb61410b1c0..e2b7324a70cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -585,6 +585,7 @@ err: /** * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary * + * @p: cs parser * @ib: indirect buffer to use * @lo: address of lower dword * @hi: address of higher dword diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index e7974de8b035..e63fcc58e8e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -1181,19 +1181,28 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev, return 0; } -void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev) +int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev) { + int err; + struct amdgpu_vcn_ras *ras; + if (!adev->vcn.ras) - return; + return 0; - amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block); + ras = adev->vcn.ras; + err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); + if (err) { + dev_err(adev->dev, "Failed to register vcn ras block!\n"); + return err; + } - strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn"); - adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN; - adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON; - adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm; + strcpy(ras->ras_block.ras_comm.name, "vcn"); + ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN; + ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON; + adev->vcn.ras_if = &ras->ras_block.ras_comm; - /* If don't define special ras_late_init function, use default ras_late_init */ - if (!adev->vcn.ras->ras_block.ras_late_init) - adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; + if (!ras->ras_block.ras_late_init) + ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; + + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index d3e2af902907..c730949ece7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -400,6 +400,6 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); -void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev); +int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b9441ab457ea..3c0310576b3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -483,7 +483,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, struct amdgpu_job *job) { struct amdgpu_device *adev = ring->adev; - unsigned vmhub = ring->funcs->vmhub; + unsigned vmhub = ring->vm_hub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; if (job->vmid == 0) @@ -517,7 +517,7 @@ 
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync) { struct amdgpu_device *adev = ring->adev; - unsigned vmhub = ring->funcs->vmhub; + unsigned vmhub = ring->vm_hub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct amdgpu_vmid *id = &id_mgr->ids[job->vmid]; bool spm_update_needed = job->spm_update_needed; @@ -867,6 +867,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, pages_addr[idx - 1] + PAGE_SIZE)) break; } + if (!contiguous) + count--; num_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE; } @@ -918,8 +920,8 @@ error_unlock: return r; } -void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, - uint64_t *gtt_mem, uint64_t *cpu_mem) +void amdgpu_vm_get_memory(struct amdgpu_vm *vm, + struct amdgpu_mem_stats *stats) { struct amdgpu_bo_va *bo_va, *tmp; @@ -927,41 +929,36 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { if (!bo_va->base.bo) continue; - amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, - gtt_mem, cpu_mem); + amdgpu_bo_get_memory(bo_va->base.bo, stats); } list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { if (!bo_va->base.bo) continue; - amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, - gtt_mem, cpu_mem); + amdgpu_bo_get_memory(bo_va->base.bo, stats); } list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { if (!bo_va->base.bo) continue; - amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, - gtt_mem, cpu_mem); + amdgpu_bo_get_memory(bo_va->base.bo, stats); } list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { if (!bo_va->base.bo) continue; - amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, - gtt_mem, cpu_mem); + amdgpu_bo_get_memory(bo_va->base.bo, stats); } list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { if (!bo_va->base.bo) continue; - amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, - gtt_mem, cpu_mem); + amdgpu_bo_get_memory(bo_va->base.bo, stats); } list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { if (!bo_va->base.bo) continue; - amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, - gtt_mem, cpu_mem); + amdgpu_bo_get_memory(bo_va->base.bo, stats); } spin_unlock(&vm->status_lock); } + /** * amdgpu_vm_bo_update - update all BO mappings in the vm page table * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 856a64bc7a89..6f085f0b4ef3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -40,6 +40,7 @@ struct amdgpu_bo_va; struct amdgpu_job; struct amdgpu_bo_list_entry; struct amdgpu_bo_vm; +struct amdgpu_mem_stats; /* * GPUVM handling @@ -457,8 +458,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm *vm); -void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, - uint64_t *gtt_mem, uint64_t *cpu_mem); +void amdgpu_vm_get_memory(struct amdgpu_vm *vm, + struct amdgpu_mem_stats *stats); int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_vm *vmbo, bool immediate); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 01e42bdd8e4e..df63dc3bca18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -673,6 +673,7 @@ void amdgpu_vm_pt_free_work(struct work_struct *work) * @adev: amdgpu device 
structure * @vm: amdgpu vm structure * @start: optional cursor where to start freeing PDs/PTs + * @unlocked: vm resv unlock status * * Free the page directory or page table level and all sub levels. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 535cd6569bcc..349416e176a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -171,7 +171,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p, src += p->num_dw_left * 4; - pe += amdgpu_gmc_sign_extend(amdgpu_bo_gpu_offset_no_check(bo)); + pe += amdgpu_bo_gpu_offset_no_check(bo); trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate); amdgpu_vm_copy_pte(p->adev, ib, pe, src, count); @@ -198,7 +198,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p, { struct amdgpu_ib *ib = p->job->ibs; - pe += amdgpu_gmc_sign_extend(amdgpu_bo_gpu_offset_no_check(bo)); + pe += amdgpu_bo_gpu_offset_no_check(bo); trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate); if (count < 3) { amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 4340d08f7607..439925477fb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -308,7 +308,7 @@ static const struct sysfs_ops amdgpu_xgmi_hive_ops = { .show = amdgpu_xgmi_show_attrs, }; -struct kobj_type amdgpu_xgmi_hive_type = { +static const struct kobj_type amdgpu_xgmi_hive_type = { .release = amdgpu_xgmi_hive_release, .sysfs_ops = &amdgpu_xgmi_hive_ops, .default_groups = amdgpu_xgmi_hive_groups, @@ -1048,12 +1048,30 @@ struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = { struct amdgpu_xgmi_ras xgmi_ras = { .ras_block = { - .ras_comm = { - .name = "xgmi_wafl", - .block = AMDGPU_RAS_BLOCK__XGMI_WAFL, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - }, .hw_ops = &xgmi_ras_hw_ops, .ras_late_init = amdgpu_xgmi_ras_late_init, }, }; + +int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev) +{ + int err; + struct amdgpu_xgmi_ras *ras; + + if (!adev->gmc.xgmi.ras) + return 0; + + ras = adev->gmc.xgmi.ras; + err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); + if (err) { + dev_err(adev->dev, "Failed to register xgmi_wafl_pcs ras block!\n"); + return err; + } + + strcpy(ras->ras_block.ras_comm.name, "xgmi_wafl"); + ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__XGMI_WAFL; + ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->gmc.xgmi.ras_if = &ras->ras_block.ras_comm; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index 30dcc1681b4e..86fbf56938f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -73,5 +73,6 @@ static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev, adev->gmc.xgmi.hive_id && adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id); } +int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 6983acc456b2..f5b5ce1051a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4461,6 +4461,7 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; else ring->doorbell_index = adev->doorbell_index.gfx_ring1 
<< 1; + ring->vm_hub = AMDGPU_GFXHUB_0; sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; @@ -4489,6 +4490,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX10_MEC_HPD_SIZE); + ring->vm_hub = AMDGPU_GFXHUB_0; sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP @@ -7266,7 +7268,6 @@ static int gfx_v10_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - uint32_t tmp; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -7285,17 +7286,9 @@ static int gfx_v10_0_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)) { gfx_v10_0_cp_gfx_enable(adev, false); - /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */ - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) { - tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid); - tmp &= 0xffffff00; - WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp); - } else { - tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS); - tmp &= 0xffffff00; - WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp); - } - + /* Remove the steps of clearing KIQ position. + * It causes GFX hang when another Win guest is rendering. + */ return 0; } gfx_v10_0_cp_enable(adev, false); @@ -9258,7 +9251,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, .secure_submission_supported = true, - .vmhub = AMDGPU_GFXHUB_0, .get_rptr = gfx_v10_0_ring_get_rptr_gfx, .get_wptr = gfx_v10_0_ring_get_wptr_gfx, .set_wptr = gfx_v10_0_ring_set_wptr_gfx, @@ -9313,7 +9305,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { .align_mask = 0xff, .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, - .vmhub = AMDGPU_GFXHUB_0, .get_rptr = gfx_v10_0_ring_get_rptr_compute, .get_wptr = gfx_v10_0_ring_get_wptr_compute, .set_wptr = gfx_v10_0_ring_set_wptr_compute, @@ -9349,7 +9340,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { .align_mask = 0xff, .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, - .vmhub = AMDGPU_GFXHUB_0, .get_rptr = gfx_v10_0_ring_get_rptr_compute, .get_wptr = gfx_v10_0_ring_get_wptr_compute, .set_wptr = gfx_v10_0_ring_set_wptr_compute, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index ecf8ceb53311..a9da0486467a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -866,6 +866,7 @@ static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; else ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1; + ring->vm_hub = AMDGPU_GFXHUB_0; sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; @@ -896,6 +897,7 @@ static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX11_MEC_HPD_SIZE); + ring->vm_hub = AMDGPU_GFXHUB_0; sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = 
AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP @@ -4671,11 +4673,24 @@ static int gfx_v11_0_post_soft_reset(void *handle) static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; + uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after; amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); - clock = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER) | - ((uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER) << 32ULL); + if (amdgpu_sriov_vf(adev)) { + clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI); + clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO); + clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI); + if (clock_counter_hi_pre != clock_counter_hi_after) + clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO); + } else { + clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); + clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); + clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); + if (clock_counter_hi_pre != clock_counter_hi_after) + clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); + } + clock = clock_counter_lo | (clock_counter_hi_after << 32ULL); mutex_unlock(&adev->gfx.gpu_clock_mutex); amdgpu_gfx_off_ctrl(adev, true); return clock; @@ -6191,7 +6206,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, .secure_submission_supported = true, - .vmhub = AMDGPU_GFXHUB_0, .get_rptr = gfx_v11_0_ring_get_rptr_gfx, .get_wptr = gfx_v11_0_ring_get_wptr_gfx, .set_wptr = gfx_v11_0_ring_set_wptr_gfx, @@ -6239,7 +6253,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { .align_mask = 0xff, .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, - .vmhub = AMDGPU_GFXHUB_0, .get_rptr = gfx_v11_0_ring_get_rptr_compute, .get_wptr = gfx_v11_0_ring_get_wptr_compute, .set_wptr = gfx_v11_0_ring_set_wptr_compute, @@ -6275,7 +6288,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = { .align_mask = 0xff, .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, - .vmhub = AMDGPU_GFXHUB_0, .get_rptr = gfx_v11_0_ring_get_rptr_compute, .get_wptr = gfx_v11_0_ring_get_wptr_compute, .set_wptr = gfx_v11_0_ring_set_wptr_compute, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c index b07a72ca25d9..068b9586a223 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c @@ -62,10 +62,18 @@ static int gfx_v11_0_3_rlc_gc_fed_irq(struct amdgpu_device *adev, return -EINVAL; } - ih_data.head = *ras_if; - dev_warn(adev->dev, "RLC %s FED IRQ\n", ras_if->name); - amdgpu_ras_interrupt_dispatch(adev, &ih_data); + + if (!amdgpu_sriov_vf(adev)) { + ih_data.head = *ras_if; + amdgpu_ras_interrupt_dispatch(adev, &ih_data); + } else { + if (adev->virt.ops && adev->virt.ops->ras_poison_handler) + adev->virt.ops->ras_poison_handler(adev); + else + dev_warn(adev->dev, + "No ras_poison_handler interface in SRIOV for %s!\n", ras_if->name); + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index ae09fc1cfe6b..adbcd8127c82 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -149,6 +149,16 @@ 
MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin"); #define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026 #define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1 +#define mmGOLDEN_TSC_COUNT_UPPER_Raven 0x007a +#define mmGOLDEN_TSC_COUNT_UPPER_Raven_BASE_IDX 0 +#define mmGOLDEN_TSC_COUNT_LOWER_Raven 0x007b +#define mmGOLDEN_TSC_COUNT_LOWER_Raven_BASE_IDX 0 + +#define mmGOLDEN_TSC_COUNT_UPPER_Raven2 0x0068 +#define mmGOLDEN_TSC_COUNT_UPPER_Raven2_BASE_IDX 0 +#define mmGOLDEN_TSC_COUNT_LOWER_Raven2 0x0069 +#define mmGOLDEN_TSC_COUNT_LOWER_Raven2_BASE_IDX 0 + enum ta_ras_gfx_subblock { /*CPC*/ TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0, @@ -1995,6 +2005,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX9_MEC_HPD_SIZE); + ring->vm_hub = AMDGPU_GFXHUB_0; sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP @@ -2094,6 +2105,7 @@ static int gfx_v9_0_sw_init(void *handle) /* disable scheduler on the real ring */ ring->no_scheduler = true; + ring->vm_hub = AMDGPU_GFXHUB_0; r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -2111,6 +2123,7 @@ static int gfx_v9_0_sw_init(void *handle) ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; ring->is_sw_ring = true; hw_prio = amdgpu_sw_ring_priority(i); + ring->vm_hub = AMDGPU_GFXHUB_0; r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, hw_prio, NULL); @@ -3988,6 +4001,36 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) preempt_enable(); clock = clock_lo | (clock_hi << 32ULL); break; + case IP_VERSION(9, 1, 0): + preempt_disable(); + clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); + clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); + hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); + /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. + */ + if (hi_check != clock_hi) { + clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); + clock_hi = hi_check; + } + preempt_enable(); + clock = clock_lo | (clock_hi << 32ULL); + break; + case IP_VERSION(9, 2, 2): + preempt_disable(); + clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); + clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); + hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); + /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. 
+ */ + if (hi_check != clock_hi) { + clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); + clock_hi = hi_check; + } + preempt_enable(); + clock = clock_lo | (clock_hi << 32ULL); + break; default: amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); @@ -6750,7 +6793,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, .secure_submission_supported = true, - .vmhub = AMDGPU_GFXHUB_0, .get_rptr = gfx_v9_0_ring_get_rptr_gfx, .get_wptr = gfx_v9_0_ring_get_wptr_gfx, .set_wptr = gfx_v9_0_ring_set_wptr_gfx, @@ -6804,7 +6846,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = { .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, .secure_submission_supported = true, - .vmhub = AMDGPU_GFXHUB_0, .get_rptr = amdgpu_sw_ring_get_rptr_gfx, .get_wptr = amdgpu_sw_ring_get_wptr_gfx, .set_wptr = amdgpu_sw_ring_set_wptr_gfx, @@ -6858,7 +6899,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { .align_mask = 0xff, .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, - .vmhub = AMDGPU_GFXHUB_0, .get_rptr = gfx_v9_0_ring_get_rptr_compute, .get_wptr = gfx_v9_0_ring_get_wptr_compute, .set_wptr = gfx_v9_0_ring_set_wptr_compute, @@ -6897,7 +6937,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { .align_mask = 0xff, .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, - .vmhub = AMDGPU_GFXHUB_0, .get_rptr = gfx_v9_0_ring_get_rptr_compute, .get_wptr = gfx_v9_0_ring_get_wptr_compute, .set_wptr = gfx_v9_0_ring_set_wptr_compute, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c new file mode 100644 index 000000000000..5f8500577c02 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -0,0 +1,430 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include <linux/firmware.h> + +#include "amdgpu.h" +#include "amdgpu_gfx.h" +#include "soc15.h" +#include "soc15_common.h" +#include "vega10_enum.h" + +#include "gc/gc_9_4_3_offset.h" +#include "gc/gc_9_4_3_sh_mask.h" + +#include "gfx_v9_4_3.h" + +#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L + +static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev) +{ + uint64_t clock; + + amdgpu_gfx_off_ctrl(adev, false); + mutex_lock(&adev->gfx.gpu_clock_mutex); + WREG32_SOC15(GC, 0, regRLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32_SOC15(GC, 0, regRLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32_SOC15(GC, 0, regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); + + return clock; +} + +static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev, + u32 se_num, + u32 sh_num, + u32 instance) +{ + u32 data; + + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, + INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, + INSTANCE_INDEX, instance); + + if (se_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, + SE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); + + if (sh_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, + SH_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); + + WREG32_SOC15_RLC_SHADOW_EX(reg, GC, 0, regGRBM_GFX_INDEX, data); +} + +static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) +{ + WREG32_SOC15_RLC(GC, 0, regSQ_IND_INDEX, + (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | + (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | + (address << SQ_IND_INDEX__INDEX__SHIFT) | + (SQ_IND_INDEX__FORCE_READ_MASK)); + return RREG32_SOC15(GC, 0, regSQ_IND_DATA); +} + +static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, + uint32_t wave, uint32_t thread, + uint32_t regno, uint32_t num, uint32_t *out) +{ + WREG32_SOC15_RLC(GC, 0, regSQ_IND_INDEX, + (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | + (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | + (regno << SQ_IND_INDEX__INDEX__SHIFT) | + (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) | + (SQ_IND_INDEX__FORCE_READ_MASK) | + (SQ_IND_INDEX__AUTO_INCR_MASK)); + while (num--) + *(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA); +} + +static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev, + uint32_t simd, uint32_t wave, + uint32_t *dst, int *no_fields) +{ + /* type 1 wave data */ + dst[(*no_fields)++] = 1; + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS); + dst[(*no_fields)++] = wave_read_ind(adev, simd, 
wave, ixSQ_WAVE_IB_DBG0); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); +} + +static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, + uint32_t wave, uint32_t start, + uint32_t size, uint32_t *dst) +{ + wave_read_regs(adev, simd, wave, 0, + start + SQIND_WAVE_SGPRS_OFFSET, size, dst); +} + +static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, + uint32_t wave, uint32_t thread, + uint32_t start, uint32_t size, + uint32_t *dst) +{ + wave_read_regs(adev, simd, wave, thread, + start + SQIND_WAVE_VGPRS_OFFSET, size, dst); +} + +static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, + u32 me, u32 pipe, u32 q, u32 vm) +{ + soc15_grbm_select(adev, me, pipe, q, vm); +} + +static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev) +{ + uint32_t rlc_setting; + + /* if RLC is not enabled, do nothing */ + rlc_setting = RREG32_SOC15(GC, 0, regRLC_CNTL); + if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) + return false; + + return true; +} + +static void gfx_v9_4_3_set_safe_mode(struct amdgpu_device *adev) +{ + uint32_t data; + unsigned i; + + data = RLC_SAFE_MODE__CMD_MASK; + data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); + WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data); + + /* wait for RLC_SAFE_MODE */ + for (i = 0; i < adev->usec_timeout; i++) { + if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) + break; + udelay(1); + } +} + +static void gfx_v9_4_3_unset_safe_mode(struct amdgpu_device *adev) +{ + uint32_t data; + + data = RLC_SAFE_MODE__CMD_MASK; + WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data); +} + +static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev) +{ + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + + return 0; +} + +static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev) +{ + u32 i, j, k; + u32 mask; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff); + for (k = 0; k < adev->usec_timeout; k++) { + if (RREG32_SOC15(GC, 0, regRLC_SERDES_CU_MASTER_BUSY) == 0) + break; + udelay(1); + } + if (k == adev->usec_timeout) { + gfx_v9_4_3_select_se_sh(adev, 0xffffffff, + 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + DRM_INFO("Timeout wait for RLC serdes %u,%u\n", + i, j); + return; + } + } + } + gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + + mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | + RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK | + RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK | + RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK; + for (k = 0; k < adev->usec_timeout; k++) { + if ((RREG32_SOC15(GC, 0, regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) + break; + udelay(1); + } +} + +static void gfx_v9_4_3_enable_gui_idle_interrupt(struct amdgpu_device *adev, + bool enable) +{ + u32 tmp; + + /* These interrupts should be enabled to drive DS clock */ + + tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0); + + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 
1 : 0); + if (adev->gfx.num_gfx_rings) + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0); + + WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp); +} + +static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev) +{ + WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0); + gfx_v9_4_3_enable_gui_idle_interrupt(adev, false); + gfx_v9_4_3_wait_for_rlc_serdes(adev); +} + +static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev) +{ + WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); + udelay(50); + WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); + udelay(50); +} + +static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev) +{ +#ifdef AMDGPU_RLC_DEBUG_RETRY + u32 rlc_ucode_ver; +#endif + + WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); + udelay(50); + + /* carrizo do enable cp interrupt after cp inited */ + if (!(adev->flags & AMD_IS_APU)) { + gfx_v9_4_3_enable_gui_idle_interrupt(adev, true); + udelay(50); + } + +#ifdef AMDGPU_RLC_DEBUG_RETRY + /* RLC_GPM_GENERAL_6 : RLC Ucode version */ + rlc_ucode_ver = RREG32_SOC15(GC, 0, regRLC_GPM_GENERAL_6); + if (rlc_ucode_ver == 0x108) { + dev_info(adev->dev, + "Using rlc debug ucode. regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n", + rlc_ucode_ver, adev->gfx.rlc_fw_version); + /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles, + * default is 0x9C4 to create a 100us interval */ + WREG32_SOC15(GC, 0, regRLC_GPM_TIMER_INT_3, 0x9C4); + /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr + * to disable the page fault retry interrupts, default is + * 0x100 (256) */ + WREG32_SOC15(GC, 0, regRLC_GPM_GENERAL_12, 0x100); + } +#endif +} + +static int gfx_v9_4_3_rlc_load_microcode(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v2_0 *hdr; + const __le32 *fw_data; + unsigned i, fw_size; + + if (!adev->gfx.rlc_fw) + return -EINVAL; + + hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; + amdgpu_ucode_print_rlc_hdr(&hdr->header); + + fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + + WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, + RLCG_UCODE_LOADING_START_ADDRESS); + for (i = 0; i < fw_size; i++) { + if (amdgpu_emu_mode == 1 && i % 100 == 0) { + dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i); + msleep(1); + } + WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); + } + WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); + + return 0; +} + +static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev) +{ + int r; + + adev->gfx.rlc.funcs->stop(adev); + + /* disable CG */ + WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0); + + /* TODO: revisit pg function */ + /* gfx_v9_4_3_init_pg(adev);*/ + + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + /* legacy rlc firmware loading */ + r = gfx_v9_4_3_rlc_load_microcode(adev); + if (r) + return r; + } + + adev->gfx.rlc.funcs->start(adev); + + return 0; +} + +static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 reg, data; + + reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); + if (amdgpu_sriov_is_pp_one_vf(adev)) + data = RREG32_NO_KIQ(reg); + else + data = RREG32(reg); + + data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; + + if (amdgpu_sriov_is_pp_one_vf(adev)) + WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, 
data); + else + WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data); +} + +static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = { + {SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)}, + {SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)}, +}; + +static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev, + uint32_t offset, + struct soc15_reg_rlcg *entries, int arr_size) +{ + int i; + uint32_t reg; + + if (!entries) + return false; + + for (i = 0; i < arr_size; i++) { + const struct soc15_reg_rlcg *entry; + + entry = &entries[i]; + reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg; + if (offset == reg) + return true; + } + + return false; +} + +static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset) +{ + return gfx_v9_4_3_check_rlcg_range(adev, offset, + (void *)rlcg_access_gc_9_4_3, + ARRAY_SIZE(rlcg_access_gc_9_4_3)); +} + +const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { + .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter, + .select_se_sh = &gfx_v9_4_3_select_se_sh, + .read_wave_data = &gfx_v9_4_3_read_wave_data, + .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs, + .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs, + .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, +}; + +const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = { + .is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled, + .set_safe_mode = gfx_v9_4_3_set_safe_mode, + .unset_safe_mode = gfx_v9_4_3_unset_safe_mode, + .init = gfx_v9_4_3_rlc_init, + .resume = gfx_v9_4_3_rlc_resume, + .stop = gfx_v9_4_3_rlc_stop, + .reset = gfx_v9_4_3_rlc_reset, + .start = gfx_v9_4_3_rlc_start, + .update_spm_vmid = gfx_v9_4_3_update_spm_vmid, + .is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h new file mode 100644 index 000000000000..84e69701b81a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h @@ -0,0 +1,30 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __GFX_V9_4_3_H__ +#define __GFX_V9_4_3_H__ + +extern const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs; +extern const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs; + +#endif /* __GFX_V9_4_3_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c new file mode 100644 index 000000000000..c59c6c85fbff --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -0,0 +1,471 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "gfxhub_v1_2.h" +#include "gfxhub_v1_1.h" + +#include "gc/gc_9_4_3_offset.h" +#include "gc/gc_9_4_3_sh_mask.h" +#include "vega10_enum.h" + +#include "soc15_common.h" + +#define regVM_L2_CNTL3_DEFAULT 0x80100007 +#define regVM_L2_CNTL4_DEFAULT 0x000000c1 + +static u64 gfxhub_v1_2_get_mc_fb_offset(struct amdgpu_device *adev) +{ + return (u64)RREG32_SOC15(GC, 0, regMC_VM_FB_OFFSET) << 24; +} + +static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, + uint32_t vmid, + uint64_t page_table_base) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + + WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + hub->ctx_addr_distance * vmid, + lower_32_bits(page_table_base)); + + WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + hub->ctx_addr_distance * vmid, + upper_32_bits(page_table_base)); +} + +static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev) +{ + uint64_t pt_base; + + if (adev->gmc.pdb0_bo) + pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo); + else + pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + gfxhub_v1_2_setup_vm_pt_regs(adev, 0, pt_base); + + /* If use GART for FB translation, vmid0 page table covers both + * vram and system memory (gart) + */ + if (adev->gmc.pdb0_bo) { + WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.fb_start >> 12)); + WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.fb_start >> 44)); + + WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); + } else { + WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.gart_start >> 12)); + WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.gart_start >> 44)); + + WREG32_SOC15(GC, 
0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); + } +} + +static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev) +{ + uint64_t value; + uint32_t tmp; + + /* Program the AGP BAR */ + WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_BASE, 0); + WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + + if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) { + /* Program the system aperture low logical page number. */ + WREG32_SOC15_RLC(GC, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + + if (adev->apu_flags & AMD_APU_IS_RAVEN2) + /* + * Raven2 has a HW issue that it is unable to use the + * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. + * So here is the workaround that increase system + * aperture high address (add 1) to get rid of the VM + * fault and hardware hang. + */ + WREG32_SOC15_RLC(GC, 0, + regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max((adev->gmc.fb_end >> 18) + 0x1, + adev->gmc.agp_end >> 18)); + else + WREG32_SOC15_RLC(GC, 0, + regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + + /* Set default page address. */ + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); + WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + (u32)(value >> 12)); + WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + (u32)(value >> 44)); + + /* Program "protection fault". */ + WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, + (u32)(adev->dummy_page_addr >> 12)); + WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, + (u32)((u64)adev->dummy_page_addr >> 44)); + + tmp = RREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL2); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2, + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); + WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL2, tmp); + } + + /* In the case squeezing vram into GART aperture, we don't use + * FB aperture and AGP aperture. Disable them. + */ + if (adev->gmc.pdb0_bo) { + WREG32_SOC15(GC, 0, regMC_VM_FB_LOCATION_TOP, 0); + WREG32_SOC15(GC, 0, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF); + WREG32_SOC15(GC, 0, regMC_VM_AGP_TOP, 0); + WREG32_SOC15(GC, 0, regMC_VM_AGP_BOT, 0xFFFFFF); + WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF); + WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0); + } +} + +static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* Setup TLB control */ + tmp = RREG32_SOC15(GC, 0, regMC_VM_MX_L1_TLB_CNTL); + + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + MTYPE, MTYPE_UC);/* XXX for emulation. 
*/ + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); + + WREG32_SOC15_RLC(GC, 0, regMC_VM_MX_L1_TLB_CNTL, tmp); +} + +static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* Setup L2 cache */ + tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); + /* XXX for emulation, Refer to closed source code.*/ + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, + 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0); + WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL, tmp); + + tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL2); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); + WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL2, tmp); + + tmp = regVM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + } else { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } + WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL3, tmp); + + tmp = regVM_L2_CNTL4_DEFAULT; + if (adev->gmc.xgmi.connected_to_cpu) { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1); + } else { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0); + } + WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL4, tmp); +} + +static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev) +{ + uint32_t tmp; + + tmp = RREG32_SOC15(GC, 0, regVM_CONTEXT0_CNTL); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, + adev->gmc.vmid0_page_table_depth); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE, + adev->gmc.vmid0_page_table_block_size); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); + WREG32_SOC15(GC, 0, regVM_CONTEXT0_CNTL, tmp); +} + +static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev) +{ + WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, + 0XFFFFFFFF); + WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, + 0x0000000F); + + WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, + 0); + WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, + 0); + + WREG32_SOC15(GC, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0); + WREG32_SOC15(GC, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0); + +} + +static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + unsigned num_level, block_size; + uint32_t tmp; + int i; + + num_level = adev->vm_manager.num_level; + block_size = adev->vm_manager.block_size; + if (adev->gmc.translate_further) + num_level -= 1; + else + block_size -= 9; + + for (i = 0; i <= 14; i++) { + tmp = RREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT1_CNTL, i); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); + tmp = 
REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, + num_level); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, + 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + PAGE_TABLE_BLOCK_SIZE, + block_size); + /* Send no-retry XNACK on fault to suppress VM fault storm. + * On Aldebaran, XNACK can be enabled in the SQ per-process. + * Retry faults need to be enabled for that to work. + */ + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, + !adev->gmc.noretry || + adev->asic_type == CHIP_ALDEBARAN); + WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT1_CNTL, + i * hub->ctx_distance, tmp); + WREG32_SOC15_OFFSET(GC, 0, + regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(GC, 0, + regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(GC, 0, + regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, + i * hub->ctx_addr_distance, + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32_SOC15_OFFSET(GC, 0, + regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, + i * hub->ctx_addr_distance, + upper_32_bits(adev->vm_manager.max_pfn - 1)); + } +} + +static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + unsigned i; + + for (i = 0 ; i < 18; ++i) { + WREG32_SOC15_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, + i * hub->eng_addr_distance, 0xffffffff); + WREG32_SOC15_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, + i * hub->eng_addr_distance, 0x1f); + } +} + +static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev) && adev->asic_type != CHIP_ARCTURUS) { + /* + * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are + * VF copy registers so vbios post doesn't program them, for + * SRIOV driver need to program them + */ + WREG32_SOC15_RLC(GC, 0, regMC_VM_FB_LOCATION_BASE, + adev->gmc.vram_start >> 24); + WREG32_SOC15_RLC(GC, 0, regMC_VM_FB_LOCATION_TOP, + adev->gmc.vram_end >> 24); + } + + /* GART Enable. 
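+ * Program the apertures and TLB/L2 setup first, then the system domain and + * per-VMID contexts, and finally the invalidation engine address ranges.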
*/ + gfxhub_v1_2_init_gart_aperture_regs(adev); + gfxhub_v1_2_init_system_aperture_regs(adev); + gfxhub_v1_2_init_tlb_regs(adev); + if (!amdgpu_sriov_vf(adev)) + gfxhub_v1_2_init_cache_regs(adev); + + gfxhub_v1_2_enable_system_domain(adev); + if (!amdgpu_sriov_vf(adev)) + gfxhub_v1_2_disable_identity_aperture(adev); + gfxhub_v1_2_setup_vmid_config(adev); + gfxhub_v1_2_program_invalidation(adev); + + return 0; +} + +static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + u32 tmp; + u32 i; + + /* Disable all tables */ + for (i = 0; i < 16; i++) + WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_CNTL, + i * hub->ctx_distance, 0); + + /* Setup TLB control */ + tmp = RREG32_SOC15(GC, 0, regMC_VM_MX_L1_TLB_CNTL); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); + tmp = REG_SET_FIELD(tmp, + MC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, + 0); + WREG32_SOC15_RLC(GC, 0, regMC_VM_MX_L1_TLB_CNTL, tmp); + + /* Setup L2 cache */ + tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); + WREG32_SOC15(GC, 0, regVM_L2_CNTL, tmp); + WREG32_SOC15(GC, 0, regVM_L2_CNTL3, 0); +} + +/** + * gfxhub_v1_2_set_fault_enable_default - update GART/VM fault handling + * + * @adev: amdgpu_device pointer + * @value: true redirects VM faults to the default page + */ +static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, + bool value) +{ + u32 tmp; + tmp = RREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, + VM_L2_PROTECTION_FAULT_CNTL, + TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + if (!value) { + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_NO_RETRY_FAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_RETRY_FAULT, 1); + } + WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL, tmp); +} + +static void gfxhub_v1_2_init(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + + hub->ctx0_ptb_addr_lo32 = + SOC15_REG_OFFSET(GC, 0, + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32); + hub->ctx0_ptb_addr_hi32 = + SOC15_REG_OFFSET(GC, 0, + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_SEM); + hub->vm_inv_eng0_req = + SOC15_REG_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_REQ); + hub->vm_inv_eng0_ack = + SOC15_REG_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ACK); + 
hub->vm_context0_cntl = + SOC15_REG_OFFSET(GC, 0, regVM_CONTEXT0_CNTL); + hub->vm_l2_pro_fault_status = + SOC15_REG_OFFSET(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS); + hub->vm_l2_pro_fault_cntl = + SOC15_REG_OFFSET(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL); + + hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL; + hub->ctx_addr_distance = regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + hub->eng_distance = regVM_INVALIDATE_ENG1_REQ - regVM_INVALIDATE_ENG0_REQ; + hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - + regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; +} + + +const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = { + .get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset, + .setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs, + .gart_enable = gfxhub_v1_2_gart_enable, + .gart_disable = gfxhub_v1_2_gart_disable, + .set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default, + .init = gfxhub_v1_2_init, + .get_xgmi_info = gfxhub_v1_1_get_xgmi_info, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h new file mode 100644 index 000000000000..e2d508f5a7ee --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h @@ -0,0 +1,29 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __GFXHUB_V1_2_H__ +#define __GFXHUB_V1_2_H__ + +extern const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c index be0d0f47415e..13712640fa46 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c @@ -417,34 +417,12 @@ static void gfxhub_v3_0_set_fault_enable_default(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, CP_DEBUG, CPG_UTCL1_ERROR_HALT_DISABLE, 1); WREG32_SOC15(GC, 0, regCP_DEBUG, tmp); - /** - * Set GRBM_GFX_INDEX in broad cast mode - * before programming GL1C_UTCL0_CNTL1 and SQG_CONFIG - */ - WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, regGRBM_GFX_INDEX_DEFAULT); - - /** - * Retry respond mode: RETRY - * Error (no retry) respond mode: SUCCESS - */ - tmp = RREG32_SOC15(GC, 0, regGL1C_UTCL0_CNTL1); - tmp = REG_SET_FIELD(tmp, GL1C_UTCL0_CNTL1, RESP_MODE, 0); - tmp = REG_SET_FIELD(tmp, GL1C_UTCL0_CNTL1, RESP_FAULT_MODE, 0x2); - WREG32_SOC15(GC, 0, regGL1C_UTCL0_CNTL1, tmp); - /* These registers are not accessible to VF-SRIOV. * The PF will program them instead. 
*/ if (amdgpu_sriov_vf(adev)) return; - /* Disable SQ XNACK interrupt for all VMIDs */ - tmp = RREG32_SOC15(GC, 0, regSQG_CONFIG); - tmp = REG_SET_FIELD(tmp, SQG_CONFIG, XNACK_INTR_MASK, - SQG_CONFIG__XNACK_INTR_MASK_MASK >> - SQG_CONFIG__XNACK_INTR_MASK__SHIFT); - WREG32_SOC15(GC, 0, regSQG_CONFIG, tmp); - tmp = RREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index ab2556ca984e..b213dcf8ca06 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -479,8 +479,8 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { - bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub); - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->vm_hub); + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0); unsigned eng = ring->vm_inv_eng; @@ -534,7 +534,7 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid if (ring->is_mes_queue) return; - if (ring->funcs->vmhub == AMDGPU_GFXHUB_0) + if (ring->vm_hub == AMDGPU_GFXHUB_0) reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; else reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid; @@ -699,25 +699,8 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev) default: break; } - if (adev->umc.ras) { - amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block); - - strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc"); - adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC; - adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; - adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm; - - /* If don't define special ras_late_init function, use default ras_late_init */ - if (!adev->umc.ras->ras_block.ras_late_init) - adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init; - - /* If not defined special ras_cb function, use default ras_cb */ - if (!adev->umc.ras->ras_block.ras_cb) - adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb; - } } - static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev) { switch (adev->ip_versions[MMHUB_HWIP][0]) { @@ -754,7 +737,6 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev) static int gmc_v10_0_early_init(void *handle) { - int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; gmc_v10_0_set_mmhub_funcs(adev); @@ -770,10 +752,6 @@ static int gmc_v10_0_early_init(void *handle) adev->gmc.private_aperture_end = adev->gmc.private_aperture_start + (4ULL << 30) - 1; - r = amdgpu_gmc_ras_early_init(adev); - if (r) - return r; - return 0; } @@ -1024,6 +1002,10 @@ static int gmc_v10_0_sw_init(void *handle) amdgpu_vm_manager_init(adev); + r = amdgpu_gmc_ras_sw_init(adev); + if (r) + return r; + return 0; } @@ -1161,7 +1143,6 @@ static int gmc_v10_0_hw_fini(void *handle) return 0; } - amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index af7b3ba1ca00..d95f9fe8f1c5 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -274,6 +274,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, * * @adev: amdgpu_device pointer * @vmid: vm instance to flush + * @vmhub: which hub to flush + * @flush_type: the flush type * * Flush the TLB for the requested page table. */ @@ -313,6 +315,8 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * * @adev: amdgpu_device pointer * @pasid: pasid to be flush + * @flush_type: the flush type + * @all_hub: flush all hubs * * Flush the TLB for the requested pasid. */ @@ -374,8 +378,8 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { - bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub); - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub); + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0); unsigned eng = ring->vm_inv_eng; @@ -429,7 +433,7 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid if (ring->is_mes_queue) return; - if (ring->funcs->vmhub == AMDGPU_GFXHUB_0) + if (ring->vm_hub == AMDGPU_GFXHUB_0) reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid; else reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid; @@ -581,23 +585,6 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev) default: break; } - - if (adev->umc.ras) { - amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block); - - strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc"); - adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC; - adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; - adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm; - - /* If don't define special ras_late_init function, use default ras_late_init */ - if (!adev->umc.ras->ras_block.ras_late_init) - adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init; - - /* If not define special ras_cb function, use default ras_cb */ - if (!adev->umc.ras->ras_block.ras_cb) - adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb; - } } @@ -846,6 +833,10 @@ static int gmc_v11_0_sw_init(void *handle) amdgpu_vm_manager_init(adev); + r = amdgpu_gmc_ras_sw_init(adev); + if (r) + return r; + return 0; } @@ -875,6 +866,12 @@ static int gmc_v11_0_sw_fini(void *handle) static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev) { + if (amdgpu_sriov_vf(adev)) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + + WREG32(hub->vm_contexts_disable, 0); + return; + } } /** @@ -954,7 +951,6 @@ static int gmc_v11_0_hw_fini(void *handle) return 0; } - amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); gmc_v11_0_gart_disable(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index b06170c00dfc..2fe21cefd772 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -49,8 +49,10 @@ #include "mmhub_v1_0.h" #include "athub_v1_0.h" #include "gfxhub_v1_1.h" +#include "gfxhub_v1_2.h" #include "mmhub_v9_4.h" #include "mmhub_v1_7.h" +#include "mmhub_v1_8.h" #include "umc_v6_1.h" #include "umc_v6_0.h" #include "umc_v6_7.h" @@ -553,32 
+555,49 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, const char *mmhub_cid; const char *hub_name; u64 addr; + uint32_t cam_index = 0; + int ret; addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44; if (retry_fault) { - /* Returning 1 here also prevents sending the IV to the KFD */ + if (adev->irq.retry_cam_enabled) { + /* Delegate it to a different ring if the hardware hasn't + * already done it. + */ + if (entry->ih == &adev->irq.ih) { + amdgpu_irq_delegate(adev, entry, 8); + return 1; + } + + cam_index = entry->src_data[2] & 0x3ff; - /* Process it onyl if it's the first fault for this address */ - if (entry->ih != &adev->irq.ih_soft && - amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, + ret = amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault); + WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index); + if (ret) + return 1; + } else { + /* Process it onyl if it's the first fault for this address */ + if (entry->ih != &adev->irq.ih_soft && + amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, entry->timestamp)) - return 1; + return 1; - /* Delegate it to a different ring if the hardware hasn't - * already done it. - */ - if (entry->ih == &adev->irq.ih) { - amdgpu_irq_delegate(adev, entry, 8); - return 1; - } + /* Delegate it to a different ring if the hardware hasn't + * already done it. + */ + if (entry->ih == &adev->irq.ih) { + amdgpu_irq_delegate(adev, entry, 8); + return 1; + } - /* Try to handle the recoverable page faults by filling page - * tables - */ - if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault)) - return 1; + /* Try to handle the recoverable page faults by filling page + * tables + */ + if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault)) + return 1; + } } if (!printk_ratelimit()) @@ -657,6 +676,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, case IP_VERSION(2, 4, 0): mmhub_cid = mmhub_client_ids_renoir[cid][rw]; break; + case IP_VERSION(1, 8, 0): case IP_VERSION(9, 4, 2): mmhub_cid = mmhub_client_ids_aldebaran[cid][rw]; break; @@ -735,7 +755,8 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, uint32_t vmhub) { - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) return false; return ((vmhub == AMDGPU_MMHUB_0 || @@ -986,9 +1007,9 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { - bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub); + bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub); struct amdgpu_device *adev = ring->adev; - struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub]; + struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub]; uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0); unsigned eng = ring->vm_inv_eng; @@ -1039,10 +1060,10 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, uint32_t reg; /* Do nothing because there's no lut register for mmhub1. 
*/ - if (ring->funcs->vmhub == AMDGPU_MMHUB_1) + if (ring->vm_hub == AMDGPU_MMHUB_1) return; - if (ring->funcs->vmhub == AMDGPU_GFXHUB_0) + if (ring->vm_hub == AMDGPU_GFXHUB_0) reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; else reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid; @@ -1144,6 +1165,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 4, 1): case IP_VERSION(9, 4, 2): + case IP_VERSION(9, 4, 3): if (is_vram) { if (bo_adev == adev) { if (uncached) @@ -1155,8 +1177,8 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, /* FIXME: is this still needed? Or does * amdgpu_ttm_tt_pde_flags already handle this? */ - if (adev->ip_versions[GC_HWIP][0] == - IP_VERSION(9, 4, 2) && + if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) && adev->gmc.xgmi.connected_to_cpu) snoop = true; } else { @@ -1318,23 +1340,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) default: break; } - - if (adev->umc.ras) { - amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block); - - strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc"); - adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC; - adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; - adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm; - - /* If don't define special ras_late_init function, use default ras_late_init */ - if (!adev->umc.ras->ras_block.ras_late_init) - adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init; - - /* If not defined special ras_cb function, use default ras_cb */ - if (!adev->umc.ras->ras_block.ras_cb) - adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb; - } } static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) @@ -1346,6 +1351,9 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) case IP_VERSION(9, 4, 2): adev->mmhub.funcs = &mmhub_v1_7_funcs; break; + case IP_VERSION(1, 8, 0): + adev->mmhub.funcs = &mmhub_v1_8_funcs; + break; default: adev->mmhub.funcs = &mmhub_v1_0_funcs; break; @@ -1368,45 +1376,47 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) /* mmhub ras is not available */ break; } - - if (adev->mmhub.ras) { - amdgpu_ras_register_ras_block(adev, &adev->mmhub.ras->ras_block); - - strcpy(adev->mmhub.ras->ras_block.ras_comm.name, "mmhub"); - adev->mmhub.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB; - adev->mmhub.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; - adev->mmhub.ras_if = &adev->mmhub.ras->ras_block.ras_comm; - } } static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) { - adev->gfxhub.funcs = &gfxhub_v1_0_funcs; + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) + adev->gfxhub.funcs = &gfxhub_v1_2_funcs; + else + adev->gfxhub.funcs = &gfxhub_v1_0_funcs; } static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev) { adev->hdp.ras = &hdp_v4_0_ras; - amdgpu_ras_register_ras_block(adev, &adev->hdp.ras->ras_block); - adev->hdp.ras_if = &adev->hdp.ras->ras_block.ras_comm; } -static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev) +static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev) { + struct amdgpu_mca *mca = &adev->mca; + /* is UMC the right IP to check for MCA? Maybe DF? 
*/ switch (adev->ip_versions[UMC_HWIP][0]) { case IP_VERSION(6, 7, 0): - if (!adev->gmc.xgmi.connected_to_cpu) - adev->mca.funcs = &mca_v3_0_funcs; + if (!adev->gmc.xgmi.connected_to_cpu) { + mca->mp0.ras = &mca_v3_0_mp0_ras; + mca->mp1.ras = &mca_v3_0_mp1_ras; + mca->mpio.ras = &mca_v3_0_mpio_ras; + } break; default: break; } } +static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev) +{ + if (!adev->gmc.xgmi.connected_to_cpu) + adev->gmc.xgmi.ras = &xgmi_ras; +} + static int gmc_v9_0_early_init(void *handle) { - int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */ @@ -1427,7 +1437,8 @@ static int gmc_v9_0_early_init(void *handle) gmc_v9_0_set_mmhub_ras_funcs(adev); gmc_v9_0_set_gfxhub_funcs(adev); gmc_v9_0_set_hdp_ras_funcs(adev); - gmc_v9_0_set_mca_funcs(adev); + gmc_v9_0_set_mca_ras_funcs(adev); + gmc_v9_0_set_xgmi_ras_funcs(adev); adev->gmc.shared_aperture_start = 0x2000000000000000ULL; adev->gmc.shared_aperture_end = @@ -1436,10 +1447,6 @@ static int gmc_v9_0_early_init(void *handle) adev->gmc.private_aperture_end = adev->gmc.private_aperture_start + (4ULL << 30) - 1; - r = amdgpu_gmc_ras_early_init(adev); - if (r) - return r; - return 0; } @@ -1565,6 +1572,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) case IP_VERSION(9, 4, 0): case IP_VERSION(9, 4, 1): case IP_VERSION(9, 4, 2): + case IP_VERSION(9, 4, 3): default: adev->gmc.gart_size = 512ULL << 20; break; @@ -1644,8 +1652,6 @@ static int gmc_v9_0_sw_init(void *handle) adev->gfxhub.funcs->init(adev); adev->mmhub.funcs->init(adev); - if (adev->mca.funcs) - adev->mca.funcs->init(adev); spin_lock_init(&adev->gmc.invalidate_lock); @@ -1696,6 +1702,7 @@ static int gmc_v9_0_sw_init(void *handle) case IP_VERSION(9, 4, 0): case IP_VERSION(9, 3, 0): case IP_VERSION(9, 4, 2): + case IP_VERSION(9, 4, 3): adev->num_vmhubs = 2; @@ -1792,12 +1799,17 @@ static int gmc_v9_0_sw_init(void *handle) */ adev->vm_manager.first_kfd_vmid = (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) ? 3 : 8; + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) ? 
3 : 8; amdgpu_vm_manager_init(adev); gmc_v9_0_save_registers(adev); + r = amdgpu_gmc_ras_sw_init(adev); + if (r) + return r; + return 0; } @@ -1987,7 +1999,6 @@ static int gmc_v9_0_hw_fini(void *handle) if (adev->mmhub.funcs->update_power_gating) adev->mmhub.funcs->update_power_gating(adev, false); - amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c index adf89680f53e..71d1a2e3bac9 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c @@ -49,7 +49,8 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev, static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0)) + if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0) || + adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 2)) return; if (!ring || !ring->funcs->emit_wreg) @@ -160,11 +161,6 @@ struct amdgpu_ras_block_hw_ops hdp_v4_0_ras_hw_ops = { struct amdgpu_hdp_ras hdp_v4_0_ras = { .ras_block = { - .ras_comm = { - .name = "hdp", - .block = AMDGPU_RAS_BLOCK__HDP, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - }, .hw_ops = &hdp_v4_0_ras_hw_ops, }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c index 7cd79a3844b2..b02e1cef78a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c @@ -119,7 +119,7 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev, * ih_v6_0_toggle_ring_interrupts - toggle the interrupt ring buffer * * @adev: amdgpu_device pointer - * @ih: amdgpu_ih_ring pointet + * @ih: amdgpu_ih_ring pointer * @enable: true - enable the interrupts, false - disable the interrupts * * Toggle the interrupt ring buffer (IH_V6_0) @@ -381,6 +381,7 @@ static void ih_v6_0_irq_disable(struct amdgpu_device *adev) * ih_v6_0_get_wptr - get the IH ring buffer wptr * * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer * * Get the IH ring buffer wptr from either the register * or the writeback memory buffer. Also check for @@ -425,6 +426,7 @@ out: * ih_v6_0_irq_rearm - rearm IRQ if lost * * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer * */ static void ih_v6_0_irq_rearm(struct amdgpu_device *adev, @@ -450,6 +452,7 @@ static void ih_v6_0_irq_rearm(struct amdgpu_device *adev, * ih_v6_0_set_rptr - set the IH ring buffer rptr * * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer * * Set the IH ring buffer rptr. 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c index 9360204da7fb..a3076eb8af6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -376,7 +376,7 @@ static void jpeg_v1_0_decode_ring_emit_reg_wait(struct amdgpu_ring *ring, static void jpeg_v1_0_decode_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; uint32_t data0, data1, mask; pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); @@ -485,6 +485,7 @@ int jpeg_v1_0_sw_init(void *handle) return r; ring = &adev->jpeg.inst->ring_dec; + ring->vm_hub = AMDGPU_MMHUB_0; sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -548,7 +549,6 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = { .nop = PACKET0(0x81ff, 0), .support_64bit_ptrs = false, .no_user_fence = true, - .vmhub = AMDGPU_MMHUB_0, .extra_dw = 64, .get_rptr = jpeg_v1_0_decode_ring_get_rptr, .get_wptr = jpeg_v1_0_decode_ring_get_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index f3c1af5130ab..0eddf7c824a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -86,6 +86,7 @@ static int jpeg_v2_0_sw_init(void *handle) ring = &adev->jpeg.inst->ring_dec; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; + ring->vm_hub = AMDGPU_MMHUB_0; sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -613,7 +614,7 @@ void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; uint32_t data0, data1, mask; pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); @@ -762,7 +763,6 @@ static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = { static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, - .vmhub = AMDGPU_MMHUB_0, .get_rptr = jpeg_v2_0_dec_ring_get_rptr, .get_wptr = jpeg_v2_0_dec_ring_get_wptr, .set_wptr = jpeg_v2_0_dec_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index f2b743a93915..b040f51d9aa9 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -127,6 +127,10 @@ static int jpeg_v2_5_sw_init(void *handle) ring = &adev->jpeg.inst[i].ring_dec; ring->use_doorbell = true; + if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) + ring->vm_hub = AMDGPU_MMHUB_1; + else + ring->vm_hub = AMDGPU_MMHUB_0; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i; sprintf(ring->name, "jpeg_dec_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, @@ -138,6 +142,10 @@ static int jpeg_v2_5_sw_init(void *handle) adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH); } + r = amdgpu_jpeg_ras_sw_init(adev); + if (r) + return r; + return 0; } @@ -641,7 +649,6 @@ static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = { static const 
struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, - .vmhub = AMDGPU_MMHUB_1, .get_rptr = jpeg_v2_5_dec_ring_get_rptr, .get_wptr = jpeg_v2_5_dec_ring_get_wptr, .set_wptr = jpeg_v2_5_dec_ring_set_wptr, @@ -671,7 +678,6 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = { static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, - .vmhub = AMDGPU_MMHUB_0, .get_rptr = jpeg_v2_5_dec_ring_get_rptr, .get_wptr = jpeg_v2_5_dec_ring_get_wptr, .set_wptr = jpeg_v2_5_dec_ring_set_wptr, @@ -806,6 +812,4 @@ static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev) default: break; } - - jpeg_set_ras_funcs(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index a1b751d9ac06..c55e09432e26 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -100,6 +100,7 @@ static int jpeg_v3_0_sw_init(void *handle) ring = &adev->jpeg.inst->ring_dec; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; + ring->vm_hub = AMDGPU_MMHUB_0; sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -559,7 +560,6 @@ static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = { static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, - .vmhub = AMDGPU_MMHUB_0, .get_rptr = jpeg_v3_0_dec_ring_get_rptr, .get_wptr = jpeg_v3_0_dec_ring_get_wptr, .set_wptr = jpeg_v3_0_dec_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index 3beb731b2ce5..77e1e64aa1d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -28,6 +28,7 @@ #include "soc15d.h" #include "jpeg_v2_0.h" #include "jpeg_v4_0.h" +#include "mmsch_v4_0.h" #include "vcn/vcn_4_0_0_offset.h" #include "vcn/vcn_4_0_0_sh_mask.h" @@ -35,12 +36,15 @@ #define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f +static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev); static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev); static int jpeg_v4_0_set_powergating_state(void *handle, enum amd_powergating_state state); static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev); +static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring); + /** * jpeg_v4_0_early_init - set function pointers * @@ -103,7 +107,9 @@ static int jpeg_v4_0_sw_init(void *handle) ring = &adev->jpeg.inst->ring_dec; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; + ring->doorbell_index = amdgpu_sriov_vf(adev) ? 
(((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) : ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1); + ring->vm_hub = AMDGPU_MMHUB_0; + sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -113,6 +119,10 @@ static int jpeg_v4_0_sw_init(void *handle) adev->jpeg.internal.jpeg_pitch = regUVD_JPEG_PITCH_INTERNAL_OFFSET; adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH); + r = amdgpu_jpeg_ras_sw_init(adev); + if (r) + return r; + return 0; } @@ -149,16 +159,26 @@ static int jpeg_v4_0_hw_init(void *handle) struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; int r; - adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, - (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); + if (amdgpu_sriov_vf(adev)) { + r = jpeg_v4_0_start_sriov(adev); + if (r) + return r; + ring->wptr = 0; + ring->wptr_old = 0; + jpeg_v4_0_dec_ring_set_wptr(ring); + ring->sched.ready = true; + } else { + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); - WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL, - ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | - VCN_JPEG_DB_CTRL__EN_MASK); + WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL, + ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | + VCN_JPEG_DB_CTRL__EN_MASK); - r = amdgpu_ring_test_helper(ring); - if (r) - return r; + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n"); @@ -177,11 +197,11 @@ static int jpeg_v4_0_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; cancel_delayed_work_sync(&adev->vcn.idle_work); - - if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && - RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) - jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE); - + if (!amdgpu_sriov_vf(adev)) { + if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && + RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) + jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE); + } amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0); return 0; @@ -386,6 +406,120 @@ static int jpeg_v4_0_start(struct amdgpu_device *adev) return 0; } +static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + uint64_t ctx_addr; + uint32_t param, resp, expected; + uint32_t tmp, timeout; + + struct amdgpu_mm_table *table = &adev->virt.mm_table; + uint32_t *table_loc; + uint32_t table_size; + uint32_t size, size_dw; + uint32_t init_status; + + struct mmsch_v4_0_cmd_direct_write + direct_wt = { {0} }; + struct mmsch_v4_0_cmd_end end = { {0} }; + struct mmsch_v4_0_init_header header; + + direct_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_WRITE; + end.cmd_header.command_type = + MMSCH_COMMAND__END; + + header.version = MMSCH_VERSION; + header.total_size = RREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE); + + header.jpegdec.init_status = 0; + header.jpegdec.table_offset = 0; + header.jpegdec.table_size = 0; + + table_loc = (uint32_t *)table->cpu_addr; + table_loc += header.total_size; + + table_size = 0; + + ring = &adev->jpeg.inst->ring_dec; + + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0, + regUVD_LMI_JRBC_RB_64BIT_BAR_LOW), + lower_32_bits(ring->gpu_addr)); + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0, + regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH), + upper_32_bits(ring->gpu_addr)); + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0, + regUVD_JRBC_RB_SIZE), ring->ring_size / 4); 
+ + /* add end packet */ + MMSCH_V4_0_INSERT_END(); + + /* refine header */ + header.jpegdec.init_status = 0; + header.jpegdec.table_offset = header.total_size; + header.jpegdec.table_size = table_size; + header.total_size += table_size; + + /* Update init table header in memory */ + size = sizeof(struct mmsch_v4_0_init_header); + table_loc = (uint32_t *)table->cpu_addr; + memcpy((void *)table_loc, &header, size); + + /* message MMSCH (in VCN[0]) to initialize this client + * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr + * of memory descriptor location + */ + ctx_addr = table->gpu_addr; + WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr)); + WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr)); + + /* 2, update vmid of descriptor */ + tmp = RREG32_SOC15(VCN, 0, regMMSCH_VF_VMID); + tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + /* use domain0 for MM scheduler */ + tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(VCN, 0, regMMSCH_VF_VMID, tmp); + + /* 3, notify mmsch about the size of this descriptor */ + size = header.total_size; + WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE, size); + + /* 4, set resp to zero */ + WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP, 0); + + /* 5, kick off the initialization and wait until + * MMSCH_VF_MAILBOX_RESP becomes non-zero + */ + param = 0x00000001; + WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_HOST, param); + tmp = 0; + timeout = 1000; + resp = 0; + expected = MMSCH_VF_MAILBOX_RESP__OK; + init_status = ((struct mmsch_v4_0_init_header *)(table_loc))->jpegdec.init_status; + while (resp != expected) { + resp = RREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP); + + if (resp != 0) + break; + udelay(10); + tmp = tmp + 10; + if (tmp >= timeout) { + DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\ + " waiting for regMMSCH_VF_MAILBOX_RESP "\ + "(expected=0x%08x, readback=0x%08x)\n", + tmp, expected, resp); + return -EBUSY; + } + } + if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE && init_status != MMSCH_VF_ENGINE_STATUS__PASS) + DRM_ERROR("MMSCH init status is incorrect! 
readback=0x%08x, header init status for jpeg: %x\n", resp, init_status); + + return 0; + +} + /** * jpeg_v4_0_stop - stop JPEG block * @@ -509,6 +643,11 @@ static int jpeg_v4_0_set_powergating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret; + if (amdgpu_sriov_vf(adev)) { + adev->jpeg.cur_state = AMD_PG_STATE_UNGATE; + return 0; + } + if (state == adev->jpeg.cur_state) return 0; @@ -577,7 +716,6 @@ static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = { static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, - .vmhub = AMDGPU_MMHUB_0, .get_rptr = jpeg_v4_0_dec_ring_get_rptr, .get_wptr = jpeg_v4_0_dec_ring_get_wptr, .set_wptr = jpeg_v4_0_dec_ring_set_wptr, @@ -685,6 +823,4 @@ static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev) default: break; } - - jpeg_set_ras_funcs(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c index d4bd7d1d2649..6dae4a2e2767 100644 --- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c @@ -51,19 +51,13 @@ static int mca_v3_0_ras_block_match(struct amdgpu_ras_block_object *block_obj, return -EINVAL; } -const struct amdgpu_ras_block_hw_ops mca_v3_0_mp0_hw_ops = { +static const struct amdgpu_ras_block_hw_ops mca_v3_0_mp0_hw_ops = { .query_ras_error_count = mca_v3_0_mp0_query_ras_error_count, .query_ras_error_address = NULL, }; struct amdgpu_mca_ras_block mca_v3_0_mp0_ras = { .ras_block = { - .ras_comm = { - .block = AMDGPU_RAS_BLOCK__MCA, - .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP0, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .name = "mp0", - }, .hw_ops = &mca_v3_0_mp0_hw_ops, .ras_block_match = mca_v3_0_ras_block_match, }, @@ -77,19 +71,13 @@ static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev, ras_error_status); } -const struct amdgpu_ras_block_hw_ops mca_v3_0_mp1_hw_ops = { +static const struct amdgpu_ras_block_hw_ops mca_v3_0_mp1_hw_ops = { .query_ras_error_count = mca_v3_0_mp1_query_ras_error_count, .query_ras_error_address = NULL, }; struct amdgpu_mca_ras_block mca_v3_0_mp1_ras = { .ras_block = { - .ras_comm = { - .block = AMDGPU_RAS_BLOCK__MCA, - .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP1, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .name = "mp1", - }, .hw_ops = &mca_v3_0_mp1_hw_ops, .ras_block_match = mca_v3_0_ras_block_match, }, @@ -103,40 +91,14 @@ static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev, ras_error_status); } -const struct amdgpu_ras_block_hw_ops mca_v3_0_mpio_hw_ops = { +static const struct amdgpu_ras_block_hw_ops mca_v3_0_mpio_hw_ops = { .query_ras_error_count = mca_v3_0_mpio_query_ras_error_count, .query_ras_error_address = NULL, }; struct amdgpu_mca_ras_block mca_v3_0_mpio_ras = { .ras_block = { - .ras_comm = { - .block = AMDGPU_RAS_BLOCK__MCA, - .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MPIO, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .name = "mpio", - }, .hw_ops = &mca_v3_0_mpio_hw_ops, .ras_block_match = mca_v3_0_ras_block_match, }, }; - - -static void mca_v3_0_init(struct amdgpu_device *adev) -{ - struct amdgpu_mca *mca = &adev->mca; - - mca->mp0.ras = &mca_v3_0_mp0_ras; - mca->mp1.ras = &mca_v3_0_mp1_ras; - mca->mpio.ras = &mca_v3_0_mpio_ras; - amdgpu_ras_register_ras_block(adev, &mca->mp0.ras->ras_block); - amdgpu_ras_register_ras_block(adev, &mca->mp1.ras->ras_block); - amdgpu_ras_register_ras_block(adev, &mca->mpio.ras->ras_block); - mca->mp0.ras_if = 
&mca->mp0.ras->ras_block.ras_comm; - mca->mp1.ras_if = &mca->mp1.ras->ras_block.ras_comm; - mca->mpio.ras_if = &mca->mpio.ras->ras_block.ras_comm; -} - -const struct amdgpu_mca_funcs mca_v3_0_funcs = { - .init = mca_v3_0_init, -};
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h index b899b86194c2..d3eaef0d7f2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h @@ -21,6 +21,8 @@ #ifndef __MCA_V3_0_H__ #define __MCA_V3_0_H__ -extern const struct amdgpu_mca_funcs mca_v3_0_funcs; +extern struct amdgpu_mca_ras_block mca_v3_0_mp0_ras; +extern struct amdgpu_mca_ras_block mca_v3_0_mp1_ras; +extern struct amdgpu_mca_ras_block mca_v3_0_mpio_ras; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 5826eac270d7..45280f047180 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -33,14 +33,19 @@ #include "mes_v11_api_def.h" MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes_2.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes1.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes_2.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes1.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes_2.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes1.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes_2.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes1.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes_2.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes1.bin"); static int mes_v11_0_hw_fini(void *handle); @@ -1089,13 +1094,14 @@ static int mes_v11_0_sw_fini(void *handle) return 0; } -static void mes_v11_0_kiq_dequeue_sched(struct amdgpu_device *adev) +static void mes_v11_0_kiq_dequeue(struct amdgpu_ring *ring) { uint32_t data; int i; + struct amdgpu_device *adev = ring->adev; mutex_lock(&adev->srbm_mutex); - soc21_grbm_select(adev, 3, AMDGPU_MES_SCHED_PIPE, 0, 0); + soc21_grbm_select(adev, 3, ring->pipe, 0, 0); /* disable the queue if it's active */ if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { @@ -1121,8 +1127,6 @@ static void mes_v11_0_kiq_dequeue_sched(struct amdgpu_device *adev) soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - - adev->mes.ring.sched.ready = false; } static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring) @@ -1139,6 +1143,16 @@ static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring) WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); } +static void mes_v11_0_kiq_clear(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* tell RLC which is KIQ dequeue */ + tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); + tmp &= ~RLC_CP_SCHEDULERS__scheduler0_MASK; + WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); +} + static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) { int r = 0; @@ -1176,11 +1190,17 @@ failure: static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev) { - if (adev->mes.ring.sched.ready) - mes_v11_0_kiq_dequeue_sched(adev); + if (adev->mes.ring.sched.ready) { + mes_v11_0_kiq_dequeue(&adev->mes.ring); + adev->mes.ring.sched.ready = false; + } - if (!amdgpu_sriov_vf(adev)) - mes_v11_0_enable(adev, false); + if (amdgpu_sriov_vf(adev)) { + mes_v11_0_kiq_dequeue(&adev->gfx.kiq.ring); + mes_v11_0_kiq_clear(adev); + } + + mes_v11_0_enable(adev, false); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c new file mode 100644 index 000000000000..342d1702104c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -0,0 +1,477 @@ +/* + * Copyright 2022 Advanced 
Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "mmhub_v1_8.h" + +#include "mmhub/mmhub_1_8_0_offset.h" +#include "mmhub/mmhub_1_8_0_sh_mask.h" +#include "vega10_enum.h" + +#include "soc15_common.h" +#include "soc15.h" + +#define regVM_L2_CNTL3_DEFAULT 0x80100007 +#define regVM_L2_CNTL4_DEFAULT 0x000000c1 + +static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev) +{ + u64 base = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE); + u64 top = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP); + + base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK; + base <<= 24; + + top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK; + top <<= 24; + + adev->gmc.fb_start = base; + adev->gmc.fb_end = top; + + return base; +} + +static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base)); + + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base)); +} + +static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) +{ + uint64_t pt_base; + + if (adev->gmc.pdb0_bo) + pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo); + else + pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + mmhub_v1_8_setup_vm_pt_regs(adev, 0, pt_base); + + /* If use GART for FB translation, vmid0 page table covers both + * vram and system memory (gart) + */ + if (adev->gmc.pdb0_bo) { + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.fb_start >> 12)); + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.fb_start >> 44)); + + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); + + } else { + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.gart_start >> 12)); + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.gart_start >> 44)); + + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); + } +} + 
+static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev) +{ + uint64_t value; + uint32_t tmp; + + /* Program the AGP BAR */ + WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BASE, 0); + WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + + /* Program the system aperture low logical page number. */ + WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + + WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + + /* In the case squeezing vram into GART aperture, we don't use + * FB aperture and AGP aperture. Disable them. + */ + if (adev->gmc.pdb0_bo) { + WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, 0xFFFFFF); + WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, 0); + WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, 0); + WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF); + WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF); + WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0); + } + if (amdgpu_sriov_vf(adev)) + return; + + /* Set default page address. */ + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); + WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + (u32)(value >> 12)); + WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + (u32)(value >> 44)); + + /* Program "protection fault". */ + WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, + (u32)(adev->dummy_page_addr >> 12)); + WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, + (u32)((u64)adev->dummy_page_addr >> 44)); + + tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2, + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); + WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2, tmp); +} + +static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* Setup TLB control */ + tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL); + + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + MTYPE, MTYPE_UC);/* XXX for emulation. 
*/ + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); + + WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp); +} + +static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev) +{ + uint32_t tmp; + + if (amdgpu_sriov_vf(adev)) + return; + + /* Setup L2 cache */ + tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); + /* XXX for emulation, Refer to closed source code.*/ + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, + 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0); + WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp); + + tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); + WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2, tmp); + + tmp = regVM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + } else { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } + WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, tmp); + + tmp = regVM_L2_CNTL4_DEFAULT; + if (adev->gmc.xgmi.connected_to_cpu) { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, + VMC_TAP_PDE_REQUEST_PHYSICAL, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, + VMC_TAP_PTE_REQUEST_PHYSICAL, 1); + } else { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, + VMC_TAP_PDE_REQUEST_PHYSICAL, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, + VMC_TAP_PTE_REQUEST_PHYSICAL, 0); + } + WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL4, tmp); +} + +static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev) +{ + uint32_t tmp; + + tmp = RREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, + adev->gmc.vmid0_page_table_depth); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE, + adev->gmc.vmid0_page_table_block_size); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL, tmp); +} + +static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev)) + return; + + WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 0xFFFFFFFF); + WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, 0x0000000F); + + WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0); + WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0); + + WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0); + WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0); +} + +static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + unsigned num_level, block_size; + uint32_t tmp; + int i; + + num_level = adev->vm_manager.num_level; + block_size = adev->vm_manager.block_size; + if (adev->gmc.translate_further) + num_level -= 1; + else + block_size -= 9; + + for (i = 0; i <= 14; i++) { + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, 
regVM_CONTEXT1_CNTL, i); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, + num_level); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, + 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + PAGE_TABLE_BLOCK_SIZE, + block_size); + /* On Aldebaran, XNACK can be enabled in the SQ per-process. + * Retry faults need to be enabled for that to work. + */ + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, + 1); + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL, + i * hub->ctx_distance, tmp); + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, + i * hub->ctx_addr_distance, + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, + i * hub->ctx_addr_distance, + upper_32_bits(adev->vm_manager.max_pfn - 1)); + } +} + +static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + unsigned i; + + for (i = 0; i < 18; ++i) { + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, + i * hub->eng_addr_distance, 0xffffffff); + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, + i * hub->eng_addr_distance, 0x1f); + } +} + +static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev)) { + /* + * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are + * VF copy registers so vbios post doesn't program them, for + * SRIOV driver need to program them + */ + WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, + adev->gmc.vram_start >> 24); + WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, + adev->gmc.vram_end >> 24); + } + + /* GART Enable. 
*/ + mmhub_v1_8_init_gart_aperture_regs(adev); + mmhub_v1_8_init_system_aperture_regs(adev); + mmhub_v1_8_init_tlb_regs(adev); + mmhub_v1_8_init_cache_regs(adev); + + mmhub_v1_8_enable_system_domain(adev); + mmhub_v1_8_disable_identity_aperture(adev); + mmhub_v1_8_setup_vmid_config(adev); + mmhub_v1_8_program_invalidation(adev); + + return 0; +} + +static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + u32 tmp; + u32 i; + + /* Disable all tables */ + for (i = 0; i < 16; i++) + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL, + i * hub->ctx_distance, 0); + + /* Setup TLB control */ + tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 0); + WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp); + + if (!amdgpu_sriov_vf(adev)) { + /* Setup L2 cache */ + tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); + WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp); + WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, 0); + } +} + +/** + * mmhub_v1_8_set_fault_enable_default - update GART/VM fault handling + * + * @adev: amdgpu_device pointer + * @value: true redirects VM faults to the default page + */ +static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool value) +{ + u32 tmp; + + if (amdgpu_sriov_vf(adev)) + return; + + tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + if (!value) { + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_NO_RETRY_FAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_RETRY_FAULT, 1); + } + + WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL, tmp); +} + +static void mmhub_v1_8_init(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + + hub->ctx0_ptb_addr_lo32 = + SOC15_REG_OFFSET(MMHUB, 0, + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32); + hub->ctx0_ptb_addr_hi32 = + SOC15_REG_OFFSET(MMHUB, 0, + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_req = + SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_REQ); + hub->vm_inv_eng0_ack = + SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ACK); + hub->vm_context0_cntl = + SOC15_REG_OFFSET(MMHUB, 0, 
regVM_CONTEXT0_CNTL); + hub->vm_l2_pro_fault_status = + SOC15_REG_OFFSET(MMHUB, 0, regVM_L2_PROTECTION_FAULT_STATUS); + hub->vm_l2_pro_fault_cntl = + SOC15_REG_OFFSET(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL); + + hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL; + hub->ctx_addr_distance = regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + hub->eng_distance = regVM_INVALIDATE_ENG1_REQ - regVM_INVALIDATE_ENG0_REQ; + hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - + regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; + +} + +static int mmhub_v1_8_set_clockgating(struct amdgpu_device *adev, + enum amd_clockgating_state state) +{ + return 0; +} + +static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags) +{ + +} + +const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = { + .get_fb_location = mmhub_v1_8_get_fb_location, + .init = mmhub_v1_8_init, + .gart_enable = mmhub_v1_8_gart_enable, + .set_fault_enable_default = mmhub_v1_8_set_fault_enable_default, + .gart_disable = mmhub_v1_8_gart_disable, + .setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs, + .set_clockgating = mmhub_v1_8_set_clockgating, + .get_clockgating = mmhub_v1_8_get_clockgating, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h new file mode 100644 index 000000000000..0bb36200e4e5 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h @@ -0,0 +1,28 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __MMHUB_V1_8_H__ +#define __MMHUB_V1_8_H__ + +extern const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c index 164948c50ac3..17a792616979 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c @@ -517,6 +517,9 @@ static void mmhub_v3_0_init(struct amdgpu_device *adev) hub->vm_l2_bank_select_reserved_cid2 = SOC15_REG_OFFSET(MMHUB, 0, regMMVM_L2_BANK_SELECT_RESERVED_CID2); + hub->vm_contexts_disable = + SOC15_REG_OFFSET(MMHUB, 0, regMMVM_CONTEXTS_DISABLE); + hub->vmhub_funcs = &mmhub_v3_0_vmhub_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h index 0312c71c3af9..83653a50a1a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h @@ -38,6 +38,11 @@ #define MMSCH_VF_MAILBOX_RESP__OK 0x1 #define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2 +#define MMSCH_VF_ENGINE_STATUS__PASS 0x1 + +#define MMSCH_VF_MAILBOX_RESP__OK 0x1 +#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2 + enum mmsch_v4_0_command_type { MMSCH_COMMAND__DIRECT_REG_WRITE = 0, MMSCH_COMMAND__DIRECT_REG_POLLING = 2, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c index 09fdcd20cb91..d5ed9e0e1a5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c @@ -26,6 +26,7 @@ #include "nbio/nbio_4_3_0_offset.h" #include "nbio/nbio_4_3_0_sh_mask.h" +#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" #include <uapi/linux/kfd_ioctl.h> static void nbio_v4_3_remap_hdp_registers(struct amdgpu_device *adev) @@ -538,3 +539,81 @@ const struct amdgpu_nbio_funcs nbio_v4_3_sriov_funcs = { .remap_hdp_registers = nbio_v4_3_remap_hdp_registers, .get_rom_offset = nbio_v4_3_get_rom_offset, }; + +static int nbio_v4_3_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + /* The ras_controller_irq enablement should be done in psp bl when it + * tries to enable ras feature. Driver only need to set the correct interrupt + * vector for bare-metal and sriov use case respectively + */ + uint32_t bif_doorbell_int_cntl; + + bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL); + bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl, + BIF_BX0_BIF_DOORBELL_INT_CNTL, + RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE, + (state == AMDGPU_IRQ_STATE_ENABLE) ? 0 : 1); + WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl); + + return 0; +} + +static int nbio_v4_3_process_err_event_athub_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + /* By design, the ih cookie for err_event_athub_irq should be written + * to bif ring. since bif ring is not enabled, just leave process callback + * as a dummy one. 
+ */ + return 0; +} + +static const struct amdgpu_irq_src_funcs nbio_v4_3_ras_err_event_athub_irq_funcs = { + .set = nbio_v4_3_set_ras_err_event_athub_irq_state, + .process = nbio_v4_3_process_err_event_athub_irq, +}; + +static void nbio_v4_3_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev) +{ + uint32_t bif_doorbell_int_cntl; + + bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL); + if (REG_GET_FIELD(bif_doorbell_int_cntl, + BIF_DOORBELL_INT_CNTL, + RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) { + /* driver has to clear the interrupt status when bif ring is disabled */ + bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl, + BIF_DOORBELL_INT_CNTL, + RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1); + WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl); + amdgpu_ras_global_ras_isr(adev); + } +} + +static int nbio_v4_3_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev) +{ + + int r; + + /* init the irq funcs */ + adev->nbio.ras_err_event_athub_irq.funcs = + &nbio_v4_3_ras_err_event_athub_irq_funcs; + adev->nbio.ras_err_event_athub_irq.num_types = 1; + + /* register ras err event athub interrupt + * nbio v4_3 uses the same irq source as nbio v7_4 */ + r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_BIF, + NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT, + &adev->nbio.ras_err_event_athub_irq); + + return r; +} + +struct amdgpu_nbio_ras nbio_v4_3_ras = { + .handle_ras_err_event_athub_intr_no_bifring = nbio_v4_3_handle_ras_err_event_athub_intr_no_bifring, + .init_ras_err_event_athub_interrupt = nbio_v4_3_init_ras_err_event_athub_interrupt, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h index 711999ceedf4..399037cdf4fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h @@ -29,5 +29,6 @@ extern const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg; extern const struct amdgpu_nbio_funcs nbio_v4_3_funcs; extern const struct amdgpu_nbio_funcs nbio_v4_3_sriov_funcs; +extern struct amdgpu_nbio_ras nbio_v4_3_ras; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 19455a725939..685abf57ffdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -238,7 +238,7 @@ static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev, if (use_doorbell) { ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index); - ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 4); + ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 8); } else ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c new file mode 100644 index 000000000000..24d12075ca3a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -0,0 +1,369 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "amdgpu_atombios.h" +#include "nbio_v7_9.h" +#include "amdgpu_ras.h" + +#include "nbio/nbio_7_9_0_offset.h" +#include "nbio/nbio_7_9_0_sh_mask.h" +#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" +#include <uapi/linux/kfd_ioctl.h> + +static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev) +{ + WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL); + WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL); +} + +static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0); + tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, STRAP_ATI_REV_ID_DEV0_F0); + + return tmp; +} + +static void nbio_v7_9_mc_access_enable(struct amdgpu_device *adev, bool enable) +{ + if (enable) + WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, + BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK | BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK); + else + WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0); +} + +static u32 nbio_v7_9_get_memsize(struct amdgpu_device *adev) +{ + return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE); +} + +static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instance, + bool use_doorbell, int doorbell_index, int doorbell_size) +{ + u32 doorbell_range = 0, doorbell_ctrl = 0; + + doorbell_range = + REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0, + BIF_DOORBELL0_RANGE_OFFSET_ENTRY, doorbell_index); + doorbell_range = + REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0, + BIF_DOORBELL0_RANGE_SIZE_ENTRY, doorbell_size); + doorbell_ctrl = + REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_ENABLE, 1); + doorbell_ctrl = + REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_RANGE_SIZE, doorbell_size); + + switch (instance) { + case 0: + WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_1, doorbell_range); + + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_AWID, 0xe); + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_RANGE_OFFSET, 0xe); + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, + 0x1); + WREG32_SOC15(NBIO, 0, 
regS2A_DOORBELL_ENTRY_1_CTRL, doorbell_ctrl); + break; + case 1: + WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_2, doorbell_range); + + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_AWID, 0x8); + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x8); + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, + 0x2); + WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_2_CTRL, doorbell_ctrl); + break; + case 2: + WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_3, doorbell_range); + + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_AWID, 0x9); + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x9); + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, + 0x8); + WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_5_CTRL, doorbell_ctrl); + break; + case 3: + WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_4, doorbell_range); + + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_AWID, 0xa); + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_RANGE_OFFSET, 0xa); + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, + 0x9); + WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_6_CTRL, doorbell_ctrl); + break; + default: + break; + }; + + return; +} + +static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell, + int doorbell_index, int instance) +{ + u32 doorbell_range = 0, doorbell_ctrl = 0; + + if (use_doorbell) { + doorbell_range = REG_SET_FIELD(doorbell_range, + DOORBELL0_CTRL_ENTRY_0, + BIF_DOORBELL0_RANGE_OFFSET_ENTRY, + doorbell_index); + doorbell_range = REG_SET_FIELD(doorbell_range, + DOORBELL0_CTRL_ENTRY_0, + BIF_DOORBELL0_RANGE_SIZE_ENTRY, + 0x8); + + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_ENABLE, 1); + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_AWID, 0x4); + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x4); + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_RANGE_SIZE, 0x8); + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x4); + } else { + doorbell_range = REG_SET_FIELD(doorbell_range, + DOORBELL0_CTRL_ENTRY_0, + BIF_DOORBELL0_RANGE_SIZE_ENTRY, 0); + doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_RANGE_SIZE, 0); + } + + WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17, doorbell_range); + WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL, doorbell_ctrl); +} + +static void nbio_v7_9_enable_doorbell_aperture(struct amdgpu_device *adev, + bool enable) +{ + /* Enable to allow doorbell pass thru on pre-silicon bare-metal */ + WREG32_SOC15(NBIO, 0, regBIFC_DOORBELL_ACCESS_EN_PF, 0xfffff); + WREG32_FIELD15_PREREG(NBIO, 0, RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN, + BIF_DOORBELL_APER_EN, enable ? 
1 : 0); +} + +static void nbio_v7_9_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, + bool enable) +{ + u32 tmp = 0; + + if (enable) { + tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, + DOORBELL_SELFRING_GPA_APER_EN, 1) | + REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, + DOORBELL_SELFRING_GPA_APER_MODE, 1) | + REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, + DOORBELL_SELFRING_GPA_APER_SIZE, 0); + + WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW, + lower_32_bits(adev->doorbell.base)); + WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH, + upper_32_bits(adev->doorbell.base)); + } + + WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, tmp); +} + +static void nbio_v7_9_ih_doorbell_range(struct amdgpu_device *adev, + bool use_doorbell, int doorbell_index) +{ + u32 ih_doorbell_range = 0, ih_doorbell_ctrl = 0; + + if (use_doorbell) { + ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, + DOORBELL0_CTRL_ENTRY_0, + BIF_DOORBELL0_RANGE_OFFSET_ENTRY, + doorbell_index); + ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, + DOORBELL0_CTRL_ENTRY_0, + BIF_DOORBELL0_RANGE_SIZE_ENTRY, + 0x4); + + ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_ENABLE, 1); + ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_AWID, 0); + ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_RANGE_OFFSET, 0); + ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_RANGE_SIZE, 0x4); + ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0); + } else { + ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, + DOORBELL0_CTRL_ENTRY_0, + BIF_DOORBELL0_RANGE_SIZE_ENTRY, 0); + ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl, + S2A_DOORBELL_ENTRY_1_CTRL, + S2A_DOORBELL_PORT1_RANGE_SIZE, 0); + } + + WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_0, ih_doorbell_range); + WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_3_CTRL, ih_doorbell_ctrl); +} + + +static void nbio_v7_9_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ +} + +static void nbio_v7_9_update_medium_grain_light_sleep(struct amdgpu_device *adev, + bool enable) +{ +} + +static void nbio_v7_9_get_clockgating_state(struct amdgpu_device *adev, + u64 *flags) +{ +} + +static void nbio_v7_9_ih_control(struct amdgpu_device *adev) +{ + u32 interrupt_cntl; + + /* setup interrupt control */ + WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL2, adev->dummy_page_addr >> 8); + interrupt_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL); + /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi + * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN + */ + interrupt_cntl = + REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0); + /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */ + interrupt_cntl = + REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0); + WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL, interrupt_cntl); +} + +static u32 nbio_v7_9_get_hdp_flush_req_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ); +} + +static u32 
nbio_v7_9_get_hdp_flush_done_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE); +} + +static u32 nbio_v7_9_get_pcie_index_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_INDEX2); +} + +static u32 nbio_v7_9_get_pcie_data_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_DATA2); +} + +const struct nbio_hdp_flush_reg nbio_v7_9_hdp_flush_reg = { + .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK, + .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK, + .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK, + .ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK, + .ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK, + .ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK, + .ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK, + .ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK, + .ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK, + .ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK, + .ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK, + .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK, + .ref_and_mask_sdma2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK, + .ref_and_mask_sdma3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK, + .ref_and_mask_sdma4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK, + .ref_and_mask_sdma5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK, + .ref_and_mask_sdma6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK, + .ref_and_mask_sdma7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK, +}; + +static void nbio_v7_9_enable_doorbell_interrupt(struct amdgpu_device *adev, + bool enable) +{ + WREG32_FIELD15_PREREG(NBIO, 0, BIF_BX0_BIF_DOORBELL_INT_CNTL, + DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1); +} + +const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { + .get_hdp_flush_req_offset = nbio_v7_9_get_hdp_flush_req_offset, + .get_hdp_flush_done_offset = nbio_v7_9_get_hdp_flush_done_offset, + .get_pcie_index_offset = nbio_v7_9_get_pcie_index_offset, + .get_pcie_data_offset = nbio_v7_9_get_pcie_data_offset, + .get_rev_id = nbio_v7_9_get_rev_id, + .mc_access_enable = nbio_v7_9_mc_access_enable, + .get_memsize = nbio_v7_9_get_memsize, + .sdma_doorbell_range = nbio_v7_9_sdma_doorbell_range, + .vcn_doorbell_range = nbio_v7_9_vcn_doorbell_range, + .enable_doorbell_aperture = nbio_v7_9_enable_doorbell_aperture, + .enable_doorbell_selfring_aperture = nbio_v7_9_enable_doorbell_selfring_aperture, + .ih_doorbell_range = nbio_v7_9_ih_doorbell_range, + .enable_doorbell_interrupt = nbio_v7_9_enable_doorbell_interrupt, + .update_medium_grain_clock_gating = nbio_v7_9_update_medium_grain_clock_gating, + .update_medium_grain_light_sleep = nbio_v7_9_update_medium_grain_light_sleep, + .get_clockgating_state = nbio_v7_9_get_clockgating_state, + .ih_control = nbio_v7_9_ih_control, + .remap_hdp_registers = nbio_v7_9_remap_hdp_registers, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h new file mode 100644 index 000000000000..8e04eb484328 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h @@ -0,0 +1,32 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __NBIO_V7_9_H__ +#define __NBIO_V7_9_H__ + +#include "soc15_common.h" + +extern const struct nbio_hdp_flush_reg nbio_v7_9_hdp_flush_reg; +extern const struct amdgpu_nbio_funcs nbio_v7_9_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index ebe0e2d7dbd1..98c826f1f89b 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -280,47 +280,6 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, } } -/* - * Indirect registers accessor - */ -static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg) -{ - unsigned long address, data; - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - return amdgpu_device_indirect_rreg(adev, address, data, reg); -} - -static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) -{ - unsigned long address, data; - - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - amdgpu_device_indirect_wreg(adev, address, data, reg, v); -} - -static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg) -{ - unsigned long address, data; - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - return amdgpu_device_indirect_rreg64(adev, address, data, reg); -} - -static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) -{ - unsigned long address, data; - - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - amdgpu_device_indirect_wreg64(adev, address, data, reg, v); -} - static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; @@ -561,21 +520,6 @@ static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) return 0; } -static void nv_pcie_gen3_enable(struct amdgpu_device *adev) -{ - if (pci_is_root_bus(adev->pdev->bus)) - return; - - if (amdgpu_pcie_gen2 == 0) - return; - - if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | - CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) - return; - - /* todo */ -} - static void nv_program_aspm(struct amdgpu_device *adev) { if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk()) @@ -587,13 +531,6 @@ static void nv_program_aspm(struct amdgpu_device *adev) } -static void 
nv_enable_doorbell_aperture(struct amdgpu_device *adev, - bool enable) -{ - adev->nbio.funcs->enable_doorbell_aperture(adev, enable); - adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); -} - const struct amdgpu_ip_block_version nv_common_ip_block = { .type = AMD_IP_BLOCK_TYPE_COMMON, @@ -608,11 +545,6 @@ void nv_set_virt_ops(struct amdgpu_device *adev) adev->virt.ops = &xgpu_nv_virt_ops; } -static uint32_t nv_get_rev_id(struct amdgpu_device *adev) -{ - return adev->nbio.funcs->get_rev_id(adev); -} - static bool nv_need_full_reset(struct amdgpu_device *adev) { return true; @@ -738,10 +670,10 @@ static int nv_common_early_init(void *handle) } adev->smc_rreg = NULL; adev->smc_wreg = NULL; - adev->pcie_rreg = &nv_pcie_rreg; - adev->pcie_wreg = &nv_pcie_wreg; - adev->pcie_rreg64 = &nv_pcie_rreg64; - adev->pcie_wreg64 = &nv_pcie_wreg64; + adev->pcie_rreg = &amdgpu_device_indirect_rreg; + adev->pcie_wreg = &amdgpu_device_indirect_wreg; + adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64; + adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64; adev->pciep_rreg = amdgpu_device_pcie_port_rreg; adev->pciep_wreg = amdgpu_device_pcie_port_wreg; @@ -754,7 +686,7 @@ static int nv_common_early_init(void *handle) adev->asic_funcs = &nv_asic_funcs; - adev->rev_id = nv_get_rev_id(adev); + adev->rev_id = amdgpu_device_get_rev_id(adev); adev->external_rev_id = 0xff; /* TODO: split the GC and PG flags based on the relevant IP version for which * they are relevant. @@ -1060,6 +992,11 @@ static int nv_common_late_init(void *handle) } } + /* Enable selfring doorbell aperture late because doorbell BAR + * aperture will change if resize BAR successfully in gmc sw_init. + */ + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true); + return 0; } @@ -1088,8 +1025,6 @@ static int nv_common_hw_init(void *handle) if (adev->nbio.funcs->apply_l1_link_width_reconfig_wa) adev->nbio.funcs->apply_l1_link_width_reconfig_wa(adev); - /* enable pcie gen2/3 link */ - nv_pcie_gen3_enable(adev); /* enable aspm */ nv_program_aspm(adev); /* setup nbio registers */ @@ -1101,7 +1036,7 @@ static int nv_common_hw_init(void *handle) if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev)) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ - nv_enable_doorbell_aperture(adev, true); + adev->nbio.funcs->enable_doorbell_aperture(adev, true); return 0; } @@ -1110,8 +1045,13 @@ static int nv_common_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* disable the doorbell aperture */ - nv_enable_doorbell_aperture(adev, false); + /* Disable the doorbell aperture and selfring doorbell aperture + * separately in hw_fini because nv_enable_doorbell_aperture + * has been removed and there is no need to delay disabling + * selfring doorbell. 
+ */ + adev->nbio.funcs->enable_doorbell_aperture(adev, false); + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index d62fcc77af95..caee76ab7110 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -48,6 +48,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin"); /* For large FW files the time to complete can be very long */ #define USBC_PD_POLLING_LIMIT_S 240 @@ -100,6 +101,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp) return err; break; case IP_VERSION(13, 0, 0): + case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 7): case IP_VERSION(13, 0, 10): err = psp_init_sos_microcode(psp, ucode_prefix); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index b5affba22156..b3cc04dd8653 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1823,6 +1823,15 @@ static int sdma_v4_0_sw_init(void *handle) /* doorbell size is 2 dwords, get DWORD offset */ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; + /* + * On Arcturus, SDMA instance 5~7 has a different vmhub + * type(AMDGPU_MMHUB_1). + */ + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) + ring->vm_hub = AMDGPU_MMHUB_1; + else + ring->vm_hub = AMDGPU_MMHUB_0; + sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, AMDGPU_SDMA_IRQ_INSTANCE0 + i, @@ -1841,6 +1850,11 @@ static int sdma_v4_0_sw_init(void *handle) ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; ring->doorbell_index += 0x400; + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) + ring->vm_hub = AMDGPU_MMHUB_1; + else + ring->vm_hub = AMDGPU_MMHUB_0; + sprintf(ring->name, "page%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, @@ -1870,7 +1884,7 @@ static int sdma_v4_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->sdma.instance[i].page); } - if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0) || + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) || adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) amdgpu_sdma_destroy_inst_ctx(adev, true); else @@ -2294,44 +2308,6 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = { .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), .support_64bit_ptrs = true, .secure_submission_supported = true, - .vmhub = AMDGPU_MMHUB_0, - .get_rptr = sdma_v4_0_ring_get_rptr, - .get_wptr = sdma_v4_0_ring_get_wptr, - .set_wptr = sdma_v4_0_ring_set_wptr, - .emit_frame_size = - 6 + /* sdma_v4_0_ring_emit_hdp_flush */ - 3 + /* hdp invalidate */ - 6 + /* sdma_v4_0_ring_emit_pipeline_sync */ - /* sdma_v4_0_ring_emit_vm_flush */ - SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + - SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + - 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */ - .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */ - .emit_ib = sdma_v4_0_ring_emit_ib, - .emit_fence = sdma_v4_0_ring_emit_fence, - .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync, - .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush, - .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush, - .test_ring = sdma_v4_0_ring_test_ring, - .test_ib = sdma_v4_0_ring_test_ib, - .insert_nop = 
sdma_v4_0_ring_insert_nop, - .pad_ib = sdma_v4_0_ring_pad_ib, - .emit_wreg = sdma_v4_0_ring_emit_wreg, - .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait, - .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, -}; - -/* - * On Arcturus, SDMA instance 5~7 has a different vmhub type(AMDGPU_MMHUB_1). - * So create a individual constant ring_funcs for those instances. - */ -static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs_2nd_mmhub = { - .type = AMDGPU_RING_TYPE_SDMA, - .align_mask = 0xf, - .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), - .support_64bit_ptrs = true, - .secure_submission_supported = true, - .vmhub = AMDGPU_MMHUB_1, .get_rptr = sdma_v4_0_ring_get_rptr, .get_wptr = sdma_v4_0_ring_get_wptr, .set_wptr = sdma_v4_0_ring_set_wptr, @@ -2364,40 +2340,6 @@ static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = { .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), .support_64bit_ptrs = true, .secure_submission_supported = true, - .vmhub = AMDGPU_MMHUB_0, - .get_rptr = sdma_v4_0_ring_get_rptr, - .get_wptr = sdma_v4_0_page_ring_get_wptr, - .set_wptr = sdma_v4_0_page_ring_set_wptr, - .emit_frame_size = - 6 + /* sdma_v4_0_ring_emit_hdp_flush */ - 3 + /* hdp invalidate */ - 6 + /* sdma_v4_0_ring_emit_pipeline_sync */ - /* sdma_v4_0_ring_emit_vm_flush */ - SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + - SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + - 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */ - .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */ - .emit_ib = sdma_v4_0_ring_emit_ib, - .emit_fence = sdma_v4_0_ring_emit_fence, - .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync, - .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush, - .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush, - .test_ring = sdma_v4_0_ring_test_ring, - .test_ib = sdma_v4_0_ring_test_ib, - .insert_nop = sdma_v4_0_ring_insert_nop, - .pad_ib = sdma_v4_0_ring_pad_ib, - .emit_wreg = sdma_v4_0_ring_emit_wreg, - .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait, - .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, -}; - -static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs_2nd_mmhub = { - .type = AMDGPU_RING_TYPE_SDMA, - .align_mask = 0xf, - .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), - .support_64bit_ptrs = true, - .secure_submission_supported = true, - .vmhub = AMDGPU_MMHUB_1, .get_rptr = sdma_v4_0_ring_get_rptr, .get_wptr = sdma_v4_0_page_ring_get_wptr, .set_wptr = sdma_v4_0_page_ring_set_wptr, @@ -2429,19 +2371,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->sdma.num_instances; i++) { - if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) - adev->sdma.instance[i].ring.funcs = - &sdma_v4_0_ring_funcs_2nd_mmhub; - else - adev->sdma.instance[i].ring.funcs = - &sdma_v4_0_ring_funcs; + adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs; adev->sdma.instance[i].ring.me = i; if (adev->sdma.has_page_queue) { - if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) - adev->sdma.instance[i].page.funcs = - &sdma_v4_0_page_ring_funcs_2nd_mmhub; - else - adev->sdma.instance[i].page.funcs = + adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs; adev->sdma.instance[i].page.me = i; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c new file mode 100644 index 000000000000..64dcaa2670dd --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -0,0 +1,1967 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "amdgpu.h" +#include "amdgpu_ucode.h" +#include "amdgpu_trace.h" + +#include "sdma/sdma_4_4_2_offset.h" +#include "sdma/sdma_4_4_2_sh_mask.h" + +#include "soc15_common.h" +#include "soc15.h" +#include "vega10_sdma_pkt_open.h" + +#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h" +#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h" + +#include "amdgpu_ras.h" + +MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin"); + +#define WREG32_SDMA(instance, offset, value) \ + WREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)), value) +#define RREG32_SDMA(instance, offset) \ + RREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset))) + +static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev); +static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev); +static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev); +static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev); + +static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev, + u32 instance, u32 offset) +{ + return (adev->reg_offset[SDMA0_HWIP][instance][0] + offset); +} + +static unsigned sdma_v4_4_2_seq_to_irq_id(int seq_num) +{ + switch (seq_num) { + case 0: + return SOC15_IH_CLIENTID_SDMA0; + case 1: + return SOC15_IH_CLIENTID_SDMA1; + case 2: + return SOC15_IH_CLIENTID_SDMA2; + case 3: + return SOC15_IH_CLIENTID_SDMA3; + default: + return -EINVAL; + } +} + +static int sdma_v4_4_2_irq_id_to_seq(unsigned client_id) +{ + switch (client_id) { + case SOC15_IH_CLIENTID_SDMA0: + return 0; + case SOC15_IH_CLIENTID_SDMA1: + return 1; + case SOC15_IH_CLIENTID_SDMA2: + return 2; + case SOC15_IH_CLIENTID_SDMA3: + return 3; + default: + return -EINVAL; + } +} + +static void sdma_v4_4_2_init_golden_registers(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 4, 2): + break; + default: + break; + } +} + +/** + * sdma_v4_4_2_init_microcode - load ucode images from disk + * + * @adev: amdgpu_device pointer + * + * Use the firmware interface to load the ucode images into + * the driver (not loaded into hw). + * Returns 0 on success, error on failure. 
+ */ +static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev) +{ + int ret, i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2)) { + ret = amdgpu_sdma_init_microcode(adev, 0, true); + break; + } else { + ret = amdgpu_sdma_init_microcode(adev, i, false); + if (ret) + return ret; + } + } + + return ret; +} + +/** + * sdma_v4_4_2_ring_get_rptr - get the current read pointer + * + * @ring: amdgpu ring pointer + * + * Get the current rptr from the hardware. + */ +static uint64_t sdma_v4_4_2_ring_get_rptr(struct amdgpu_ring *ring) +{ + u64 *rptr; + + /* XXX check if swapping is necessary on BE */ + rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]); + + DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr); + return ((*rptr) >> 2); +} + +/** + * sdma_v4_4_2_ring_get_wptr - get the current write pointer + * + * @ring: amdgpu ring pointer + * + * Get the current wptr from the hardware. + */ +static uint64_t sdma_v4_4_2_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u64 wptr; + + if (ring->use_doorbell) { + /* XXX check if swapping is necessary on BE */ + wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); + DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); + } else { + wptr = RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI); + wptr = wptr << 32; + wptr |= RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR); + DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", + ring->me, wptr); + } + + return wptr >> 2; +} + +/** + * sdma_v4_4_2_ring_set_wptr - commit the write pointer + * + * @ring: amdgpu ring pointer + * + * Write the wptr back to the hardware. + */ +static void sdma_v4_4_2_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + DRM_DEBUG("Setting write pointer\n"); + if (ring->use_doorbell) { + u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs]; + + DRM_DEBUG("Using doorbell -- " + "wptr_offs == 0x%08x " + "lower_32_bits(ring->wptr) << 2 == 0x%08x " + "upper_32_bits(ring->wptr) << 2 == 0x%08x\n", + ring->wptr_offs, + lower_32_bits(ring->wptr << 2), + upper_32_bits(ring->wptr << 2)); + /* XXX check if swapping is necessary on BE */ + WRITE_ONCE(*wb, (ring->wptr << 2)); + DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", + ring->doorbell_index, ring->wptr << 2); + WDOORBELL64(ring->doorbell_index, ring->wptr << 2); + } else { + DRM_DEBUG("Not using doorbell -- " + "regSDMA%i_GFX_RB_WPTR == 0x%08x " + "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n", + ring->me, + lower_32_bits(ring->wptr << 2), + ring->me, + upper_32_bits(ring->wptr << 2)); + WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR, + lower_32_bits(ring->wptr << 2)); + WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI, + upper_32_bits(ring->wptr << 2)); + } +} + +/** + * sdma_v4_4_2_page_ring_get_wptr - get the current write pointer + * + * @ring: amdgpu ring pointer + * + * Get the current wptr from the hardware. 
+ */ +static uint64_t sdma_v4_4_2_page_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u64 wptr; + + if (ring->use_doorbell) { + /* XXX check if swapping is necessary on BE */ + wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); + } else { + wptr = RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI); + wptr = wptr << 32; + wptr |= RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR); + } + + return wptr >> 2; +} + +/** + * sdma_v4_4_2_page_ring_set_wptr - commit the write pointer + * + * @ring: amdgpu ring pointer + * + * Write the wptr back to the hardware. + */ +static void sdma_v4_4_2_page_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) { + u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs]; + + /* XXX check if swapping is necessary on BE */ + WRITE_ONCE(*wb, (ring->wptr << 2)); + WDOORBELL64(ring->doorbell_index, ring->wptr << 2); + } else { + uint64_t wptr = ring->wptr << 2; + + WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR, + lower_32_bits(wptr)); + WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI, + upper_32_bits(wptr)); + } +} + +static void sdma_v4_4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +{ + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); + int i; + + for (i = 0; i < count; i++) + if (sdma && sdma->burst_nop && (i == 0)) + amdgpu_ring_write(ring, ring->funcs->nop | + SDMA_PKT_NOP_HEADER_COUNT(count - 1)); + else + amdgpu_ring_write(ring, ring->funcs->nop); +} + +/** + * sdma_v4_4_2_ring_emit_ib - Schedule an IB on the DMA engine + * + * @ring: amdgpu ring pointer + * @job: job to retrieve vmid from + * @ib: IB object to schedule + * @flags: unused + * + * Schedule an IB in the DMA ring. + */ +static void sdma_v4_4_2_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) +{ + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + + /* IB packet must end on a 8 DW boundary */ + sdma_v4_4_2_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | + SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); + /* base must be 32 byte aligned */ + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, ib->length_dw); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0); + +} + +static void sdma_v4_4_2_wait_reg_mem(struct amdgpu_ring *ring, + int mem_space, int hdp, + uint32_t addr0, uint32_t addr1, + uint32_t ref, uint32_t mask, + uint32_t inv) +{ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | + SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) | + SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) | + SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ + if (mem_space) { + /* memory */ + amdgpu_ring_write(ring, addr0); + amdgpu_ring_write(ring, addr1); + } else { + /* registers */ + amdgpu_ring_write(ring, addr0 << 2); + amdgpu_ring_write(ring, addr1 << 2); + } + amdgpu_ring_write(ring, ref); /* reference */ + amdgpu_ring_write(ring, mask); /* mask */ + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */ +} + +/** + * sdma_v4_4_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring + * + * @ring: amdgpu ring pointer + * + * Emit an hdp flush packet on the requested DMA ring. 
+ */ +static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 ref_and_mask = 0; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; + + ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; + + sdma_v4_4_2_wait_reg_mem(ring, 0, 1, + adev->nbio.funcs->get_hdp_flush_done_offset(adev), + adev->nbio.funcs->get_hdp_flush_req_offset(adev), + ref_and_mask, ref_and_mask, 10); +} + +/** + * sdma_v4_4_2_ring_emit_fence - emit a fence on the DMA ring + * + * @ring: amdgpu ring pointer + * @addr: address + * @seq: sequence number + * @flags: fence related flags + * + * Add a DMA fence packet to the ring to write + * the fence seq number and DMA trap packet to generate + * an interrupt if needed. + */ +static void sdma_v4_4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags) +{ + bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; + /* write the fence */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); + /* zero in first two bits */ + BUG_ON(addr & 0x3); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, lower_32_bits(seq)); + + /* optionally write high bits as well */ + if (write64bit) { + addr += 4; + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); + /* zero in first two bits */ + BUG_ON(addr & 0x3); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(seq)); + } + + /* generate an interrupt */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP)); + amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); +} + + +/** + * sdma_v4_4_2_gfx_stop - stop the gfx async dma engines + * + * @adev: amdgpu_device pointer + * + * Stop the gfx async dma ring buffers. + */ +static void sdma_v4_4_2_gfx_stop(struct amdgpu_device *adev) +{ + struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; + u32 rb_cntl, ib_cntl; + int i, unset = 0; + + for (i = 0; i < adev->sdma.num_instances; i++) { + sdma[i] = &adev->sdma.instance[i].ring; + + if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) { + amdgpu_ttm_set_buffer_funcs_status(adev, false); + unset = 1; + } + + rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0); + WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl); + ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0); + WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl); + } +} + +/** + * sdma_v4_4_2_rlc_stop - stop the compute async dma engines + * + * @adev: amdgpu_device pointer + * + * Stop the compute async dma queues. + */ +static void sdma_v4_4_2_rlc_stop(struct amdgpu_device *adev) +{ + /* XXX todo */ +} + +/** + * sdma_v4_4_2_page_stop - stop the page async dma engines + * + * @adev: amdgpu_device pointer + * + * Stop the page async dma ring buffers. 
+ */ +static void sdma_v4_4_2_page_stop(struct amdgpu_device *adev) +{ + struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; + u32 rb_cntl, ib_cntl; + int i; + bool unset = false; + + for (i = 0; i < adev->sdma.num_instances; i++) { + sdma[i] = &adev->sdma.instance[i].page; + + if ((adev->mman.buffer_funcs_ring == sdma[i]) && + (!unset)) { + amdgpu_ttm_set_buffer_funcs_status(adev, false); + unset = true; + } + + rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL, + RB_ENABLE, 0); + WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl); + ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, + IB_ENABLE, 0); + WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl); + } +} + +/** + * sdma_v4_4_2_ctx_switch_enable - stop the async dma engines context switch + * + * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs context switch. + * + * Halt or unhalt the async dma engines context switch. + */ +static void sdma_v4_4_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable) +{ + u32 f32_cntl, phase_quantum = 0; + int i; + + if (amdgpu_sdma_phase_quantum) { + unsigned value = amdgpu_sdma_phase_quantum; + unsigned unit = 0; + + while (value > (SDMA_PHASE0_QUANTUM__VALUE_MASK >> + SDMA_PHASE0_QUANTUM__VALUE__SHIFT)) { + value = (value + 1) >> 1; + unit++; + } + if (unit > (SDMA_PHASE0_QUANTUM__UNIT_MASK >> + SDMA_PHASE0_QUANTUM__UNIT__SHIFT)) { + value = (SDMA_PHASE0_QUANTUM__VALUE_MASK >> + SDMA_PHASE0_QUANTUM__VALUE__SHIFT); + unit = (SDMA_PHASE0_QUANTUM__UNIT_MASK >> + SDMA_PHASE0_QUANTUM__UNIT__SHIFT); + WARN_ONCE(1, + "clamping sdma_phase_quantum to %uK clock cycles\n", + value << unit); + } + phase_quantum = + value << SDMA_PHASE0_QUANTUM__VALUE__SHIFT | + unit << SDMA_PHASE0_QUANTUM__UNIT__SHIFT; + } + + for (i = 0; i < adev->sdma.num_instances; i++) { + f32_cntl = RREG32_SDMA(i, regSDMA_CNTL); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_CNTL, + AUTO_CTXSW_ENABLE, enable ? 1 : 0); + if (enable && amdgpu_sdma_phase_quantum) { + WREG32_SDMA(i, regSDMA_PHASE0_QUANTUM, phase_quantum); + WREG32_SDMA(i, regSDMA_PHASE1_QUANTUM, phase_quantum); + WREG32_SDMA(i, regSDMA_PHASE2_QUANTUM, phase_quantum); + } + WREG32_SDMA(i, regSDMA_CNTL, f32_cntl); + + /* Extend page fault timeout to avoid interrupt storm */ + WREG32_SDMA(i, regSDMA_UTCL1_TIMEOUT, 0x00800080); + } + +} + +/** + * sdma_v4_4_2_enable - stop the async dma engines + * + * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs. + * + * Halt or unhalt the async dma engines. + */ +static void sdma_v4_4_2_enable(struct amdgpu_device *adev, bool enable) +{ + u32 f32_cntl; + int i; + + if (!enable) { + sdma_v4_4_2_gfx_stop(adev); + sdma_v4_4_2_rlc_stop(adev); + if (adev->sdma.has_page_queue) + sdma_v4_4_2_page_stop(adev); + } + + for (i = 0; i < adev->sdma.num_instances; i++) { + f32_cntl = RREG32_SDMA(i, regSDMA_F32_CNTL); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_F32_CNTL, HALT, enable ? 
0 : 1); + WREG32_SDMA(i, regSDMA_F32_CNTL, f32_cntl); + } +} + +/* + * sdma_v4_4_2_rb_cntl - get parameters for rb_cntl + */ +static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl) +{ + /* Set ring buffer size in dwords */ + uint32_t rb_bufsz = order_base_2(ring->ring_size / 4); + + barrier(); /* work around https://bugs.llvm.org/show_bug.cgi?id=42576 */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SIZE, rb_bufsz); +#ifdef __BIG_ENDIAN + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, + RPTR_WRITEBACK_SWAP_ENABLE, 1); +#endif + return rb_cntl; +} + +/** + * sdma_v4_4_2_gfx_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * @i: instance to resume + * + * Set up the gfx DMA ring buffers and enable them. + * Returns 0 for success, error for failure. + */ +static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i) +{ + struct amdgpu_ring *ring = &adev->sdma.instance[i].ring; + u32 rb_cntl, ib_cntl, wptr_poll_cntl; + u32 wb_offset; + u32 doorbell; + u32 doorbell_offset; + u64 wptr_gpu_addr; + + wb_offset = (ring->rptr_offs * 4); + + rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL); + rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl); + WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0); + WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0); + WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0); + WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0); + + /* set the wb address whether it's enabled or not */ + WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_HI, + upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); + WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_LO, + lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, + RPTR_WRITEBACK_ENABLE, 1); + + WREG32_SDMA(i, regSDMA_GFX_RB_BASE, ring->gpu_addr >> 8); + WREG32_SDMA(i, regSDMA_GFX_RB_BASE_HI, ring->gpu_addr >> 40); + + ring->wptr = 0; + + /* before programing wptr to a less value, need set minor_ptr_update first */ + WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1); + + doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL); + doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET); + + doorbell = REG_SET_FIELD(doorbell, SDMA_GFX_DOORBELL, ENABLE, + ring->use_doorbell); + doorbell_offset = REG_SET_FIELD(doorbell_offset, + SDMA_GFX_DOORBELL_OFFSET, + OFFSET, ring->doorbell_index); + WREG32_SDMA(i, regSDMA_GFX_DOORBELL, doorbell); + WREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET, doorbell_offset); + + sdma_v4_4_2_ring_set_wptr(ring); + + /* set minor_ptr_update to 0 after wptr programed */ + WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 0); + + /* setup the wptr shadow polling */ + wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_LO, + lower_32_bits(wptr_gpu_addr)); + WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_HI, + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL); + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, + SDMA_GFX_RB_WPTR_POLL_CNTL, + F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 
1 : 0); + WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl); + + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 1); + WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl); + + ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 1); +#ifdef __BIG_ENDIAN + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); +#endif + /* enable DMA IBs */ + WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl); + + ring->sched.ready = true; +} + +/** + * sdma_v4_4_2_page_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * @i: instance to resume + * + * Set up the page DMA ring buffers and enable them. + * Returns 0 for success, error for failure. + */ +static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i) +{ + struct amdgpu_ring *ring = &adev->sdma.instance[i].page; + u32 rb_cntl, ib_cntl, wptr_poll_cntl; + u32 wb_offset; + u32 doorbell; + u32 doorbell_offset; + u64 wptr_gpu_addr; + + wb_offset = (ring->rptr_offs * 4); + + rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL); + rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl); + WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0); + WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0); + WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0); + WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0); + + /* set the wb address whether it's enabled or not */ + WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_HI, + upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); + WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_LO, + lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL, + RPTR_WRITEBACK_ENABLE, 1); + + WREG32_SDMA(i, regSDMA_PAGE_RB_BASE, ring->gpu_addr >> 8); + WREG32_SDMA(i, regSDMA_PAGE_RB_BASE_HI, ring->gpu_addr >> 40); + + ring->wptr = 0; + + /* before programing wptr to a less value, need set minor_ptr_update first */ + WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 1); + + doorbell = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL); + doorbell_offset = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET); + + doorbell = REG_SET_FIELD(doorbell, SDMA_PAGE_DOORBELL, ENABLE, + ring->use_doorbell); + doorbell_offset = REG_SET_FIELD(doorbell_offset, + SDMA_PAGE_DOORBELL_OFFSET, + OFFSET, ring->doorbell_index); + WREG32_SDMA(i, regSDMA_PAGE_DOORBELL, doorbell); + WREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET, doorbell_offset); + + /* paging queue doorbell range is setup at sdma_v4_4_2_gfx_resume */ + sdma_v4_4_2_page_ring_set_wptr(ring); + + /* set minor_ptr_update to 0 after wptr programed */ + WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 0); + + /* setup the wptr shadow polling */ + wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_LO, + lower_32_bits(wptr_gpu_addr)); + WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_HI, + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL); + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, + SDMA_PAGE_RB_WPTR_POLL_CNTL, + F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 
1 : 0); + WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl); + + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL, RB_ENABLE, 1); + WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl); + + ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_ENABLE, 1); +#ifdef __BIG_ENDIAN + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1); +#endif + /* enable DMA IBs */ + WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl); + + ring->sched.ready = true; +} + +static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev) +{ + +} + +/** + * sdma_v4_4_2_rlc_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the compute DMA queues and enable them. + * Returns 0 for success, error for failure. + */ +static int sdma_v4_4_2_rlc_resume(struct amdgpu_device *adev) +{ + sdma_v4_4_2_init_pg(adev); + + return 0; +} + +/** + * sdma_v4_4_2_load_microcode - load the sDMA ME ucode + * + * @adev: amdgpu_device pointer + * + * Loads the sDMA0/1 ucode. + * Returns 0 for success, -EINVAL if the ucode is not available. + */ +static int sdma_v4_4_2_load_microcode(struct amdgpu_device *adev) +{ + const struct sdma_firmware_header_v1_0 *hdr; + const __le32 *fw_data; + u32 fw_size; + int i, j; + + /* halt the MEs */ + sdma_v4_4_2_enable(adev, false); + + for (i = 0; i < adev->sdma.num_instances; i++) { + if (!adev->sdma.instance[i].fw) + return -EINVAL; + + hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; + amdgpu_ucode_print_sdma_hdr(&hdr->header); + fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + + fw_data = (const __le32 *) + (adev->sdma.instance[i].fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + WREG32_SDMA(i, regSDMA_UCODE_ADDR, 0); + + for (j = 0; j < fw_size; j++) + WREG32_SDMA(i, regSDMA_UCODE_DATA, + le32_to_cpup(fw_data++)); + + WREG32_SDMA(i, regSDMA_UCODE_ADDR, + adev->sdma.instance[i].fw_version); + } + + return 0; +} + +/** + * sdma_v4_4_2_start - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the DMA engines and enable them. + * Returns 0 for success, error for failure. 
+ */ +static int sdma_v4_4_2_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int i, r = 0; + + if (amdgpu_sriov_vf(adev)) { + sdma_v4_4_2_ctx_switch_enable(adev, false); + sdma_v4_4_2_enable(adev, false); + } else { + /* bypass sdma microcode loading on Gopher */ + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP && + !(adev->pdev->device == 0x49) && !(adev->pdev->device == 0x50)) { + r = sdma_v4_4_2_load_microcode(adev); + if (r) + return r; + } + + /* unhalt the MEs */ + sdma_v4_4_2_enable(adev, true); + /* enable sdma ring preemption */ + sdma_v4_4_2_ctx_switch_enable(adev, true); + } + + /* start the gfx rings and rlc compute queues */ + for (i = 0; i < adev->sdma.num_instances; i++) { + uint32_t temp; + + WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); + sdma_v4_4_2_gfx_resume(adev, i); + if (adev->sdma.has_page_queue) + sdma_v4_4_2_page_resume(adev, i); + + /* set utc l1 enable flag always to 1 */ + temp = RREG32_SDMA(i, regSDMA_CNTL); + temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1); + WREG32_SDMA(i, regSDMA_CNTL, temp); + + if (!amdgpu_sriov_vf(adev)) { + ring = &adev->sdma.instance[i].ring; + adev->nbio.funcs->sdma_doorbell_range(adev, i, + ring->use_doorbell, ring->doorbell_index, + adev->doorbell_index.sdma_doorbell_range); + + /* unhalt engine */ + temp = RREG32_SDMA(i, regSDMA_F32_CNTL); + temp = REG_SET_FIELD(temp, SDMA_F32_CNTL, HALT, 0); + WREG32_SDMA(i, regSDMA_F32_CNTL, temp); + } + } + + if (amdgpu_sriov_vf(adev)) { + sdma_v4_4_2_ctx_switch_enable(adev, true); + sdma_v4_4_2_enable(adev, true); + } else { + r = sdma_v4_4_2_rlc_resume(adev); + if (r) + return r; + } + + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; + + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + + if (adev->sdma.has_page_queue) { + struct amdgpu_ring *page = &adev->sdma.instance[i].page; + + r = amdgpu_ring_test_helper(page); + if (r) + return r; + + if (adev->mman.buffer_funcs_ring == page) + amdgpu_ttm_set_buffer_funcs_status(adev, true); + } + + if (adev->mman.buffer_funcs_ring == ring) + amdgpu_ttm_set_buffer_funcs_status(adev, true); + } + + return r; +} + +/** + * sdma_v4_4_2_ring_test_ring - simple async dma engine test + * + * @ring: amdgpu_ring structure holding ring information + * + * Test the DMA engine by writing using it to write an + * value to memory. + * Returns 0 for success, error for failure. 
+ */ +static int sdma_v4_4_2_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + unsigned i; + unsigned index; + int r; + u32 tmp; + u64 gpu_addr; + + r = amdgpu_device_wb_get(adev, &index); + if (r) + return r; + + gpu_addr = adev->wb.gpu_addr + (index * 4); + tmp = 0xCAFEDEAD; + adev->wb.wb[index] = cpu_to_le32(tmp); + + r = amdgpu_ring_alloc(ring, 5); + if (r) + goto error_free_wb; + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); + amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); + amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0)); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + +error_free_wb: + amdgpu_device_wb_free(adev, index); + return r; +} + +/** + * sdma_v4_4_2_ring_test_ib - test an IB on the DMA engine + * + * @ring: amdgpu_ring structure holding ring information + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT + * + * Test a simple IB in the DMA ring. + * Returns 0 on success, error on failure. + */ +static int sdma_v4_4_2_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_ib ib; + struct dma_fence *f = NULL; + unsigned index; + long r; + u32 tmp = 0; + u64 gpu_addr; + + r = amdgpu_device_wb_get(adev, &index); + if (r) + return r; + + gpu_addr = adev->wb.gpu_addr + (index * 4); + tmp = 0xCAFEDEAD; + adev->wb.wb[index] = cpu_to_le32(tmp); + memset(&ib, 0, sizeof(ib)); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); + if (r) + goto err0; + + ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); + ib.ptr[1] = lower_32_bits(gpu_addr); + ib.ptr[2] = upper_32_bits(gpu_addr); + ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0); + ib.ptr[4] = 0xDEADBEEF; + ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); + ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); + ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); + ib.length_dw = 8; + + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + if (r) + goto err1; + + r = dma_fence_wait_timeout(f, false, timeout); + if (r == 0) { + r = -ETIMEDOUT; + goto err1; + } else if (r < 0) { + goto err1; + } + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) + r = 0; + else + r = -EINVAL; + +err1: + amdgpu_ib_free(adev, &ib, NULL); + dma_fence_put(f); +err0: + amdgpu_device_wb_free(adev, index); + return r; +} + + +/** + * sdma_v4_4_2_vm_copy_pte - update PTEs by copying them from the GART + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @src: src addr to copy from + * @count: number of page entries to update + * + * Update PTEs by copying them from the GART using sDMA. 
+ */ +static void sdma_v4_4_2_vm_copy_pte(struct amdgpu_ib *ib, + uint64_t pe, uint64_t src, + unsigned count) +{ + unsigned bytes = count * 8; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = bytes - 1; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + +} + +/** + * sdma_v4_4_2_vm_write_pte - update PTEs by writing them manually + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @value: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * + * Update PTEs by writing them manually using sDMA. + */ +static void sdma_v4_4_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr) +{ + unsigned ndw = count * 2; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw - 1; + for (; ndw > 0; ndw -= 2) { + ib->ptr[ib->length_dw++] = lower_32_bits(value); + ib->ptr[ib->length_dw++] = upper_32_bits(value); + value += incr; + } +} + +/** + * sdma_v4_4_2_vm_set_pte_pde - update the page tables using sDMA + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: access flags + * + * Update the page tables using sDMA. + */ +static void sdma_v4_4_2_vm_set_pte_pde(struct amdgpu_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint64_t flags) +{ + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */ + ib->ptr[ib->length_dw++] = upper_32_bits(flags); + ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = count - 1; /* number of entries */ +} + +/** + * sdma_v4_4_2_ring_pad_ib - pad the IB to the required number of dw + * + * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to fill with padding + */ +static void sdma_v4_4_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) +{ + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); + u32 pad_count; + int i; + + pad_count = (-ib->length_dw) & 7; + for (i = 0; i < pad_count; i++) + if (sdma && sdma->burst_nop && (i == 0)) + ib->ptr[ib->length_dw++] = + SDMA_PKT_HEADER_OP(SDMA_OP_NOP) | + SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1); + else + ib->ptr[ib->length_dw++] = + SDMA_PKT_HEADER_OP(SDMA_OP_NOP); +} + + +/** + * sdma_v4_4_2_ring_emit_pipeline_sync - sync the pipeline + * + * @ring: amdgpu_ring pointer + * + * Make sure all previous operations are completed (CIK). 
+ */ +static void sdma_v4_4_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring) +{ + uint32_t seq = ring->fence_drv.sync_seq; + uint64_t addr = ring->fence_drv.gpu_addr; + + /* wait for idle */ + sdma_v4_4_2_wait_reg_mem(ring, 1, 0, + addr & 0xfffffffc, + upper_32_bits(addr) & 0xffffffff, + seq, 0xffffffff, 4); +} + + +/** + * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA + * + * @ring: amdgpu_ring pointer + * @vmid: vmid number to use + * @pd_addr: address + * + * Update the page table base and flush the VM TLB + * using sDMA. + */ +static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr) +{ + amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); +} + +static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring, + uint32_t reg, uint32_t val) +{ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, reg); + amdgpu_ring_write(ring, val); +} + +static void sdma_v4_4_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask) +{ + sdma_v4_4_2_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10); +} + +static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 4, 2): + return false; + default: + return false; + } +} + +static int sdma_v4_4_2_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = sdma_v4_4_2_init_microcode(adev); + if (r) { + DRM_ERROR("Failed to load sdma firmware!\n"); + return r; + } + + /* TODO: Page queue breaks driver reload under SRIOV */ + if (sdma_v4_4_2_fw_support_paging_queue(adev)) + adev->sdma.has_page_queue = true; + + sdma_v4_4_2_set_ring_funcs(adev); + sdma_v4_4_2_set_buffer_funcs(adev); + sdma_v4_4_2_set_vm_pte_funcs(adev); + sdma_v4_4_2_set_irq_funcs(adev); + + return 0; +} + +#if 0 +static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev, + void *err_data, + struct amdgpu_iv_entry *entry); +#endif + +static int sdma_v4_4_2_late_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; +#if 0 + struct ras_ih_if ih_info = { + .cb = sdma_v4_4_2_process_ras_data_cb, + }; +#endif + if (!amdgpu_persistent_edc_harvesting_supported(adev)) { + if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops && + adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count) + adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev); + } + + return 0; +} + +static int sdma_v4_4_2_sw_init(void *handle) +{ + struct amdgpu_ring *ring; + int r, i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* SDMA trap event */ + for (i = 0; i < adev->sdma.num_instances; i++) { + r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i), + SDMA0_4_0__SRCID__SDMA_TRAP, + &adev->sdma.trap_irq); + if (r) + return r; + } + + /* SDMA SRAM ECC event */ + for (i = 0; i < adev->sdma.num_instances; i++) { + r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i), + SDMA0_4_0__SRCID__SDMA_SRAM_ECC, + &adev->sdma.ecc_irq); + if (r) + return r; + } + + /* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event*/ + for (i = 0; i < adev->sdma.num_instances; i++) { + r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i), + SDMA0_4_0__SRCID__SDMA_VM_HOLE, + &adev->sdma.vm_hole_irq); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i), + SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID, + 
&adev->sdma.doorbell_invalid_irq); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i), + SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT, + &adev->sdma.pool_timeout_irq); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i), + SDMA0_4_0__SRCID__SDMA_SRBMWRITE, + &adev->sdma.srbm_write_irq); + if (r) + return r; + } + + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; + ring->ring_obj = NULL; + ring->use_doorbell = true; + + DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i, + ring->use_doorbell?"true":"false"); + + /* doorbell size is 2 dwords, get DWORD offset */ + ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; + ring->vm_hub = AMDGPU_MMHUB_0; + + sprintf(ring->name, "sdma%d", i); + r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, + AMDGPU_SDMA_IRQ_INSTANCE0 + i, + AMDGPU_RING_PRIO_DEFAULT, NULL); + if (r) + return r; + + if (adev->sdma.has_page_queue) { + ring = &adev->sdma.instance[i].page; + ring->ring_obj = NULL; + ring->use_doorbell = true; + + /* paging queue use same doorbell index/routing as gfx queue + * with 0x400 (4096 dwords) offset on second doorbell page + */ + ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; + ring->doorbell_index += 0x400; + ring->vm_hub = AMDGPU_MMHUB_0; + + sprintf(ring->name, "page%d", i); + r = amdgpu_ring_init(adev, ring, 1024, + &adev->sdma.trap_irq, + AMDGPU_SDMA_IRQ_INSTANCE0 + i, + AMDGPU_RING_PRIO_DEFAULT, NULL); + if (r) + return r; + } + } + + return r; +} + +static int sdma_v4_4_2_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + amdgpu_ring_fini(&adev->sdma.instance[i].ring); + if (adev->sdma.has_page_queue) + amdgpu_ring_fini(&adev->sdma.instance[i].page); + } + + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2)) + amdgpu_sdma_destroy_inst_ctx(adev, true); + else + amdgpu_sdma_destroy_inst_ctx(adev, false); + + return 0; +} + +static int sdma_v4_4_2_hw_init(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->flags & AMD_IS_APU) + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); + + if (!amdgpu_sriov_vf(adev)) + sdma_v4_4_2_init_golden_registers(adev); + + r = sdma_v4_4_2_start(adev); + + return r; +} + +static int sdma_v4_4_2_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; + + if (amdgpu_sriov_vf(adev)) + return 0; + + for (i = 0; i < adev->sdma.num_instances; i++) { + amdgpu_irq_put(adev, &adev->sdma.ecc_irq, + AMDGPU_SDMA_IRQ_INSTANCE0 + i); + } + + sdma_v4_4_2_ctx_switch_enable(adev, false); + sdma_v4_4_2_enable(adev, false); + + return 0; +} + +static int sdma_v4_4_2_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return sdma_v4_4_2_hw_fini(adev); +} + +static int sdma_v4_4_2_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return sdma_v4_4_2_hw_init(adev); +} + +static bool sdma_v4_4_2_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + u32 tmp = RREG32_SDMA(i, regSDMA_STATUS_REG); + + if (!(tmp & SDMA_STATUS_REG__IDLE_MASK)) + return false; + } + + return true; +} + +static int sdma_v4_4_2_wait_for_idle(void *handle) +{ + unsigned i, j; + u32 sdma[AMDGPU_MAX_SDMA_INSTANCES]; + struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < adev->usec_timeout; i++) { + for (j = 0; j < adev->sdma.num_instances; j++) { + sdma[j] = RREG32_SDMA(j, regSDMA_STATUS_REG); + if (!(sdma[j] & SDMA_STATUS_REG__IDLE_MASK)) + break; + } + if (j == adev->sdma.num_instances) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static int sdma_v4_4_2_soft_reset(void *handle) +{ + /* todo */ + + return 0; +} + +static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 sdma_cntl; + + sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, TRAP_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl); + + return 0; +} + +static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t instance; + + DRM_DEBUG("IH: SDMA trap\n"); + instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id); + switch (entry->ring_id) { + case 0: + amdgpu_fence_process(&adev->sdma.instance[instance].ring); + break; + default: + break; + } + return 0; +} + +#if 0 +static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev, + void *err_data, + struct amdgpu_iv_entry *entry) +{ + int instance; + + /* When “Full RAS” is enabled, the per-IP interrupt sources should + * be disabled and the driver should only look for the aggregated + * interrupt via sync flood + */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + goto out; + + instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id); + if (instance < 0) + goto out; + + amdgpu_sdma_process_ras_data_cb(adev, err_data, entry); + +out: + return AMDGPU_RAS_SUCCESS; +} +#endif + +static int sdma_v4_4_2_process_illegal_inst_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + int instance; + + DRM_ERROR("Illegal instruction in SDMA command stream\n"); + + instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id); + if (instance < 0) + return 0; + + switch (entry->ring_id) { + case 0: + drm_sched_fault(&adev->sdma.instance[instance].ring.sched); + break; + } + return 0; +} + +static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 sdma_edc_config; + + sdma_edc_config = RREG32_SDMA(type, regCC_SDMA_EDC_CONFIG); + /* + * FIXME: This was inherited from Aldebaran, but no this field + * definition in the regspec of both Aldebaran and SDMA 4.4.2 + */ + sdma_edc_config |= (state == AMDGPU_IRQ_STATE_ENABLE) ? 
(1 << 2) : 0; + WREG32_SDMA(type, regCC_SDMA_EDC_CONFIG, sdma_edc_config); + + return 0; +} + +static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry) +{ + int instance; + struct amdgpu_task_info task_info; + u64 addr; + + instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id); + if (instance < 0 || instance >= adev->sdma.num_instances) { + dev_err(adev->dev, "sdma instance invalid %d\n", instance); + return -EINVAL; + } + + addr = (u64)entry->src_data[0] << 12; + addr |= ((u64)entry->src_data[1] & 0xf) << 44; + + memset(&task_info, 0, sizeof(struct amdgpu_task_info)); + amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); + + dev_dbg_ratelimited(adev->dev, + "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u " + "pasid:%u, for process %s pid %d thread %s pid %d\n", + instance, addr, entry->src_id, entry->ring_id, entry->vmid, + entry->pasid, task_info.process_name, task_info.tgid, + task_info.task_name, task_info.pid); + return 0; +} + +static int sdma_v4_4_2_process_vm_hole_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + dev_dbg_ratelimited(adev->dev, "MC or SEM address in VM hole\n"); + sdma_v4_4_2_print_iv_entry(adev, entry); + return 0; +} + +static int sdma_v4_4_2_process_doorbell_invalid_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + + dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable !=0xff\n"); + sdma_v4_4_2_print_iv_entry(adev, entry); + return 0; +} + +static int sdma_v4_4_2_process_pool_timeout_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + dev_dbg_ratelimited(adev->dev, + "Polling register/memory timeout executing POLL_REG/MEM with finite timer\n"); + sdma_v4_4_2_print_iv_entry(adev, entry); + return 0; +} + +static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + dev_dbg_ratelimited(adev->dev, + "SDMA gets an Register Write SRBM_WRITE command in non-privilege command buffer\n"); + sdma_v4_4_2_print_iv_entry(adev, entry); + return 0; +} + +static void sdma_v4_4_2_update_medium_grain_clock_gating( + struct amdgpu_device *adev, + bool enable) +{ + uint32_t data, def; + int i; + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL); + data &= ~(SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK | + SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK | + SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK | + SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK | + SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK | + SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK | + SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK | + SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK); + if (def != data) + WREG32_SDMA(i, regSDMA_CLK_CTRL, data); + } + } else { + for (i = 0; i < adev->sdma.num_instances; i++) { + def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL); + data |= (SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK | + SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK | + SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK | + SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK | + SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK | + SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK | + SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK | + SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK); + if (def != data) + WREG32_SDMA(i, regSDMA_CLK_CTRL, data); + } + } +} + + +static void sdma_v4_4_2_update_medium_grain_light_sleep( + struct amdgpu_device *adev, + bool enable) +{ + uint32_t data, def; + int 
i; + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + /* 1-not override: enable sdma mem light sleep */ + def = data = RREG32_SDMA(0, regSDMA_POWER_CNTL); + data |= SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; + if (def != data) + WREG32_SDMA(0, regSDMA_POWER_CNTL, data); + } + } else { + for (i = 0; i < adev->sdma.num_instances; i++) { + /* 0-override:disable sdma mem light sleep */ + def = data = RREG32_SDMA(0, regSDMA_POWER_CNTL); + data &= ~SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; + if (def != data) + WREG32_SDMA(0, regSDMA_POWER_CNTL, data); + } + } +} + +static int sdma_v4_4_2_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) + return 0; + + sdma_v4_4_2_update_medium_grain_clock_gating(adev, + state == AMD_CG_STATE_GATE); + sdma_v4_4_2_update_medium_grain_light_sleep(adev, + state == AMD_CG_STATE_GATE); + return 0; +} + +static int sdma_v4_4_2_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int data; + + if (amdgpu_sriov_vf(adev)) + *flags = 0; + + /* AMD_CG_SUPPORT_SDMA_MGCG */ + data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_CLK_CTRL)); + if (!(data & SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK)) + *flags |= AMD_CG_SUPPORT_SDMA_MGCG; + + /* AMD_CG_SUPPORT_SDMA_LS */ + data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_POWER_CNTL)); + if (data & SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK) + *flags |= AMD_CG_SUPPORT_SDMA_LS; +} + +const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = { + .name = "sdma_v4_4_2", + .early_init = sdma_v4_4_2_early_init, + .late_init = sdma_v4_4_2_late_init, + .sw_init = sdma_v4_4_2_sw_init, + .sw_fini = sdma_v4_4_2_sw_fini, + .hw_init = sdma_v4_4_2_hw_init, + .hw_fini = sdma_v4_4_2_hw_fini, + .suspend = sdma_v4_4_2_suspend, + .resume = sdma_v4_4_2_resume, + .is_idle = sdma_v4_4_2_is_idle, + .wait_for_idle = sdma_v4_4_2_wait_for_idle, + .soft_reset = sdma_v4_4_2_soft_reset, + .set_clockgating_state = sdma_v4_4_2_set_clockgating_state, + .set_powergating_state = sdma_v4_4_2_set_powergating_state, + .get_clockgating_state = sdma_v4_4_2_get_clockgating_state, +}; + +static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = { + .type = AMDGPU_RING_TYPE_SDMA, + .align_mask = 0xf, + .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), + .support_64bit_ptrs = true, + .get_rptr = sdma_v4_4_2_ring_get_rptr, + .get_wptr = sdma_v4_4_2_ring_get_wptr, + .set_wptr = sdma_v4_4_2_ring_set_wptr, + .emit_frame_size = + 6 + /* sdma_v4_4_2_ring_emit_hdp_flush */ + 3 + /* hdp invalidate */ + 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */ + /* sdma_v4_4_2_ring_emit_vm_flush */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + + 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */ + .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */ + .emit_ib = sdma_v4_4_2_ring_emit_ib, + .emit_fence = sdma_v4_4_2_ring_emit_fence, + .emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync, + .emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush, + .emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush, + .test_ring = sdma_v4_4_2_ring_test_ring, + .test_ib = sdma_v4_4_2_ring_test_ib, + .insert_nop = sdma_v4_4_2_ring_insert_nop, + .pad_ib = sdma_v4_4_2_ring_pad_ib, + .emit_wreg = sdma_v4_4_2_ring_emit_wreg, + 
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = { + .type = AMDGPU_RING_TYPE_SDMA, + .align_mask = 0xf, + .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), + .support_64bit_ptrs = true, + .get_rptr = sdma_v4_4_2_ring_get_rptr, + .get_wptr = sdma_v4_4_2_page_ring_get_wptr, + .set_wptr = sdma_v4_4_2_page_ring_set_wptr, + .emit_frame_size = + 6 + /* sdma_v4_4_2_ring_emit_hdp_flush */ + 3 + /* hdp invalidate */ + 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */ + /* sdma_v4_4_2_ring_emit_vm_flush */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + + 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */ + .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */ + .emit_ib = sdma_v4_4_2_ring_emit_ib, + .emit_fence = sdma_v4_4_2_ring_emit_fence, + .emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync, + .emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush, + .emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush, + .test_ring = sdma_v4_4_2_ring_test_ring, + .test_ib = sdma_v4_4_2_ring_test_ib, + .insert_nop = sdma_v4_4_2_ring_insert_nop, + .pad_ib = sdma_v4_4_2_ring_pad_ib, + .emit_wreg = sdma_v4_4_2_ring_emit_wreg, + .emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + adev->sdma.instance[i].ring.funcs = &sdma_v4_4_2_ring_funcs; + adev->sdma.instance[i].ring.me = i; + if (adev->sdma.has_page_queue) { + adev->sdma.instance[i].page.funcs = + &sdma_v4_4_2_page_ring_funcs; + adev->sdma.instance[i].page.me = i; + } + } +} + +static const struct amdgpu_irq_src_funcs sdma_v4_4_2_trap_irq_funcs = { + .set = sdma_v4_4_2_set_trap_irq_state, + .process = sdma_v4_4_2_process_trap_irq, +}; + +static const struct amdgpu_irq_src_funcs sdma_v4_4_2_illegal_inst_irq_funcs = { + .process = sdma_v4_4_2_process_illegal_inst_irq, +}; + +static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ecc_irq_funcs = { + .set = sdma_v4_4_2_set_ecc_irq_state, + .process = amdgpu_sdma_process_ecc_irq, +}; + +static const struct amdgpu_irq_src_funcs sdma_v4_4_2_vm_hole_irq_funcs = { + .process = sdma_v4_4_2_process_vm_hole_irq, +}; + +static const struct amdgpu_irq_src_funcs sdma_v4_4_2_doorbell_invalid_irq_funcs = { + .process = sdma_v4_4_2_process_doorbell_invalid_irq, +}; + +static const struct amdgpu_irq_src_funcs sdma_v4_4_2_pool_timeout_irq_funcs = { + .process = sdma_v4_4_2_process_pool_timeout_irq, +}; + +static const struct amdgpu_irq_src_funcs sdma_v4_4_2_srbm_write_irq_funcs = { + .process = sdma_v4_4_2_process_srbm_write_irq, +}; + +static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->sdma.trap_irq.num_types = adev->sdma.num_instances; + adev->sdma.ecc_irq.num_types = adev->sdma.num_instances; + adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances; + adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances; + adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances; + adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances; + + adev->sdma.trap_irq.funcs = &sdma_v4_4_2_trap_irq_funcs; + adev->sdma.illegal_inst_irq.funcs = &sdma_v4_4_2_illegal_inst_irq_funcs; + adev->sdma.ecc_irq.funcs = &sdma_v4_4_2_ecc_irq_funcs; + adev->sdma.vm_hole_irq.funcs = &sdma_v4_4_2_vm_hole_irq_funcs; + 
adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_4_2_doorbell_invalid_irq_funcs; + adev->sdma.pool_timeout_irq.funcs = &sdma_v4_4_2_pool_timeout_irq_funcs; + adev->sdma.srbm_write_irq.funcs = &sdma_v4_4_2_srbm_write_irq_funcs; +} + +/** + * sdma_v4_4_2_emit_copy_buffer - copy buffer using the sDMA engine + * + * @ib: indirect buffer to copy to + * @src_offset: src GPU address + * @dst_offset: dst GPU address + * @byte_count: number of bytes to xfer + * @tmz: if a secure copy should be used + * + * Copy GPU buffers using the DMA engine. + * Used by the amdgpu ttm implementation to move pages if + * registered as the asic copy callback. + */ +static void sdma_v4_4_2_emit_copy_buffer(struct amdgpu_ib *ib, + uint64_t src_offset, + uint64_t dst_offset, + uint32_t byte_count, + bool tmz) +{ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | + SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0); + ib->ptr[ib->length_dw++] = byte_count - 1; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(src_offset); + ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); +} + +/** + * sdma_v4_4_2_emit_fill_buffer - fill buffer using the sDMA engine + * + * @ib: indirect buffer to copy to + * @src_data: value to write to buffer + * @dst_offset: dst GPU address + * @byte_count: number of bytes to xfer + * + * Fill GPU buffers using the DMA engine. + */ +static void sdma_v4_4_2_emit_fill_buffer(struct amdgpu_ib *ib, + uint32_t src_data, + uint64_t dst_offset, + uint32_t byte_count) +{ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL); + ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = src_data; + ib->ptr[ib->length_dw++] = byte_count - 1; +} + +static const struct amdgpu_buffer_funcs sdma_v4_4_2_buffer_funcs = { + .copy_max_bytes = 0x400000, + .copy_num_dw = 7, + .emit_copy_buffer = sdma_v4_4_2_emit_copy_buffer, + + .fill_max_bytes = 0x400000, + .fill_num_dw = 5, + .emit_fill_buffer = sdma_v4_4_2_emit_fill_buffer, +}; + +static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev) +{ + adev->mman.buffer_funcs = &sdma_v4_4_2_buffer_funcs; + if (adev->sdma.has_page_queue) + adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page; + else + adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; +} + +static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = { + .copy_pte_num_dw = 7, + .copy_pte = sdma_v4_4_2_vm_copy_pte, + + .write_pte = sdma_v4_4_2_vm_write_pte, + .set_pte_pde = sdma_v4_4_2_vm_set_pte_pde, +}; + +static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev) +{ + struct drm_gpu_scheduler *sched; + unsigned i; + + adev->vm_manager.vm_pte_funcs = &sdma_v4_4_2_vm_pte_funcs; + for (i = 0; i < adev->sdma.num_instances; i++) { + if (adev->sdma.has_page_queue) + sched = &adev->sdma.instance[i].page.sched; + else + sched = &adev->sdma.instance[i].ring.sched; + adev->vm_manager.vm_pte_scheds[i] = sched; + } + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; +} + +const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 4, + .minor = 4, + .rev = 0, + .funcs = &sdma_v4_4_2_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h new file 
mode 100644 index 000000000000..4814e8a074d6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h @@ -0,0 +1,30 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __SDMA_V4_4_2_H__ +#define __SDMA_V4_4_2_H__ + +extern const struct amd_ip_funcs sdma_v4_4_2_ip_funcs; +extern const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 1941b3b7c5d9..92e1299be021 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -1389,6 +1389,7 @@ static int sdma_v5_0_sw_init(void *handle) (adev->doorbell_index.sdma_engine[0] << 1) //get DWORD offset : (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset + ring->vm_hub = AMDGPU_GFXHUB_0; sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, (i == 0) ? 
AMDGPU_SDMA_IRQ_INSTANCE0 : @@ -1765,7 +1766,6 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), .support_64bit_ptrs = true, .secure_submission_supported = true, - .vmhub = AMDGPU_GFXHUB_0, .get_rptr = sdma_v5_0_ring_get_rptr, .get_wptr = sdma_v5_0_ring_get_wptr, .set_wptr = sdma_v5_0_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 8e445eb9dd49..ca7e8757d78e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -1253,6 +1253,7 @@ static int sdma_v5_2_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.sdma_engine[i] << 1); //get DWORD offset + ring->vm_hub = AMDGPU_GFXHUB_0; sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, AMDGPU_SDMA_IRQ_INSTANCE0 + i, @@ -1653,7 +1654,6 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = { .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), .support_64bit_ptrs = true, .secure_submission_supported = true, - .vmhub = AMDGPU_GFXHUB_0, .get_rptr = sdma_v5_2_ring_get_rptr, .get_wptr = sdma_v5_2_ring_get_wptr, .set_wptr = sdma_v5_2_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 40e6b22daa22..3d9a80511a45 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -403,15 +403,26 @@ static void sdma_v6_0_rlc_stop(struct amdgpu_device *adev) } /** - * sdma_v6_0_ctx_switch_enable - stop the async dma engines context switch + * sdma_v6_0_ctxempty_int_enable - enable or disable context empty interrupts * * @adev: amdgpu_device pointer - * @enable: enable/disable the DMA MEs context switch. + * @enable: enable/disable context switching due to queue empty conditions * - * Halt or unhalt the async dma engines context switch. + * Enable or disable the async dma engines queue empty context switch. */ -static void sdma_v6_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) +static void sdma_v6_0_ctxempty_int_enable(struct amdgpu_device *adev, bool enable) { + u32 f32_cntl; + int i; + + if (!amdgpu_sriov_vf(adev)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + f32_cntl = RREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + CTXEMPTY_INT_ENABLE, enable ? 
1 : 0); + WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL), f32_cntl); + } + } } /** @@ -499,10 +510,7 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - if (amdgpu_sriov_vf(adev)) - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1); - else - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1); WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8); @@ -579,10 +587,8 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) ring->sched.ready = true; - if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ - sdma_v6_0_ctx_switch_enable(adev, true); + if (amdgpu_sriov_vf(adev)) sdma_v6_0_enable(adev, true); - } r = amdgpu_ring_test_helper(ring); if (r) { @@ -778,7 +784,6 @@ static int sdma_v6_0_start(struct amdgpu_device *adev) int r = 0; if (amdgpu_sriov_vf(adev)) { - sdma_v6_0_ctx_switch_enable(adev, false); sdma_v6_0_enable(adev, false); /* set RB registers */ @@ -799,7 +804,7 @@ static int sdma_v6_0_start(struct amdgpu_device *adev) /* unhalt the MEs */ sdma_v6_0_enable(adev, true); /* enable sdma ring preemption */ - sdma_v6_0_ctx_switch_enable(adev, true); + sdma_v6_0_ctxempty_int_enable(adev, true); /* start the gfx rings and rlc compute queues */ r = sdma_v6_0_gfx_resume(adev); @@ -1173,7 +1178,28 @@ static void sdma_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) static void sdma_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { - amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; + uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0); + + /* Update the PD address for this VMID. */ + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + + (hub->ctx_addr_distance * vmid), + lower_32_bits(pd_addr)); + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + + (hub->ctx_addr_distance * vmid), + upper_32_bits(pd_addr)); + + /* Trigger invalidation. 
*/ + amdgpu_ring_write(ring, + SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_POLL_REGMEM) | + SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATION) | + SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(ring->vm_inv_eng) | + SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(0x1f)); + amdgpu_ring_write(ring, req); + amdgpu_ring_write(ring, 0xFFFFFFFF); + amdgpu_ring_write(ring, + SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(1 << vmid) | + SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(0x1F)); } static void sdma_v6_0_ring_emit_wreg(struct amdgpu_ring *ring, @@ -1272,6 +1298,7 @@ static int sdma_v6_0_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.sdma_engine[i] << 1); // get DWORD offset + ring->vm_hub = AMDGPU_GFXHUB_0; sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, @@ -1319,7 +1346,7 @@ static int sdma_v6_0_hw_fini(void *handle) return 0; } - sdma_v6_0_ctx_switch_enable(adev, false); + sdma_v6_0_ctxempty_int_enable(adev, false); sdma_v6_0_enable(adev, false); return 0; @@ -1528,7 +1555,6 @@ static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = { .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), .support_64bit_ptrs = true, .secure_submission_supported = true, - .vmhub = AMDGPU_GFXHUB_0, .get_rptr = sdma_v6_0_ring_get_rptr, .get_wptr = sdma_v6_0_ring_get_wptr, .set_wptr = sdma_v6_0_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c index 81a6d5b94987..8b8086d5c864 100644 --- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c +++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c @@ -40,7 +40,7 @@ static bool sienna_cichlid_is_mode2_default(struct amdgpu_reset_control *reset_c adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev)) return true; #endif - return false; + return amdgpu_reset_method == AMD_RESET_METHOD_MODE2; } static struct amdgpu_reset_handler * diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 2eddd7f6cd41..6d15d5cd9e07 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -191,47 +191,6 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode, } } -/* - * Indirect registers accessor - */ -static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg) -{ - unsigned long address, data; - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - return amdgpu_device_indirect_rreg(adev, address, data, reg); -} - -static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) -{ - unsigned long address, data; - - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - amdgpu_device_indirect_wreg(adev, address, data, reg, v); -} - -static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg) -{ - unsigned long address, data; - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - return amdgpu_device_indirect_rreg64(adev, address, data, reg); -} - -static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) -{ - unsigned long address, data; - - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - amdgpu_device_indirect_wreg64(adev, address, data, reg, v); -} - static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; @@ -342,11 
+301,10 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev) u32 reference_clock = adev->clock.spll.reference_freq; if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) || - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) - return 10000; - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) || adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1)) - return reference_clock / 4; + return 10000; return reference_clock; } @@ -651,24 +609,6 @@ static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk return 0; } -static void soc15_pcie_gen3_enable(struct amdgpu_device *adev) -{ - if (pci_is_root_bus(adev->pdev->bus)) - return; - - if (amdgpu_pcie_gen2 == 0) - return; - - if (adev->flags & AMD_IS_APU) - return; - - if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | - CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) - return; - - /* todo */ -} - static void soc15_program_aspm(struct amdgpu_device *adev) { if (!amdgpu_device_should_use_aspm(adev)) @@ -679,13 +619,6 @@ static void soc15_program_aspm(struct amdgpu_device *adev) adev->nbio.funcs->program_aspm(adev); } -static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev, - bool enable) -{ - adev->nbio.funcs->enable_doorbell_aperture(adev, enable); - adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); -} - const struct amdgpu_ip_block_version vega10_common_ip_block = { .type = AMD_IP_BLOCK_TYPE_COMMON, @@ -695,11 +628,6 @@ const struct amdgpu_ip_block_version vega10_common_ip_block = .funcs = &soc15_common_ip_funcs, }; -static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) -{ - return adev->nbio.funcs->get_rev_id(adev); -} - static void soc15_reg_base_init(struct amdgpu_device *adev) { /* Set IP register base before any HW register access */ @@ -936,10 +864,10 @@ static int soc15_common_early_init(void *handle) } adev->smc_rreg = NULL; adev->smc_wreg = NULL; - adev->pcie_rreg = &soc15_pcie_rreg; - adev->pcie_wreg = &soc15_pcie_wreg; - adev->pcie_rreg64 = &soc15_pcie_rreg64; - adev->pcie_wreg64 = &soc15_pcie_wreg64; + adev->pcie_rreg = &amdgpu_device_indirect_rreg; + adev->pcie_wreg = &amdgpu_device_indirect_wreg; + adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64; + adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64; adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg; adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg; adev->didt_rreg = &soc15_didt_rreg; @@ -949,7 +877,7 @@ static int soc15_common_early_init(void *handle) adev->se_cac_rreg = &soc15_se_cac_rreg; adev->se_cac_wreg = &soc15_se_cac_wreg; - adev->rev_id = soc15_get_rev_id(adev); + adev->rev_id = amdgpu_device_get_rev_id(adev); adev->external_rev_id = 0xFF; /* TODO: split the GC and PG flags based on the relevant IP version for which * they are relevant. @@ -1165,6 +1093,11 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG; adev->external_rev_id = adev->rev_id + 0x3c; break; + case IP_VERSION(9, 4, 3): + adev->asic_funcs = &vega20_asic_funcs; + adev->cg_flags = 0; + adev->pg_flags = 0; + break; default: /* FIXME: not supported yet */ return -EINVAL; @@ -1185,6 +1118,11 @@ static int soc15_common_late_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_get_irq(adev); + /* Enable selfring doorbell aperture late because doorbell BAR + * aperture will change if resize BAR successfully in gmc sw_init. 
+ */ + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true); + return 0; } @@ -1230,8 +1168,6 @@ static int soc15_common_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* enable pcie gen2/3 link */ - soc15_pcie_gen3_enable(adev); /* enable aspm */ soc15_program_aspm(adev); /* setup nbio registers */ @@ -1244,7 +1180,8 @@ static int soc15_common_hw_init(void *handle) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ - soc15_enable_doorbell_aperture(adev, true); + adev->nbio.funcs->enable_doorbell_aperture(adev, true); + /* HW doorbell routing policy: doorbell writing not * in SDMA/IH/MM/ACV range will be routed to CP. So * we need to init SDMA doorbell range prior @@ -1260,8 +1197,14 @@ static int soc15_common_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* disable the doorbell aperture */ - soc15_enable_doorbell_aperture(adev, false); + /* Disable the doorbell aperture and selfring doorbell aperture + * separately in hw_fini because soc15_enable_doorbell_aperture + * has been removed and there is no need to delay disabling + * selfring doorbell. + */ + adev->nbio.funcs->enable_doorbell_aperture(adev, false); + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false); + if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_put_irq(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index c82b3a7ea5f0..744be2a05623 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -196,46 +196,6 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode, return -EINVAL; } } -/* - * Indirect registers accessor - */ -static u32 soc21_pcie_rreg(struct amdgpu_device *adev, u32 reg) -{ - unsigned long address, data; - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - return amdgpu_device_indirect_rreg(adev, address, data, reg); -} - -static void soc21_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) -{ - unsigned long address, data; - - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - amdgpu_device_indirect_wreg(adev, address, data, reg, v); -} - -static u64 soc21_pcie_rreg64(struct amdgpu_device *adev, u32 reg) -{ - unsigned long address, data; - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - return amdgpu_device_indirect_rreg64(adev, address, data, reg); -} - -static void soc21_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) -{ - unsigned long address, data; - - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - amdgpu_device_indirect_wreg64(adev, address, data, reg, v); -} static u32 soc21_didt_rreg(struct amdgpu_device *adev, u32 reg) { @@ -480,21 +440,6 @@ static int soc21_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk return 0; } -static void soc21_pcie_gen3_enable(struct amdgpu_device *adev) -{ - if (pci_is_root_bus(adev->pdev->bus)) - return; - - if (amdgpu_pcie_gen2 == 0) - return; - - if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | - CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) - return; - - /* todo */ -} - static void soc21_program_aspm(struct amdgpu_device *adev) { if (!amdgpu_device_should_use_aspm(adev)) @@ -505,13 +450,6 @@ static void soc21_program_aspm(struct amdgpu_device 
*adev) adev->nbio.funcs->program_aspm(adev); } -static void soc21_enable_doorbell_aperture(struct amdgpu_device *adev, - bool enable) -{ - adev->nbio.funcs->enable_doorbell_aperture(adev, enable); - adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); -} - const struct amdgpu_ip_block_version soc21_common_ip_block = { .type = AMD_IP_BLOCK_TYPE_COMMON, @@ -521,11 +459,6 @@ const struct amdgpu_ip_block_version soc21_common_ip_block = .funcs = &soc21_common_ip_funcs, }; -static uint32_t soc21_get_rev_id(struct amdgpu_device *adev) -{ - return adev->nbio.funcs->get_rev_id(adev); -} - static bool soc21_need_full_reset(struct amdgpu_device *adev) { switch (adev->ip_versions[GC_HWIP][0]) { @@ -650,10 +583,10 @@ static int soc21_common_early_init(void *handle) adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; adev->smc_rreg = NULL; adev->smc_wreg = NULL; - adev->pcie_rreg = &soc21_pcie_rreg; - adev->pcie_wreg = &soc21_pcie_wreg; - adev->pcie_rreg64 = &soc21_pcie_rreg64; - adev->pcie_wreg64 = &soc21_pcie_wreg64; + adev->pcie_rreg = &amdgpu_device_indirect_rreg; + adev->pcie_wreg = &amdgpu_device_indirect_wreg; + adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64; + adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64; adev->pciep_rreg = amdgpu_device_pcie_port_rreg; adev->pciep_wreg = amdgpu_device_pcie_port_wreg; @@ -666,7 +599,7 @@ static int soc21_common_early_init(void *handle) adev->asic_funcs = &soc21_asic_funcs; - adev->rev_id = soc21_get_rev_id(adev); + adev->rev_id = amdgpu_device_get_rev_id(adev); adev->external_rev_id = 0xff; switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(11, 0, 0): @@ -814,8 +747,21 @@ static int soc21_common_late_init(void *handle) sriov_vcn_4_0_0_video_codecs_decode_array_vcn0, ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0)); } + } else { + if (adev->nbio.ras && + adev->nbio.ras_err_event_athub_irq.funcs) + /* don't need to fail gpu late init + * if enabling athub_err_event interrupt failed + * nbio v4_3 only support fatal error hanlding + * just enable the interrupt directly */ + amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0); } + /* Enable selfring doorbell aperture late because doorbell BAR + * aperture will change if resize BAR successfully in gmc sw_init. + */ + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true); + return 0; } @@ -838,8 +784,6 @@ static int soc21_common_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* enable pcie gen2/3 link */ - soc21_pcie_gen3_enable(adev); /* enable aspm */ soc21_program_aspm(adev); /* setup nbio registers */ @@ -851,7 +795,7 @@ static int soc21_common_hw_init(void *handle) if (adev->nbio.funcs->remap_hdp_registers) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ - soc21_enable_doorbell_aperture(adev, true); + adev->nbio.funcs->enable_doorbell_aperture(adev, true); return 0; } @@ -860,11 +804,21 @@ static int soc21_common_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* disable the doorbell aperture */ - soc21_enable_doorbell_aperture(adev, false); + /* Disable the doorbell aperture and selfring doorbell aperture + * separately in hw_fini because soc21_enable_doorbell_aperture + * has been removed and there is no need to delay disabling + * selfring doorbell. 
+ */ + adev->nbio.funcs->enable_doorbell_aperture(adev, false); + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false); - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev)) { xgpu_nv_mailbox_put_irq(adev); + } else { + if (adev->nbio.ras && + adev->nbio.ras_err_event_athub_irq.funcs) + amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0); + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h index 509d8a1945eb..30d0482ac466 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h @@ -84,6 +84,8 @@ enum ta_ras_block { TA_RAS_BLOCK__MP1, TA_RAS_BLOCK__FUSE, TA_RAS_BLOCK__MCA, + TA_RAS_BLOCK__VCN, + TA_RAS_BLOCK__JPEG, TA_NUM_BLOCK_MAX }; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c index e08e25a3a1a9..530549314ce4 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c @@ -160,24 +160,28 @@ static void umc_v6_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_dev } } +static int umc_v6_7_ecc_info_querry_ecc_error_count(struct amdgpu_device *adev, + uint32_t node_inst, uint32_t umc_inst, + uint32_t ch_inst, void *data) +{ + struct ras_err_data *err_data = (struct ras_err_data *)data; + + umc_v6_7_ecc_info_query_correctable_error_count(adev, + umc_inst, ch_inst, + &(err_data->ce_count)); + + umc_v6_7_ecc_info_querry_uncorrectable_error_count(adev, + umc_inst, ch_inst, + &(err_data->ue_count)); + + return 0; +} + static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { - struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - - uint32_t umc_inst = 0; - uint32_t ch_inst = 0; - - /*TODO: driver needs to toggle DF Cstate to ensure - * safe access of UMC registers. 
Will add the protection */ - LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { - umc_v6_7_ecc_info_query_correctable_error_count(adev, - umc_inst, ch_inst, - &(err_data->ce_count)); - umc_v6_7_ecc_info_querry_uncorrectable_error_count(adev, - umc_inst, ch_inst, - &(err_data->ue_count)); - } + amdgpu_umc_loop_channels(adev, + umc_v6_7_ecc_info_querry_ecc_error_count, ras_error_status); } void umc_v6_7_convert_error_address(struct amdgpu_device *adev, @@ -215,23 +219,23 @@ void umc_v6_7_convert_error_address(struct amdgpu_device *adev, } } -static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev, - struct ras_err_data *err_data, - uint32_t ch_inst, - uint32_t umc_inst) +static int umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev, + uint32_t node_inst, uint32_t umc_inst, + uint32_t ch_inst, void *data) { uint64_t mc_umc_status, err_addr; uint32_t eccinfo_table_idx; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + struct ras_err_data *err_data = (struct ras_err_data *)data; eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst; mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status; if (mc_umc_status == 0) - return; + return 0; if (!err_data->err_addr) - return; + return 0; /* calculate error address if ue error is detected */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && @@ -243,25 +247,15 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev, umc_v6_7_convert_error_address(adev, err_data, err_addr, ch_inst, umc_inst); } + + return 0; } static void umc_v6_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev, void *ras_error_status) { - struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - - uint32_t umc_inst = 0; - uint32_t ch_inst = 0; - - /*TODO: driver needs to toggle DF Cstate to ensure - * safe access of UMC resgisters. 
Will add the protection - * when firmware interface is ready */ - LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { - umc_v6_7_ecc_info_query_error_address(adev, - err_data, - ch_inst, - umc_inst); - } + amdgpu_umc_loop_channels(adev, + umc_v6_7_ecc_info_query_error_address, ras_error_status); } static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev, @@ -364,11 +358,14 @@ static void umc_v6_7_querry_uncorrectable_error_count(struct amdgpu_device *adev } } -static void umc_v6_7_reset_error_count_per_channel(struct amdgpu_device *adev, - uint32_t umc_reg_offset) +static int umc_v6_7_reset_error_count_per_channel(struct amdgpu_device *adev, + uint32_t node_inst, uint32_t umc_inst, + uint32_t ch_inst, void *data) { uint32_t ecc_err_cnt_addr; uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr; + uint32_t umc_reg_offset = + get_umc_v6_7_reg_offset(adev, umc_inst, ch_inst); ecc_err_cnt_sel_addr = SOC15_REG_OFFSET(UMC, 0, @@ -402,58 +399,54 @@ static void umc_v6_7_reset_error_count_per_channel(struct amdgpu_device *adev, /* clear higher chip error count */ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_7_CE_CNT_INIT); + + return 0; } static void umc_v6_7_reset_error_count(struct amdgpu_device *adev) { - uint32_t umc_inst = 0; - uint32_t ch_inst = 0; - uint32_t umc_reg_offset = 0; + amdgpu_umc_loop_channels(adev, + umc_v6_7_reset_error_count_per_channel, NULL); +} + +static int umc_v6_7_query_ecc_error_count(struct amdgpu_device *adev, + uint32_t node_inst, uint32_t umc_inst, + uint32_t ch_inst, void *data) +{ + struct ras_err_data *err_data = (struct ras_err_data *)data; + uint32_t umc_reg_offset = + get_umc_v6_7_reg_offset(adev, umc_inst, ch_inst); + + umc_v6_7_query_correctable_error_count(adev, + umc_reg_offset, + &(err_data->ce_count), + ch_inst, umc_inst); - LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { - umc_reg_offset = get_umc_v6_7_reg_offset(adev, - umc_inst, - ch_inst); + umc_v6_7_querry_uncorrectable_error_count(adev, + umc_reg_offset, + &(err_data->ue_count)); - umc_v6_7_reset_error_count_per_channel(adev, - umc_reg_offset); - } + return 0; } static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { - struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - - uint32_t umc_inst = 0; - uint32_t ch_inst = 0; - uint32_t umc_reg_offset = 0; - - /*TODO: driver needs to toggle DF Cstate to ensure - * safe access of UMC registers. 
Will add the protection */ - LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { - umc_reg_offset = get_umc_v6_7_reg_offset(adev, - umc_inst, - ch_inst); - umc_v6_7_query_correctable_error_count(adev, - umc_reg_offset, - &(err_data->ce_count), - ch_inst, umc_inst); - umc_v6_7_querry_uncorrectable_error_count(adev, - umc_reg_offset, - &(err_data->ue_count)); - } + amdgpu_umc_loop_channels(adev, + umc_v6_7_query_ecc_error_count, ras_error_status); umc_v6_7_reset_error_count(adev); } -static void umc_v6_7_query_error_address(struct amdgpu_device *adev, - struct ras_err_data *err_data, - uint32_t umc_reg_offset, uint32_t ch_inst, - uint32_t umc_inst) +static int umc_v6_7_query_error_address(struct amdgpu_device *adev, + uint32_t node_inst, uint32_t umc_inst, + uint32_t ch_inst, void *data) { uint32_t mc_umc_status_addr; uint64_t mc_umc_status = 0, mc_umc_addrt0, err_addr; + struct ras_err_data *err_data = (struct ras_err_data *)data; + uint32_t umc_reg_offset = + get_umc_v6_7_reg_offset(adev, umc_inst, ch_inst); mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0); @@ -463,12 +456,12 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev, mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); if (mc_umc_status == 0) - return; + return 0; if (!err_data->err_addr) { /* clear umc status */ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); - return; + return 0; } /* calculate error address if ue error is detected */ @@ -484,29 +477,15 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev, /* clear umc status */ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); + + return 0; } static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev, void *ras_error_status) { - struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - - uint32_t umc_inst = 0; - uint32_t ch_inst = 0; - uint32_t umc_reg_offset = 0; - - /*TODO: driver needs to toggle DF Cstate to ensure - * safe access of UMC resgisters. 
Will add the protection - * when firmware interface is ready */ - LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { - umc_reg_offset = get_umc_v6_7_reg_offset(adev, - umc_inst, - ch_inst); - umc_v6_7_query_error_address(adev, - err_data, - umc_reg_offset, ch_inst, - umc_inst); - } + amdgpu_umc_loop_channels(adev, + umc_v6_7_query_error_address, ras_error_status); } static uint32_t umc_v6_7_query_ras_poison_mode_per_channel( diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c index fb55e8cb9967..d51ae0bc36f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c @@ -76,10 +76,13 @@ static inline uint32_t get_umc_v8_10_reg_offset(struct amdgpu_device *adev, UMC_8_NODE_DIST * node_inst; } -static void umc_v8_10_clear_error_count_per_channel(struct amdgpu_device *adev, - uint32_t umc_reg_offset) +static int umc_v8_10_clear_error_count_per_channel(struct amdgpu_device *adev, + uint32_t node_inst, uint32_t umc_inst, + uint32_t ch_inst, void *data) { uint32_t ecc_err_cnt_addr; + uint32_t umc_reg_offset = + get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst); ecc_err_cnt_addr = SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCnt); @@ -87,24 +90,14 @@ static void umc_v8_10_clear_error_count_per_channel(struct amdgpu_device *adev, /* clear error count */ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_10_CE_CNT_INIT); + + return 0; } static void umc_v8_10_clear_error_count(struct amdgpu_device *adev) { - uint32_t node_inst = 0; - uint32_t umc_inst = 0; - uint32_t ch_inst = 0; - uint32_t umc_reg_offset = 0; - - LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) { - umc_reg_offset = get_umc_v8_10_reg_offset(adev, - node_inst, - umc_inst, - ch_inst); - - umc_v8_10_clear_error_count_per_channel(adev, - umc_reg_offset); - } + amdgpu_umc_loop_channels(adev, + umc_v8_10_clear_error_count_per_channel, NULL); } static void umc_v8_10_query_correctable_error_count(struct amdgpu_device *adev, @@ -147,29 +140,29 @@ static void umc_v8_10_query_uncorrectable_error_count(struct amdgpu_device *adev *error_count += 1; } +static int umc_v8_10_query_ecc_error_count(struct amdgpu_device *adev, + uint32_t node_inst, uint32_t umc_inst, + uint32_t ch_inst, void *data) +{ + struct ras_err_data *err_data = (struct ras_err_data *)data; + uint32_t umc_reg_offset = + get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst); + + umc_v8_10_query_correctable_error_count(adev, + umc_reg_offset, + &(err_data->ce_count)); + umc_v8_10_query_uncorrectable_error_count(adev, + umc_reg_offset, + &(err_data->ue_count)); + + return 0; +} + static void umc_v8_10_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { - struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - - uint32_t node_inst = 0; - uint32_t umc_inst = 0; - uint32_t ch_inst = 0; - uint32_t umc_reg_offset = 0; - - LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) { - umc_reg_offset = get_umc_v8_10_reg_offset(adev, - node_inst, - umc_inst, - ch_inst); - - umc_v8_10_query_correctable_error_count(adev, - umc_reg_offset, - &(err_data->ce_count)); - umc_v8_10_query_uncorrectable_error_count(adev, - umc_reg_offset, - &(err_data->ue_count)); - } + amdgpu_umc_loop_channels(adev, + umc_v8_10_query_ecc_error_count, ras_error_status); umc_v8_10_clear_error_count(adev); } @@ -248,28 +241,28 @@ static void umc_v8_10_convert_error_address(struct amdgpu_device *adev, } } -static void umc_v8_10_query_error_address(struct amdgpu_device 
*adev, - struct ras_err_data *err_data, - uint32_t umc_reg_offset, - uint32_t node_inst, - uint32_t ch_inst, - uint32_t umc_inst) +static int umc_v8_10_query_error_address(struct amdgpu_device *adev, + uint32_t node_inst, uint32_t umc_inst, + uint32_t ch_inst, void *data) { uint64_t mc_umc_status_addr; uint64_t mc_umc_status, err_addr; uint64_t mc_umc_addrt0; + struct ras_err_data *err_data = (struct ras_err_data *)data; + uint32_t umc_reg_offset = + get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst); mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0); mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); if (mc_umc_status == 0) - return; + return 0; if (!err_data->err_addr) { /* clear umc status */ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); - return; + return 0; } /* calculate error address if ue error is detected */ @@ -287,37 +280,25 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev, /* clear umc status */ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); + + return 0; } static void umc_v8_10_query_ras_error_address(struct amdgpu_device *adev, void *ras_error_status) { - struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - uint32_t node_inst = 0; - uint32_t umc_inst = 0; - uint32_t ch_inst = 0; - uint32_t umc_reg_offset = 0; - - LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) { - umc_reg_offset = get_umc_v8_10_reg_offset(adev, - node_inst, - umc_inst, - ch_inst); - - umc_v8_10_query_error_address(adev, - err_data, - umc_reg_offset, - node_inst, - ch_inst, - umc_inst); - } + amdgpu_umc_loop_channels(adev, + umc_v8_10_query_error_address, ras_error_status); } -static void umc_v8_10_err_cnt_init_per_channel(struct amdgpu_device *adev, - uint32_t umc_reg_offset) +static int umc_v8_10_err_cnt_init_per_channel(struct amdgpu_device *adev, + uint32_t node_inst, uint32_t umc_inst, + uint32_t ch_inst, void *data) { uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr; uint32_t ecc_err_cnt_addr; + uint32_t umc_reg_offset = + get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst); ecc_err_cnt_sel_addr = SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCntSel); @@ -332,23 +313,14 @@ static void umc_v8_10_err_cnt_init_per_channel(struct amdgpu_device *adev, WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); /* set error count to initial value */ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_10_CE_CNT_INIT); + + return 0; } static void umc_v8_10_err_cnt_init(struct amdgpu_device *adev) { - uint32_t node_inst = 0; - uint32_t umc_inst = 0; - uint32_t ch_inst = 0; - uint32_t umc_reg_offset = 0; - - LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) { - umc_reg_offset = get_umc_v8_10_reg_offset(adev, - node_inst, - umc_inst, - ch_inst); - - umc_v8_10_err_cnt_init_per_channel(adev, umc_reg_offset); - } + amdgpu_umc_loop_channels(adev, + umc_v8_10_err_cnt_init_per_channel, NULL); } static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev) @@ -406,37 +378,35 @@ static void umc_v8_10_ecc_info_query_uncorrectable_error_count(struct amdgpu_dev } } +static int umc_v8_10_ecc_info_query_ecc_error_count(struct amdgpu_device *adev, + uint32_t node_inst, uint32_t umc_inst, + uint32_t ch_inst, void *data) +{ + struct ras_err_data *err_data = (struct ras_err_data *)data; + + umc_v8_10_ecc_info_query_correctable_error_count(adev, + node_inst, umc_inst, ch_inst, + &(err_data->ce_count)); + 
umc_v8_10_ecc_info_query_uncorrectable_error_count(adev, + node_inst, umc_inst, ch_inst, + &(err_data->ue_count)); + return 0; +} + static void umc_v8_10_ecc_info_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { - struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - - uint32_t node_inst = 0; - uint32_t umc_inst = 0; - uint32_t ch_inst = 0; - - /* TODO: driver needs to toggle DF Cstate to ensure - * safe access of UMC registers. Will add the protection - */ - LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) { - umc_v8_10_ecc_info_query_correctable_error_count(adev, - node_inst, umc_inst, ch_inst, - &(err_data->ce_count)); - umc_v8_10_ecc_info_query_uncorrectable_error_count(adev, - node_inst, umc_inst, ch_inst, - &(err_data->ue_count)); - } + amdgpu_umc_loop_channels(adev, + umc_v8_10_ecc_info_query_ecc_error_count, ras_error_status); } -static void umc_v8_10_ecc_info_query_error_address(struct amdgpu_device *adev, - struct ras_err_data *err_data, - uint32_t ch_inst, - uint32_t umc_inst, - uint32_t node_inst) +static int umc_v8_10_ecc_info_query_error_address(struct amdgpu_device *adev, + uint32_t node_inst, uint32_t umc_inst, + uint32_t ch_inst, void *data) { uint32_t eccinfo_table_idx; uint64_t mc_umc_status, err_addr; - + struct ras_err_data *err_data = (struct ras_err_data *)data; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); eccinfo_table_idx = node_inst * adev->umc.umc_inst_num * @@ -447,10 +417,10 @@ static void umc_v8_10_ecc_info_query_error_address(struct amdgpu_device *adev, mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status; if (mc_umc_status == 0) - return; + return 0; if (!err_data->err_addr) - return; + return 0; /* calculate error address if ue error is detected */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && @@ -463,28 +433,15 @@ static void umc_v8_10_ecc_info_query_error_address(struct amdgpu_device *adev, umc_v8_10_convert_error_address(adev, err_data, err_addr, ch_inst, umc_inst, node_inst, mc_umc_status); } + + return 0; } static void umc_v8_10_ecc_info_query_ras_error_address(struct amdgpu_device *adev, void *ras_error_status) { - struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - - uint32_t node_inst = 0; - uint32_t umc_inst = 0; - uint32_t ch_inst = 0; - - /* TODO: driver needs to toggle DF Cstate to ensure - * safe access of UMC resgisters. 
Will add the protection - * when firmware interface is ready - */ - LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) { - umc_v8_10_ecc_info_query_error_address(adev, - err_data, - ch_inst, - umc_inst, - node_inst); - } + amdgpu_umc_loop_channels(adev, + umc_v8_10_ecc_info_query_error_address, ras_error_status); } const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index e407be6cb63c..e32b656b3dab 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -444,6 +444,7 @@ static int uvd_v7_0_sw_init(void *handle) continue; if (!amdgpu_sriov_vf(adev)) { ring = &adev->uvd.inst[j].ring; + ring->vm_hub = AMDGPU_MMHUB_0; sprintf(ring->name, "uvd_%d", ring->me); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0, @@ -454,6 +455,7 @@ static int uvd_v7_0_sw_init(void *handle) for (i = 0; i < adev->uvd.num_enc_rings; ++i) { ring = &adev->uvd.inst[j].ring_enc[i]; + ring->vm_hub = AMDGPU_MMHUB_0; sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i); if (amdgpu_sriov_vf(adev)) { ring->use_doorbell = true; @@ -1397,7 +1399,7 @@ static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; uint32_t data0, data1, mask; pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); @@ -1440,7 +1442,7 @@ static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid, uint64_t pd_addr) { - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); @@ -1802,7 +1804,6 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = { .align_mask = 0xf, .support_64bit_ptrs = false, .no_user_fence = true, - .vmhub = AMDGPU_MMHUB_0, .get_rptr = uvd_v7_0_ring_get_rptr, .get_wptr = uvd_v7_0_ring_get_wptr, .set_wptr = uvd_v7_0_ring_set_wptr, @@ -1835,7 +1836,6 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = { .nop = HEVC_ENC_CMD_NO_OP, .support_64bit_ptrs = false, .no_user_fence = true, - .vmhub = AMDGPU_MMHUB_0, .get_rptr = uvd_v7_0_enc_ring_get_rptr, .get_wptr = uvd_v7_0_enc_ring_get_wptr, .set_wptr = uvd_v7_0_enc_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 66cd3d11aa4b..57b85bb6a1e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -466,6 +466,7 @@ static int vce_v4_0_sw_init(void *handle) enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i); ring = &adev->vce.ring[i]; + ring->vm_hub = AMDGPU_MMHUB_0; sprintf(ring->name, "vce%d", i); if (amdgpu_sriov_vf(adev)) { /* DOORBELL only works under SRIOV */ @@ -1021,7 +1022,7 @@ static void vce_v4_0_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid, uint64_t pd_addr) { - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); @@ -1103,7 +1104,6 @@ static const struct 
amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = { .nop = VCE_CMD_NO_OP, .support_64bit_ptrs = false, .no_user_fence = true, - .vmhub = AMDGPU_MMHUB_0, .get_rptr = vce_v4_0_ring_get_rptr, .get_wptr = vce_v4_0_ring_get_wptr, .set_wptr = vce_v4_0_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_sw_ring.c b/drivers/gpu/drm/amd/amdgpu/vcn_sw_ring.c index 1ceda3d0cd5b..2b9ddb3d2fe1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_sw_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_sw_ring.c @@ -65,7 +65,7 @@ void vcn_dec_sw_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, void vcn_dec_sw_ring_emit_vm_flush(struct amdgpu_ring *ring, uint32_t vmid, uint64_t pd_addr) { - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; uint32_t data0, data1, mask; pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index c305b2cb8490..761c28fa6ec1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -120,6 +120,7 @@ static int vcn_v1_0_sw_init(void *handle) return r; ring = &adev->vcn.inst->ring_dec; + ring->vm_hub = AMDGPU_MMHUB_0; sprintf(ring->name, "vcn_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -141,6 +142,7 @@ static int vcn_v1_0_sw_init(void *handle) enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i); ring = &adev->vcn.inst->ring_enc[i]; + ring->vm_hub = AMDGPU_MMHUB_0; sprintf(ring->name, "vcn_enc%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, hw_prio, NULL); @@ -1548,7 +1550,7 @@ static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; uint32_t data0, data1, mask; pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); @@ -1693,7 +1695,7 @@ static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid, uint64_t pd_addr) { - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); @@ -1977,7 +1979,6 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = { .support_64bit_ptrs = false, .no_user_fence = true, .secure_submission_supported = true, - .vmhub = AMDGPU_MMHUB_0, .get_rptr = vcn_v1_0_dec_ring_get_rptr, .get_wptr = vcn_v1_0_dec_ring_get_wptr, .set_wptr = vcn_v1_0_dec_ring_set_wptr, @@ -2012,7 +2013,6 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = { .nop = VCN_ENC_CMD_NO_OP, .support_64bit_ptrs = false, .no_user_fence = true, - .vmhub = AMDGPU_MMHUB_0, .get_rptr = vcn_v1_0_enc_ring_get_rptr, .get_wptr = vcn_v1_0_enc_ring_get_wptr, .set_wptr = vcn_v1_0_enc_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 4b4cd88414e0..7c2b3aa48083 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -129,6 +129,7 @@ static int vcn_v2_0_sw_init(void *handle) ring->use_doorbell = true; ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1; + ring->vm_hub = 
AMDGPU_MMHUB_0; sprintf(ring->name, "vcn_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, @@ -159,6 +160,7 @@ static int vcn_v2_0_sw_init(void *handle) ring = &adev->vcn.inst->ring_enc[i]; ring->use_doorbell = true; + ring->vm_hub = AMDGPU_MMHUB_0; if (!amdgpu_sriov_vf(adev)) ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; else @@ -1511,7 +1513,7 @@ void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; uint32_t data0, data1, mask; pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); @@ -1671,7 +1673,7 @@ void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid, uint64_t pd_addr) { - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); @@ -2014,7 +2016,6 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_DEC, .align_mask = 0xf, .secure_submission_supported = true, - .vmhub = AMDGPU_MMHUB_0, .get_rptr = vcn_v2_0_dec_ring_get_rptr, .get_wptr = vcn_v2_0_dec_ring_get_wptr, .set_wptr = vcn_v2_0_dec_ring_set_wptr, @@ -2045,7 +2046,6 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, - .vmhub = AMDGPU_MMHUB_0, .get_rptr = vcn_v2_0_enc_ring_get_rptr, .get_wptr = vcn_v2_0_enc_ring_get_wptr, .set_wptr = vcn_v2_0_enc_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index b0b0e69c6a94..ab0b45d0ead1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -186,6 +186,12 @@ static int vcn_v2_5_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + (amdgpu_sriov_vf(adev) ? 2*j : 8*j); + + if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) + ring->vm_hub = AMDGPU_MMHUB_1; + else + ring->vm_hub = AMDGPU_MMHUB_0; + sprintf(ring->name, "vcn_dec_%d", j); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -201,6 +207,11 @@ static int vcn_v2_5_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + (amdgpu_sriov_vf(adev) ? 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index b0b0e69c6a94..ab0b45d0ead1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -186,6 +186,12 @@ static int vcn_v2_5_sw_init(void *handle)
 		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
 				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
+
+		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
+			ring->vm_hub = AMDGPU_MMHUB_1;
+		else
+			ring->vm_hub = AMDGPU_MMHUB_0;
+
 		sprintf(ring->name, "vcn_dec_%d", j);
 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0,
 				     AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -201,6 +207,11 @@ static int vcn_v2_5_sw_init(void *handle)
 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
 					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
+			if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
+				ring->vm_hub = AMDGPU_MMHUB_1;
+			else
+				ring->vm_hub = AMDGPU_MMHUB_0;
+
 			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
 			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0,
@@ -225,6 +236,10 @@ static int vcn_v2_5_sw_init(void *handle)
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
 		adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
+	r = amdgpu_vcn_ras_sw_init(adev);
+	if (r)
+		return r;
+
 	return 0;
 }
@@ -1558,38 +1573,6 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_DEC,
 	.align_mask = 0xf,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_MMHUB_1,
-	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
-	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
-	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
-	.emit_frame_size =
-		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
-		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
-		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
-		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
-		6,
-	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
-	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
-	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
-	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
-	.test_ring = vcn_v2_0_dec_ring_test_ring,
-	.test_ib = amdgpu_vcn_dec_ring_test_ib,
-	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
-	.insert_start = vcn_v2_0_dec_ring_insert_start,
-	.insert_end = vcn_v2_0_dec_ring_insert_end,
-	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.begin_use = amdgpu_vcn_ring_begin_use,
-	.end_use = amdgpu_vcn_ring_end_use,
-	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
-	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
-	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
-static const struct amdgpu_ring_funcs vcn_v2_6_dec_ring_vm_funcs = {
-	.type = AMDGPU_RING_TYPE_VCN_DEC,
-	.align_mask = 0xf,
-	.secure_submission_supported = true,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
 	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
 	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
@@ -1689,7 +1672,6 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_ENC,
 	.align_mask = 0x3f,
 	.nop = VCN_ENC_CMD_NO_OP,
-	.vmhub = AMDGPU_MMHUB_1,
 	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
 	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
 	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
@@ -1715,36 +1697,6 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
-static const struct amdgpu_ring_funcs vcn_v2_6_enc_ring_vm_funcs = {
-	.type = AMDGPU_RING_TYPE_VCN_ENC,
-	.align_mask = 0x3f,
-	.nop = VCN_ENC_CMD_NO_OP,
-	.vmhub = AMDGPU_MMHUB_0,
-	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
-	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
-	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
-	.emit_frame_size =
-		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
-		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
-		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
-		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
-		1, /* vcn_v2_0_enc_ring_insert_end */
-	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
-	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
-	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
-	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
-	.test_ring = amdgpu_vcn_enc_ring_test_ring,
-	.test_ib = amdgpu_vcn_enc_ring_test_ib,
-	.insert_nop = amdgpu_ring_insert_nop,
-	.insert_end = vcn_v2_0_enc_ring_insert_end,
-	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.begin_use = amdgpu_vcn_ring_begin_use,
-	.end_use = amdgpu_vcn_ring_end_use,
-	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
-	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
-	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
-};
-
 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
 {
 	int i;
@@ -1752,10 +1704,7 @@ static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
-		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
-			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
-		else /* CHIP_ALDEBARAN */
-			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs;
+		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
 		adev->vcn.inst[i].ring_dec.me = i;
 		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
 	}
@@ -1769,10 +1718,7 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
 		if (adev->vcn.harvest_config & (1 << j))
 			continue;
 		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-			if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
-				adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
-			else /* CHIP_ALDEBARAN */
-				adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_6_enc_ring_vm_funcs;
+			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
 			adev->vcn.inst[j].ring_enc[i].me = j;
 		}
 		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
@@ -2031,6 +1977,4 @@ static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
 	default:
 		break;
 	}
-
-	amdgpu_vcn_set_ras_funcs(adev);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 66439388faee..3eab186261aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -189,6 +189,7 @@ static int vcn_v3_0_sw_init(void *handle)
 		} else {
 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
 		}
+		ring->vm_hub = AMDGPU_MMHUB_0;
 		sprintf(ring->name, "vcn_dec_%d", i);
 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
 				     AMDGPU_RING_PRIO_DEFAULT,
@@ -212,6 +213,7 @@ static int vcn_v3_0_sw_init(void *handle)
 			} else {
 				ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
 			}
+			ring->vm_hub = AMDGPU_MMHUB_0;
 			sprintf(ring->name, "vcn_enc_%d.%d", i, j);
 			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
 					     hw_prio, &adev->vcn.inst[i].sched_score);
@@ -1738,7 +1740,6 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
 	.align_mask = 0x3f,
 	.nop = VCN_DEC_SW_CMD_NO_OP,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
 	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
 	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
@@ -1899,7 +1900,6 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_DEC,
 	.align_mask = 0xf,
 	.secure_submission_supported = true,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
 	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
 	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
@@ -2000,7 +2000,6 @@ static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_ENC,
 	.align_mask = 0x3f,
 	.nop = VCN_ENC_CMD_NO_OP,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v3_0_enc_ring_get_rptr,
 	.get_wptr = vcn_v3_0_enc_ring_get_wptr,
 	.set_wptr = vcn_v3_0_enc_ring_set_wptr,
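With vm_hub now stored per ring, the Aldebaran-only vcn_v2_6_*_ring_vm_funcs tables above could be deleted: they differed from the vcn_v2_5 tables only in .vmhub, and the hub is instead chosen per instance in sw_init (MMHUB 1 for VCN 2.5.0, MMHUB 0 otherwise). A rough, self-contained model of that selection is below; IP_VERSION, ring_ops, vcn_inst and the rest are stand-ins invented for the example, not the driver's definitions.

/*
 * Illustrative sketch only: a single shared, read-only ops table plus a
 * per-instance hub choice replaces two nearly identical const tables.
 */
#include <stdio.h>

#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

enum hub_id { HUB_MM0, HUB_MM1 };

struct ring_ops { const char *label; };		/* one table for every ASIC */
static const struct ring_ops vcn_dec_ops = { "vcn dec ops" };

struct vcn_inst {
	const struct ring_ops *ops;
	enum hub_id vm_hub;
};

static void vcn_inst_init(struct vcn_inst *inst, unsigned int uvd_ip_version)
{
	inst->ops = &vcn_dec_ops;		/* shared table */
	if (uvd_ip_version == IP_VERSION(2, 5, 0))
		inst->vm_hub = HUB_MM1;		/* VCN 2.5 flushes through MMHUB 1 */
	else
		inst->vm_hub = HUB_MM0;		/* e.g. VCN 2.6 on Aldebaran */
}

int main(void)
{
	struct vcn_inst a, b;

	vcn_inst_init(&a, IP_VERSION(2, 5, 0));
	vcn_inst_init(&b, IP_VERSION(2, 6, 0));
	printf("2.5.0 -> hub %d, 2.6.0 -> hub %d (ops table shared: %s)\n",
	       (int)a.vm_hub, (int)b.vm_hub, a.ops == b.ops ? "yes" : "no");
	return 0;
}

The same hunks also move RAS registration for VCN 2.5 into sw_init via amdgpu_vcn_ras_sw_init(), dropping the old amdgpu_vcn_set_ras_funcs() call from vcn_v2_5_set_ras_funcs(); the vcn_v4_0 diff below makes the matching change for VCN 4.0.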
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 43d587404c3e..bf0674039598 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -149,7 +149,7 @@ static int vcn_v4_0_sw_init(void *handle)
 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1;
 		else
 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
-
+		ring->vm_hub = AMDGPU_MMHUB_0;
 		sprintf(ring->name, "vcn_unified_%d", i);
 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
@@ -181,6 +181,10 @@ static int vcn_v4_0_sw_init(void *handle)
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
 		adev->vcn.pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
+	r = amdgpu_vcn_ras_sw_init(adev);
+	if (r)
+		return r;
+
 	return 0;
 }
@@ -1794,7 +1798,6 @@ static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_ENC,
 	.align_mask = 0x3f,
 	.nop = VCN_ENC_CMD_NO_OP,
-	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v4_0_unified_ring_get_rptr,
 	.get_wptr = vcn_v4_0_unified_ring_get_wptr,
 	.set_wptr = vcn_v4_0_unified_ring_set_wptr,
@@ -2123,6 +2126,4 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev)
 	default:
 		break;
 	}
-
-	amdgpu_vcn_set_ras_funcs(adev);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 1706081d054d..536128447b71 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -38,6 +38,11 @@
 #define mmIH_CHICKEN_ALDEBARAN			0x18d
 #define mmIH_CHICKEN_ALDEBARAN_BASE_IDX		0
+#define mmIH_RETRY_INT_CAM_CNTL_ALDEBARAN		0x00ea
+#define mmIH_RETRY_INT_CAM_CNTL_ALDEBARAN_BASE_IDX	0
+#define IH_RETRY_INT_CAM_CNTL_ALDEBARAN__ENABLE__SHIFT	0x10
+#define IH_RETRY_INT_CAM_CNTL_ALDEBARAN__ENABLE_MASK	0x00010000L
+
 static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev);
 /**
@@ -251,36 +256,14 @@ static int vega20_ih_enable_ring(struct amdgpu_device *adev,
 	return 0;
 }
-/**
- * vega20_ih_reroute_ih - reroute VMC/UTCL2 ih to an ih ring
- *
- * @adev: amdgpu_device pointer
- *
- * Reroute VMC and UMC interrupts on primary ih ring to
- * ih ring 1 so they won't lose when bunches of page faults
- * interrupts overwhelms the interrupt handler(VEGA20)
- */
-static void vega20_ih_reroute_ih(struct amdgpu_device *adev)
+static uint32_t vega20_setup_retry_doorbell(u32 doorbell_index)
 {
-	uint32_t tmp;
+	u32 val = 0;
-	/* vega20 ih reroute will go through psp this
-	 * function is used for newer asics starting arcturus
-	 */
-	if (adev->ip_versions[OSSSYS_HWIP][0] >= IP_VERSION(4, 2, 1)) {
-		/* Reroute to IH ring 1 for VMC */
-		WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
-		tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
-		tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
-		tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-		WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
-
-		/* Reroute IH ring 1 for UTCL2 */
-		WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
-		tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
-		tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-		WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
-	}
+	val = REG_SET_FIELD(val, IH_DOORBELL_RPTR, OFFSET, doorbell_index);
+	val = REG_SET_FIELD(val, IH_DOORBELL_RPTR, ENABLE, 1);
+
+	return val;
 }
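vega20_setup_retry_doorbell() above builds the IH_DOORBELL_RETRY_CAM register value by packing an OFFSET and an ENABLE field with REG_SET_FIELD(), and the new defines at the top of the file expose the Aldebaran retry-CAM enable bit (shift 0x10, mask 0x00010000). The standalone sketch below mimics that shift/mask packing; the OFFSET/ENABLE bit positions used for the doorbell value are assumptions made for the example, and only the CAM-enable shift and mask are taken from the defines added in this patch.

/*
 * Illustrative sketch only: shift/mask packing in the style of
 * REG_SET_FIELD().  The doorbell field geometry below is assumed for
 * the example; the CAM-enable shift/mask mirror the new defines above.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_SET(val, shift, mask, x) \
	(((val) & ~(mask)) | (((uint32_t)(x) << (shift)) & (mask)))

/* assumed geometry for the doorbell register in this example */
#define DOORBELL_OFFSET_SHIFT	0
#define DOORBELL_OFFSET_MASK	0x0FFFFFFFu
#define DOORBELL_ENABLE_SHIFT	28
#define DOORBELL_ENABLE_MASK	0x10000000u

/* bit 16, as in IH_RETRY_INT_CAM_CNTL_ALDEBARAN__ENABLE above */
#define CAM_ENABLE_SHIFT	0x10
#define CAM_ENABLE_MASK		0x00010000u

static uint32_t setup_retry_doorbell(uint32_t doorbell_index)
{
	uint32_t val = 0;

	val = FIELD_SET(val, DOORBELL_OFFSET_SHIFT, DOORBELL_OFFSET_MASK,
			doorbell_index);
	val = FIELD_SET(val, DOORBELL_ENABLE_SHIFT, DOORBELL_ENABLE_MASK, 1);
	return val;
}

static uint32_t enable_retry_cam(uint32_t cntl)
{
	return FIELD_SET(cntl, CAM_ENABLE_SHIFT, CAM_ENABLE_MASK, 1);
}

int main(void)
{
	printf("retry doorbell value: 0x%08x\n", setup_retry_doorbell(0x26));
	printf("cam cntl after enable: 0x%08x\n", enable_retry_cam(0));
	return 0;
}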
 /**
@@ -321,7 +304,8 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
 	/* psp firmware won't program IH_CHICKEN for aldebaran
 	 * driver needs to program it properly according to
 	 * MC_SPACE type in IH_RB_CNTL */
-	if (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0)) {
+	if ((adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0)) ||
+	    (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2))) {
 		ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN);
 		if (adev->irq.ih.use_bus_addr) {
 			ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
@@ -332,8 +316,6 @@
 	for (i = 0; i < ARRAY_SIZE(ih); i++) {
 		if (ih[i]->ring_size) {
-			if (i == 1)
-				vega20_ih_reroute_ih(adev);
 			ret = vega20_ih_enable_ring(adev, ih[i]);
 			if (ret)
 				return ret;
@@ -346,6 +328,20 @@
 	pci_set_master(adev->pdev);
+	/* Allocate the doorbell for IH Retry CAM */
+	adev->irq.retry_cam_doorbell_index = (adev->doorbell_index.ih + 3) << 1;
+	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RETRY_CAM,
+		     vega20_setup_retry_doorbell(adev->irq.retry_cam_doorbell_index));
+
+	/* Enable IH Retry CAM */
+	if (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0))
+		WREG32_FIELD15(OSSSYS, 0, IH_RETRY_INT_CAM_CNTL_ALDEBARAN,
+			       ENABLE, 1);
+	else
+		WREG32_FIELD15(OSSSYS, 0, IH_RETRY_INT_CAM_CNTL, ENABLE, 1);
+
+	adev->irq.retry_cam_enabled = true;
+
 	/* enable interrupts */
 	ret = vega20_ih_toggle_interrupts(adev, true);
 	if (ret)
@@ -551,12 +547,14 @@ static int vega20_ih_sw_init(void *handle)
 	adev->irq.ih1.use_doorbell = true;
 	adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
-	r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
-	if (r)
-		return r;
+	if (adev->ip_versions[OSSSYS_HWIP][0] != IP_VERSION(4, 4, 2)) {
+		r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+		if (r)
+			return r;
-	adev->irq.ih2.use_doorbell = true;
-	adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+		adev->irq.ih2.use_doorbell = true;
+		adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+	}
 	/* initialize ih control registers offset */
 	vega20_ih_init_register_offset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index ceab8783575c..531f173ade2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1101,24 +1101,6 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
 	return 0;
 }
-static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
-{
-	if (pci_is_root_bus(adev->pdev->bus))
-		return;
-
-	if (amdgpu_pcie_gen2 == 0)
-		return;
-
-	if (adev->flags & AMD_IS_APU)
-		return;
-
-	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
-					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
-		return;
-
-	/* todo */
-}
-
 static void vi_enable_aspm(struct amdgpu_device *adev)
 {
 	u32 data, orig;
@@ -1728,8 +1710,6 @@ static int vi_common_hw_init(void *handle)
 	/* move the golden regs per IP block */
 	vi_init_golden_registers(adev);
-	/* enable pcie gen2/3 link */
-	vi_pcie_gen3_enable(adev);
 	/* enable aspm */
 	vi_program_aspm(adev);
 	/* enable the doorbell aperture */
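Taken together, the vega20_ih hunks imply a simple doorbell slot layout for the interrupt handler: ih at the base index, ih1 at +1, ih2 at +2 (now skipped on IH 4.4.2), and the new retry-CAM doorbell at +3, each scaled by << 1 as in the driver. The example below only restates that arithmetic; the base index is a made-up value and the reason for the << 1 scaling is not asserted here.

/*
 * Illustrative sketch only: the IH doorbell offsets visible in the
 * vega20_ih hunks above, computed for an example base index.
 */
#include <stdbool.h>
#include <stdio.h>

struct ih_doorbells {
	unsigned int ih, ih1, ih2, retry_cam;
	bool has_ih2;
};

static struct ih_doorbells layout_doorbells(unsigned int ih_base, bool has_ih2)
{
	struct ih_doorbells d = {
		.ih        = ih_base << 1,
		.ih1       = (ih_base + 1) << 1,
		.ih2       = (ih_base + 2) << 1,	/* not allocated on IH 4.4.2 */
		.retry_cam = (ih_base + 3) << 1,
		.has_ih2   = has_ih2,
	};
	return d;
}

int main(void)
{
	struct ih_doorbells d = layout_doorbells(0x1f8, /*has_ih2=*/false);

	printf("ih=0x%x ih1=0x%x retry_cam=0x%x (ih2 %s)\n",
	       d.ih, d.ih1, d.retry_cam,
	       d.has_ih2 ? "present" : "not allocated");
	return 0;
}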