Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
90 files changed, 3248 insertions, 1465 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 6bf6cfaea3f1..13ebb1f71e49 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -56,7 +56,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
 	amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
 	amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
-	amdgpu_fw_attestation.o
+	amdgpu_fw_attestation.o amdgpu_securedisplay.o
 
 amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
 
@@ -71,7 +71,7 @@ amdgpu-y += \
 	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
 	vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
 	arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
-	nbio_v7_2.o dimgrey_cavefish_reg_init.o
+	nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o
 
 # add DF block
 amdgpu-y += \
@@ -97,6 +97,7 @@ amdgpu-y += \
 	tonga_ih.o \
 	cz_ih.o \
 	vega10_ih.o \
+	vega20_ih.o \
 	navi10_ih.o
 
 # add PSP block
@@ -170,7 +171,8 @@ amdgpu-y += \
 # add SMUIO block
 amdgpu-y += \
 	smuio_v9_0.o \
-	smuio_v11_0.o
+	smuio_v11_0.o \
+	smuio_v11_0_6.o
 
 # add amdkfd interfaces
 amdgpu-y += amdgpu_amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5993dd0fdd8e..86452f4f3c82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -55,7 +55,6 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 
 #include <drm/amdgpu_drm.h>
@@ -89,6 +88,7 @@
 #include "amdgpu_gfx.h"
 #include "amdgpu_sdma.h"
 #include "amdgpu_nbio.h"
+#include "amdgpu_hdp.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_virt.h"
 #include "amdgpu_csa.h"
@@ -107,6 +107,7 @@
 #include "amdgpu_gfxhub.h"
 #include "amdgpu_df.h"
 #include "amdgpu_smuio.h"
+#include "amdgpu_hdp.h"
 
 #define MAX_GPU_INSTANCE		16
 
@@ -286,7 +287,7 @@ enum amdgpu_kiq_irq {
 
 #define MAX_KIQ_REG_WAIT	5000 /* in usecs, 5ms */
 #define MAX_KIQ_REG_BAILOUT_INTERVAL	5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
+#define MAX_KIQ_REG_TRY 1000
 
 int amdgpu_device_ip_set_clockgating_state(void *dev,
 					   enum amd_ip_block_type block_type,
@@ -578,7 +579,8 @@ enum amd_reset_method {
 	AMD_RESET_METHOD_MODE0,
 	AMD_RESET_METHOD_MODE1,
 	AMD_RESET_METHOD_MODE2,
-	AMD_RESET_METHOD_BACO
+	AMD_RESET_METHOD_BACO,
+	AMD_RESET_METHOD_PCI,
 };
 
 /*
@@ -608,7 +610,6 @@ struct amdgpu_asic_funcs {
 	/* invalidate hdp read cache */
 	void (*invalidate_hdp)(struct amdgpu_device *adev,
 			       struct amdgpu_ring *ring);
-	void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev);
 	/* check if the asic needs a full reset of if soft reset will work */
 	bool (*need_full_reset)(struct amdgpu_device *adev);
 	/* initialize doorbell layout for specific asic*/
@@ -891,6 +892,7 @@ struct amdgpu_device {
 	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
 	struct work_struct		hotplug_work;
 	struct amdgpu_irq_src		crtc_irq;
+	struct amdgpu_irq_src		vline0_irq;
 	struct amdgpu_irq_src		vupdate_irq;
 	struct amdgpu_irq_src		pageflip_irq;
 	struct amdgpu_irq_src		hpd_irq;
@@ -921,6 +923,9 @@ struct amdgpu_device {
 	/* nbio */
 	struct amdgpu_nbio		nbio;
 
+	/* hdp */
+	struct amdgpu_hdp		hdp;
+
 	/* smuio */
 	struct amdgpu_smuio		smuio;
 
@@ -1202,8 +1207,10 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
-#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
+#define amdgpu_asic_flush_hdp(adev, r) \
+	((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
+#define amdgpu_asic_invalidate_hdp(adev, r) \
+	((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : (adev)->hdp.funcs->invalidate_hdp((adev), (r)))
 #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
 #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
 #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
@@ -1222,6 +1229,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			      struct amdgpu_job* job);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
 
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index db96d69eb45e..c5343a5eecbe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -47,12 +47,8 @@ int amdgpu_amdkfd_init(void)
 	amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
 	amdgpu_amdkfd_total_mem_size *= si.mem_unit;
 
-#ifdef CONFIG_HSA_AMD
 	ret = kgd2kfd_init();
 	amdgpu_amdkfd_gpuvm_init_mem_limits();
-#else
-	ret = -ENOENT;
-#endif
 	kfd_initialized = !ret;
 
 	return ret;
@@ -696,86 +692,3 @@ bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
 
 	return adev->have_atomics_support;
 }
-
-#ifndef CONFIG_HSA_AMD
-bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
-{
-	return false;
-}
-
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
-{
-}
-
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
-{
-	return 0;
-}
-
-void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm)
-{
-}
-
-struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
-{
-	return NULL;
-}
-
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
-{
-	return 0;
-}
-
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
-			      unsigned int asic_type, bool vf)
-{
-	return NULL;
-}
-
-bool kgd2kfd_device_init(struct kfd_dev *kfd,
-			 struct drm_device *ddev,
-			 const struct kgd2kfd_shared_resources *gpu_resources)
-{
-	return false;
-}
-
-void kgd2kfd_device_exit(struct kfd_dev *kfd)
-{
-}
-
-void kgd2kfd_exit(void)
-{
-}
-
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
-{
-}
-
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
-{
-	return 0;
-}
-
-int kgd2kfd_pre_reset(struct kfd_dev *kfd)
-{
-	return 0;
-}
-
-int kgd2kfd_post_reset(struct kfd_dev *kfd)
-{
-	return 0;
-}
-
-void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
-{
-}
-
-void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
-{
-}
-
-void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
-{
-}
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index ea391ca7f2f1..a81d9cacf9b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -94,11 +94,6 @@ enum kgd_engine_type {
 	KGD_ENGINE_MAX
 };
 
-struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
-						struct mm_struct *mm);
-bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
-struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
 
 struct amdkfd_process_info {
 	/* List head of all VMs that belong to a KFD process */
@@ -132,8 +127,6 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
 void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev);
-
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
 int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
 				uint32_t vmid, uint64_t gpu_addr,
 				uint32_t *ib_cmd, uint32_t ib_len);
@@ -153,6 +146,38 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
 					int queue_bit);
 
+struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
+						struct mm_struct *mm);
+#if IS_ENABLED(CONFIG_HSA_AMD)
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
+int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
+#else
+static inline
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
+{
+	return false;
+}
+
+static inline
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
+{
+	return NULL;
+}
+
+static inline
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+	return 0;
+}
+
+static inline
+int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
+{
+	return 0;
+}
+#endif
 /* Shared API */
 int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 				void **mem_obj, uint64_t *gpu_addr,
@@ -215,8 +240,6 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
 					struct file *filp, u32 pasid,
 					void **vm, void **process_info,
 					struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
-					struct amdgpu_vm *vm);
 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
@@ -236,23 +259,43 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
 		struct kgd_mem *mem, void **kptr, uint64_t *size);
 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
 					    struct dma_fence **ef);
-
 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
 					      struct kfd_vm_fault_info *info);
-
 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
 				      struct dma_buf *dmabuf,
 				      uint64_t va, void *vm,
 				      struct kgd_mem **mem, uint64_t *size,
 				      uint64_t *mmap_offset);
-
-void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
-
 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
 				struct tile_config *config);
+#if IS_ENABLED(CONFIG_HSA_AMD)
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+				struct amdgpu_vm *vm);
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+#else
+static inline
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
+{
+}
+
+static inline
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+					struct amdgpu_vm *vm)
+{
+}
+
+static inline
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
+{
+}
+#endif
 /* KGD2KFD callbacks */
+int kgd2kfd_quiesce_mm(struct mm_struct *mm);
+int kgd2kfd_resume_mm(struct mm_struct *mm);
+int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
+				struct dma_fence *fence);
+#if IS_ENABLED(CONFIG_HSA_AMD)
 int kgd2kfd_init(void);
 void kgd2kfd_exit(void);
 struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
@@ -266,11 +309,68 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
-int kgd2kfd_quiesce_mm(struct mm_struct *mm);
-int kgd2kfd_resume_mm(struct mm_struct *mm);
-int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
-			struct dma_fence *fence);
 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);
+#else
+static inline int kgd2kfd_init(void)
+{
+	return -ENOENT;
+}
+
+static inline void kgd2kfd_exit(void)
+{
+}
+
+static inline
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
+			      unsigned int asic_type, bool vf)
+{
+	return NULL;
+}
+
+static inline
+bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev,
+			 const struct kgd2kfd_shared_resources *gpu_resources)
+{
+	return false;
+}
+
+static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
+{
+}
+
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+{
+}
+
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+{
+	return 0;
+}
+
+static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
+static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
+static inline
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+}
+
+static inline
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
+{
+}
+
+static inline
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
+{
+}
+#endif
 #endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 4763bab7a4d0..62aa1a6f64ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -23,7 +23,6 @@
 #include "amdgpu_amdkfd.h"
 #include "gc/gc_10_1_0_offset.h"
 #include "gc/gc_10_1_0_sh_mask.h"
-#include "navi10_enum.h"
 #include "athub/athub_2_0_0_offset.h"
 #include "athub/athub_2_0_0_sh_mask.h"
 #include "oss/osssys_5_0_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 50016bf9c427..fad3b91f74f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -24,7 +24,6 @@
 #include "amdgpu_amdkfd.h"
 #include "gc/gc_10_3_0_offset.h"
 #include "gc/gc_10_3_0_sh_mask.h"
-#include "navi10_enum.h"
 #include "oss/osssys_5_0_0_offset.h"
 #include "oss/osssys_5_0_0_sh_mask.h"
 #include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d1ed4f8df2b7..ac0a432a9bf7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -454,7 +454,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
 	struct amdgpu_bo *bo = mem->bo;
 	uint64_t va = mem->va;
 	struct list_head *list_bo_va = &mem->bo_va_list;
-	unsigned long bo_size = bo->tbo.mem.size;
+	unsigned long bo_size = bo->tbo.base.size;
 
 	if (!va) {
 		pr_err("Invalid VA when adding BO to VM\n");
@@ -1277,7 +1277,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 		struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
 {
 	struct amdkfd_process_info *process_info = mem->process_info;
-	unsigned long bo_size = mem->bo->tbo.mem.size;
+	unsigned long bo_size = mem->bo->tbo.base.size;
 	struct kfd_bo_va_list *entry, *tmp;
 	struct bo_vm_reservation_context ctx;
 	struct ttm_validate_buffer *bo_list_entry;
@@ -1398,7 +1398,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 
 	mutex_lock(&mem->lock);
 	domain = mem->domain;
-	bo_size = bo->tbo.mem.size;
+	bo_size = bo->tbo.base.size;
 
 	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
 			mem->va,
@@ -1502,7 +1502,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdkfd_process_info *process_info =
 		((struct amdgpu_vm *)vm)->process_info;
-	unsigned long bo_size = mem->bo->tbo.mem.size;
+	unsigned long bo_size = mem->bo->tbo.base.size;
 	struct kfd_bo_va_list *entry;
 	struct bo_vm_reservation_context ctx;
 	int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 055f600eeed8..cfb1a9a04477 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -155,7 +155,7 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
 	u8 header[AMD_VBIOS_SIGNATURE_END+1] = {0};
 	int len;
 
-	if (!adev->asic_funcs->read_bios_from_rom)
+	if (!adev->asic_funcs || !adev->asic_funcs->read_bios_from_rom)
 		return false;
 
 	/* validate VBIOS signature */
@@ -348,7 +348,8 @@ static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
 	if (adev->flags & AMD_IS_APU)
 		return igp_read_bios_from_vram(adev);
 	else
-		return amdgpu_asic_read_disabled_bios(adev);
+		return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ?
+			false : amdgpu_asic_read_disabled_bios(adev);
 }
 
 #ifdef CONFIG_ACPI
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 594a0108e90f..3e240b952e79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -98,8 +98,7 @@ static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
 	return 0;
 
 error_free:
-	if (info)
-		kvfree(info);
+	kvfree(info);
 
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a6667a2ca0db..0a25fecf488a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -35,6 +35,7 @@
 #include "amdgpu_dm_debugfs.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_rap.h"
+#include "amdgpu_securedisplay.h"
 #include "amdgpu_fw_attestation.h"
 
 /**
@@ -1427,7 +1428,7 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
 	struct dma_fence *fence;
 
 	spin_lock(&sched->job_list_lock);
-	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+	list_for_each_entry(s_job, &sched->pending_list, list) {
 		fence = sched->ops->run_job(s_job);
 		dma_fence_put(fence);
 	}
@@ -1459,10 +1460,10 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
 
 no_preempt:
 	spin_lock(&sched->job_list_lock);
-	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
 		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
 			/* remove job from ring_mirror_list */
-			list_del_init(&s_job->node);
+			list_del_init(&s_job->list);
 			sched->ops->free_job(s_job);
 			continue;
 		}
@@ -1669,6 +1670,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 
 	amdgpu_rap_debugfs_init(adev);
 
+	amdgpu_securedisplay_debugfs_init(adev);
+
 	amdgpu_fw_attestation_debugfs_init(adev);
 
 	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cab1ebaf6d62..7052dc35d278 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -929,6 +929,18 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
 }
 
+/**
+ * amdgpu_device_pci_reset - reset the GPU using generic PCI means
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
+ */
+int amdgpu_device_pci_reset(struct amdgpu_device *adev)
+{
+	return pci_reset_function(adev->pdev);
+}
+
 /*
  * GPU doorbell aperture helpers function.
 */
@@ -1105,8 +1117,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
 */
 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 {
-	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
-	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
+	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
 	struct pci_bus *root;
 	struct resource *res;
 	unsigned i;
@@ -1137,6 +1148,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 	if (!res)
 		return 0;
 
+	/* Limit the BAR size to what is available */
+	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
+			rbar_size);
+
 	/* Disable memory decoding while we change the BAR addresses and size */
 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
 	pci_write_config_word(adev->pdev, PCI_COMMAND,
@@ -1422,24 +1437,22 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
 		/* don't suspend or resume card normally */
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
-		pci_set_power_state(dev->pdev, PCI_D0);
-		amdgpu_device_load_pci_state(dev->pdev);
-		r = pci_enable_device(dev->pdev);
+		pci_set_power_state(pdev, PCI_D0);
+		amdgpu_device_load_pci_state(pdev);
+		r = pci_enable_device(pdev);
 		if (r)
 			DRM_WARN("pci_enable_device failed (%d)\n", r);
 		amdgpu_device_resume(dev, true);
 
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
-		drm_kms_helper_poll_enable(dev);
 	} else {
 		pr_info("switched off\n");
-		drm_kms_helper_poll_disable(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		amdgpu_device_suspend(dev, true);
-		amdgpu_device_cache_pci_state(dev->pdev);
+		amdgpu_device_cache_pci_state(pdev);
 		/* Shut down the device */
-		pci_disable_device(dev->pdev);
-		pci_set_power_state(dev->pdev, PCI_D3cold);
+		pci_disable_device(pdev);
+		pci_set_power_state(pdev, PCI_D3cold);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
@@ -1702,8 +1715,7 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
 	adev->enable_virtual_display = false;
 
 	if (amdgpu_virtual_display) {
-		struct drm_device *ddev = adev_to_drm(adev);
-		const char *pci_address_name = pci_name(ddev->pdev);
+		const char *pci_address_name = pci_name(adev->pdev);
 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
 
 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
@@ -3116,7 +3128,10 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
 	 */
 	adev->gfx_timeout = msecs_to_jiffies(10000);
 	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
-	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+	if (amdgpu_sriov_vf(adev))
+		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
+					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
+	else if (amdgpu_passthrough(adev))
 		adev->compute_timeout =  msecs_to_jiffies(60000);
 	else
 		adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
@@ -3396,7 +3411,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		}
 	}
 
-	pci_enable_pcie_error_reporting(adev->ddev.pdev);
+	pci_enable_pcie_error_reporting(adev->pdev);
 
 	/* Post card if necessary */
 	if (amdgpu_device_need_post(adev)) {
@@ -3719,7 +3734,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
 	r = amdgpu_device_ip_suspend_phase1(adev);
 
-	amdgpu_amdkfd_suspend(adev, !fbcon);
+	amdgpu_amdkfd_suspend(adev, adev->in_runpm);
 
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
@@ -3803,7 +3818,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 			}
 		}
 	}
-	r = amdgpu_amdkfd_resume(adev, !fbcon);
+	r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
 	if (r)
 		return r;
 
@@ -4154,8 +4169,8 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
 			continue;
 
 		spin_lock(&ring->sched.job_list_lock);
-		job = list_first_entry_or_null(&ring->sched.ring_mirror_list,
-				struct drm_sched_job, node);
+		job = list_first_entry_or_null(&ring->sched.pending_list,
+					       struct drm_sched_job, list);
 		spin_unlock(&ring->sched.job_list_lock);
 		if (job)
 			return true;
@@ -4205,6 +4220,8 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 		case CHIP_NAVI14:
 		case CHIP_NAVI12:
 		case CHIP_SIENNA_CICHLID:
+		case CHIP_NAVY_FLOUNDER:
+		case CHIP_DIMGREY_CAVEFISH:
 			break;
 		default:
 			goto disabled;
@@ -4454,6 +4471,46 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
 	up_write(&adev->reset_sem);
 }
 
+/*
+ * to lock a list of amdgpu devices in a hive safely; if not a hive
+ * with multiple nodes, this behaves like amdgpu_device_lock_adev.
+ *
+ * unlock won't require a roll back.
+ */
+static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
+{
+	struct amdgpu_device *tmp_adev = NULL;
+
+	if (adev->gmc.xgmi.num_physical_nodes > 1) {
+		if (!hive) {
+			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
+			return -ENODEV;
+		}
+		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+			if (!amdgpu_device_lock_adev(tmp_adev, hive))
+				goto roll_back;
+		}
+	} else if (!amdgpu_device_lock_adev(adev, hive))
+		return -EAGAIN;
+
+	return 0;
+roll_back:
+	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
+		/*
+		 * if the lock iteration broke in the middle of a hive,
+		 * it may mean there is a race issue, or that a hive device
+		 * locked up independently. We may or may not be in trouble,
+		 * so roll back the locks taken so far and give out a warning.
+		 */
+		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
+		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+			amdgpu_device_unlock_adev(tmp_adev);
+		}
+	}
+	return -EAGAIN;
+}
+
 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
 {
 	struct pci_dev *p = NULL;
@@ -4567,20 +4624,36 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
 				job ? job->base.id : -1, hive->hive_id);
 			amdgpu_put_xgmi_hive(hive);
+			if (job)
+				drm_sched_increase_karma(&job->base);
 			return 0;
 		}
 		mutex_lock(&hive->hive_lock);
 	}
 
 	/*
+	 * lock the device before we try to operate on the linked list;
+	 * if we didn't get the device lock, don't touch the linked list
+	 * since others may be iterating it.
+	 */
+	r = amdgpu_device_lock_hive_adev(adev, hive);
+	if (r) {
+		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
+					job ? job->base.id : -1);
+
+		/* even though we skipped this reset, we still need to set the job to guilty */
+		if (job)
+			drm_sched_increase_karma(&job->base);
+		goto skip_recovery;
+	}
+
+	/*
 	 * Build list of devices to reset.
 	 * In case we are in XGMI hive mode, resort the device list
 	 * to put adev in the 1st position.
 	 */
 	INIT_LIST_HEAD(&device_list);
 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
-		if (!hive)
-			return -ENODEV;
 		if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
 			list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
 		device_list_handle = &hive->device_list;
@@ -4591,13 +4664,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
 	/* block all schedulers and reset given job's ring */
 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-		if (!amdgpu_device_lock_adev(tmp_adev, hive)) {
-			dev_info(tmp_adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
-				  job ? job->base.id : -1);
-			r = 0;
-			goto skip_recovery;
-		}
-
 		/*
 		 * Try to put the audio codec into suspend state
 		 * before gpu reset started.
@@ -4735,7 +4801,7 @@ skip_recovery:
 		amdgpu_put_xgmi_hive(hive);
 	}
 
-	if (r)
+	if (r && r != -EAGAIN)
 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
 	return r;
 }
@@ -4785,7 +4851,13 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 				CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 				CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
 		} else {
-			if (speed_cap == PCIE_SPEED_16_0GT)
+			if (speed_cap == PCIE_SPEED_32_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
+							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
+			else if (speed_cap == PCIE_SPEED_16_0GT)
 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
@@ -4805,7 +4877,13 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
 		} else {
-			if (platform_speed_cap == PCIE_SPEED_16_0GT)
+			if (platform_speed_cap == PCIE_SPEED_32_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
+			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
@@ -4949,8 +5027,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
 	case pci_channel_io_normal:
 		return PCI_ERS_RESULT_CAN_RECOVER;
 	/* Fatal error, prepare for slot reset */
-	case pci_channel_io_frozen:
-		/*
+	case pci_channel_io_frozen:
+		/*
 		 * Cancel and wait for all TDRs in progress if failing to
 		 * set adev->in_gpu_reset in amdgpu_device_lock_adev
 		 *
@@ -5041,7 +5119,7 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
 		goto out;
 	}
 
-	adev->in_pci_err_recovery = true;
+	adev->in_pci_err_recovery = true;
 	r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
 	adev->in_pci_err_recovery = false;
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e42175e1acf1..47e0b48dc26f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -40,6 +40,7 @@
 #include <linux/dma-buf.h>
 #include <linux/dma-fence-array.h>
 #include <linux/pci-p2pdma.h>
+#include <linux/pm_runtime.h>
 
 /**
  * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
@@ -151,9 +152,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
 	if (attach->dev->driver == adev->dev->driver)
 		return 0;
 
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+	if (r < 0)
+		goto out;
+
 	r = amdgpu_bo_reserve(bo, false);
 	if (unlikely(r != 0))
-		return r;
+		goto out;
 
 	/*
 	 * We only create shared fences for internal use, but importers
@@ -165,11 +170,15 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
 	 */
 	r = __dma_resv_make_exclusive(bo->tbo.base.resv);
 	if (r)
-		return r;
+		goto out;
 
 	bo->prime_shared_count++;
 	amdgpu_bo_unreserve(bo);
 	return 0;
+
+out:
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+	return r;
 }
 
 /**
@@ -189,6 +198,9 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
 
 	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
 		bo->prime_shared_count--;
+
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 }
 
 /**
@@ -269,7 +281,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 	case TTM_PL_TT:
 		sgt = drm_prime_pages_to_sg(obj->dev,
 					    bo->tbo.ttm->pages,
-					    bo->tbo.num_pages);
+					    bo->tbo.ttm->num_pages);
 		if (IS_ERR(sgt))
 			return sgt;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7169fb5e3d9c..03c529630c21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -132,8 +132,12 @@ uint amdgpu_pg_mask = 0xffffffff;
 uint amdgpu_sdma_phase_quantum = 32;
 char *amdgpu_disable_cu = NULL;
 char *amdgpu_virtual_display = NULL;
-/* OverDrive(bit 14) disabled by default*/
-uint amdgpu_pp_feature_mask = 0xffffbfff;
+
+/*
+ * OverDrive(bit 14) disabled by default
+ * GFX DCS(bit 19) disabled by default
+ */
+uint amdgpu_pp_feature_mask = 0xfff7bfff;
 uint amdgpu_force_long_training;
 int amdgpu_job_hang_limit;
 int amdgpu_lbpw = -1;
@@ -789,9 +793,9 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);
 
 /**
 * DOC: reset_method (int)
- * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
+ * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco, 5 = pci)
 */
-MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
+MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco, 5 = pci)");
 module_param_named(reset_method, amdgpu_reset_method, int, 0444);
 
 /**
@@ -1094,6 +1098,7 @@ static const struct pci_device_id pciidlist[] = {
 
 	/* Sienna_Cichlid */
 	{0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+	{0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 	{0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 	{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 	{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
@@ -1206,7 +1211,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 	if (ret)
 		return ret;
 
-	ddev->pdev = pdev;
 	pci_set_drvdata(pdev, ddev);
 
 	ret = amdgpu_driver_load_kms(adev, ent->driver_data);
@@ -1344,11 +1348,12 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 	adev->in_runpm = true;
 	if (amdgpu_device_supports_atpx(drm_dev))
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-	drm_kms_helper_poll_disable(drm_dev);
 
 	ret = amdgpu_device_suspend(drm_dev, false);
-	if (ret)
+	if (ret) {
+		adev->in_runpm = false;
 		return ret;
+	}
 
 	if (amdgpu_device_supports_atpx(drm_dev)) {
 		/* Only need to handle PCI state in the driver for ATPX
@@ -1401,7 +1406,6 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
 		amdgpu_device_baco_exit(drm_dev);
 	}
 	ret = amdgpu_device_resume(drm_dev, false);
-	drm_kms_helper_poll_enable(drm_dev);
 	if (amdgpu_device_supports_atpx(drm_dev))
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	adev->in_runpm = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 0bf7d36c6686..51cd49c6f38f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -271,7 +271,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	DRM_INFO("fb depth is %d\n", fb->format->depth);
 	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
 
-	vga_switcheroo_client_fb_set(adev_to_drm(adev)->pdev, info);
+	vga_switcheroo_client_fb_set(adev->pdev, info);
 	return 0;
 
 out:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
index 7c6e02e35573..8d1ad294cb02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -47,10 +47,9 @@ typedef struct FW_ATT_RECORD
 	uint16_t AttFwIdV2;		/* V2 FW ID field */
 	uint32_t AttFWVersion;		/* FW Version */
 	uint16_t AttFWActiveFunctionID;	/* The VF ID (only in VF Attestation Table) */
-	uint16_t AttSource;		/* FW source indicator */
-	uint16_t RecordValid;		/* Indicates whether the record is a valid entry */
-	uint8_t  AttFwTaId;		/* Ta ID (only in TA Attestation Table) */
-	uint8_t  Reserved;
+	uint8_t  AttSource;		/* FW source indicator */
+	uint8_t  RecordValid;		/* Indicates whether the record is a valid entry */
+	uint32_t AttFwTaId;		/* Ta ID (only in TA Attestation Table) */
 } FW_ATT_RECORD;
 
 static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 174a73eb23f0..b443907afcea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -619,7 +619,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	int r = 0;
 
 	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
-		dev_dbg(&dev->pdev->dev,
+		dev_dbg(dev->dev,
 			"va_address 0x%LX is in reserved area 0x%LX\n",
 			args->va_address, AMDGPU_VA_RESERVED_SIZE);
 		return -EINVAL;
@@ -627,7 +627,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
 	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
 	    args->va_address < AMDGPU_GMC_HOLE_END) {
-		dev_dbg(&dev->pdev->dev,
+		dev_dbg(dev->dev,
 			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
 			args->va_address, AMDGPU_GMC_HOLE_START,
 			AMDGPU_GMC_HOLE_END);
@@ -639,14 +639,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 	vm_size -= AMDGPU_VA_RESERVED_SIZE;
 	if (args->va_address + args->map_size > vm_size) {
-		dev_dbg(&dev->pdev->dev,
+		dev_dbg(dev->dev,
 			"va_address 0x%llx is in top reserved area 0x%llx\n",
 			args->va_address + args->map_size, vm_size);
 		return -EINVAL;
 	}
 
 	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
-		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
+		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
 			args->flags);
 		return -EINVAL;
 	}
@@ -658,7 +658,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	case AMDGPU_VA_OP_REPLACE:
 		break;
 	default:
-		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
+		dev_dbg(dev->dev, "unsupported operation %d\n",
 			args->operation);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index cd2c676a2797..8e0a6c62322e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -193,15 +193,16 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
 }
 
 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
-					       int pipe, int queue)
+					       struct amdgpu_ring *ring)
 {
-	bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
-	int cond;
-
-	/* Policy: alternate between normal and high priority */
-	cond = multipipe_policy ? pipe : queue;
-
-	return ((cond % 2) != 0);
+	/* Policy: use 1st queue as high priority compute queue if we
+	 * have more than one compute queue.
+	 */
+	if (adev->gfx.num_compute_rings > 1 &&
+	    ring == &adev->gfx.compute_ring[0])
+		return true;
+
+	return false;
 }
 
 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 6b5a8f4642cc..72dbcd2bc6a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -380,7 +380,7 @@ void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
 				     int mec, int pipe, int queue);
 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
-					       int pipe, int queue);
+					       struct amdgpu_ring *ring);
 int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
 			       int pipe, int queue);
 void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 6e679db5e46f..fe1a39ffda72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -120,7 +120,7 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 
-	if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
+	if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
 		return AMDGPU_BO_INVALID_OFFSET;
 
 	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
new file mode 100644
index 000000000000..43caf9f8cc11
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_HDP_H__
+#define __AMDGPU_HDP_H__
+
+struct amdgpu_hdp_funcs {
+	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+	void (*invalidate_hdp)(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring);
+	void (*reset_ras_error_count)(struct amdgpu_device *adev);
+	void (*update_clock_gating)(struct amdgpu_device *adev, bool enable);
+	void (*get_clock_gating_state)(struct amdgpu_device *adev, u32 *flags);
+	void (*init_registers)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_hdp {
+	const struct amdgpu_hdp_funcs *funcs;
+};
+
+#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 47cad23a6b9e..bca4dddd5a15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -176,7 +176,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
 	i2c->rec = *rec;
 	i2c->adapter.owner = THIS_MODULE;
 	i2c->adapter.class = I2C_CLASS_DDC;
-	i2c->adapter.dev.parent = &dev->pdev->dev;
+	i2c->adapter.dev.parent = dev->dev;
 	i2c->dev = dev;
 	i2c_set_adapdata(&i2c->adapter, i2c);
 	mutex_init(&i2c->mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 024d0a563a65..7645223ea0ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -195,6 +195,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
 		ring->funcs->emit_mem_sync(ring);
 
+	if (ring->funcs->emit_wave_limit &&
+	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
+		ring->funcs->emit_wave_limit(ring, true);
+
 	if (ring->funcs->insert_start)
 		ring->funcs->insert_start(ring);
 
@@ -295,6 +299,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	ring->current_ctx = fence_ctx;
 	if (vm && ring->funcs->emit_switch_buffer)
 		amdgpu_ring_emit_switch_buffer(ring);
+
+	if (ring->funcs->emit_wave_limit &&
+	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
+		ring->funcs->emit_wave_limit(ring, false);
+
 	amdgpu_ring_commit(ring);
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index dcd9b4a8e20b..dc852af4f3b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -205,3 +205,48 @@ restart_ih:
 
 	return IRQ_HANDLED;
 }
+
+/**
+ * amdgpu_ih_decode_iv_helper - decode an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: ih ring to process
+ * @entry: IV entry
+ *
+ * Decodes the interrupt vector at the current rptr
+ * position and also advances the position for Vega10
+ * and later GPUs.
+ */
+void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
+				struct amdgpu_ih_ring *ih,
+				struct amdgpu_iv_entry *entry)
+{
+	/* wptr/rptr are in bytes! */
+	u32 ring_index = ih->rptr >> 2;
+	uint32_t dw[8];
+
+	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
+	dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
+	dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
+	dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
+	dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
+
+	entry->client_id = dw[0] & 0xff;
+	entry->src_id = (dw[0] >> 8) & 0xff;
+	entry->ring_id = (dw[0] >> 16) & 0xff;
+	entry->vmid = (dw[0] >> 24) & 0xf;
+	entry->vmid_src = (dw[0] >> 31);
+	entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
+	entry->timestamp_src = dw[2] >> 31;
+	entry->pasid = dw[3] & 0xffff;
+	entry->pasid_src = dw[3] >> 31;
+	entry->src_data[0] = dw[4];
+	entry->src_data[1] = dw[5];
+	entry->src_data[2] = dw[6];
+	entry->src_data[3] = dw[7];
+
+	/* wptr/rptr are in bytes! */
+	ih->rptr += 32;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 3c9cfe7eecff..6ed4a85fc7c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -30,6 +30,18 @@
 struct amdgpu_device;
 struct amdgpu_iv_entry;
 
+struct amdgpu_ih_regs {
+	uint32_t ih_rb_base;
+	uint32_t ih_rb_base_hi;
+	uint32_t ih_rb_cntl;
+	uint32_t ih_rb_wptr;
+	uint32_t ih_rb_rptr;
+	uint32_t ih_doorbell_rptr;
+	uint32_t ih_rb_wptr_addr_lo;
+	uint32_t ih_rb_wptr_addr_hi;
+	uint32_t psp_reg_id;
+};
+
 /*
  * R6xx+ IH ring
  */
@@ -53,6 +65,7 @@ struct amdgpu_ih_ring {
 	bool			enabled;
 	unsigned		rptr;
 	atomic_t		lock;
+	struct amdgpu_ih_regs	ih_regs;
 };
 
 /* provided by the ih block */
@@ -75,5 +88,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
 void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
 			  unsigned int num_dw);
 int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
-
+void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
+				struct amdgpu_ih_ring *ih,
+				struct amdgpu_iv_entry *entry);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index bea57e8e793f..afbbec82a289 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -444,7 +444,8 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 
 	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
 		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
 
-	} else if (adev->irq.virq[src_id]) {
+	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
+		   adev->irq.virq[src_id]) {
 		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
 
 	} else if (!adev->irq.client[client_id].sources) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index dcfe8a3b03ff..ff48101bab55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -271,7 +271,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 	}
 
 	/* Signal all jobs already scheduled to HW */
-	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+	list_for_each_entry(s_job, &sched->pending_list, list) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 
 		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b16b32797624..3c37cf1ae8b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -142,7 +142,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 	    (amdgpu_is_atpx_hybrid() ||
 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
 	    ((flags & AMD_IS_APU) == 0) &&
-	    !pci_is_thunderbolt_attached(dev->pdev))
+	    !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
 		flags |= AMD_IS_PX;
 
 	parent = pci_upstream_bridge(adev->pdev);
@@ -156,7 +156,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 	 */
 	r = amdgpu_device_init(adev, flags);
 	if (r) {
-		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+		dev_err(dev->dev, "Fatal error during GPU init\n");
 		goto out;
 	}
 
@@ -199,7 +199,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 
 	acpi_status = amdgpu_acpi_init(adev);
 	if (acpi_status)
-		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
+		dev_dbg(dev->dev, "Error during ACPI methods call\n");
 
 	if (adev->runpm) {
 		/* only need to skip on ATPX */
@@ -735,10 +735,10 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		if (!dev_info)
 			return -ENOMEM;
 
-		dev_info->device_id = dev->pdev->device;
+		dev_info->device_id = adev->pdev->device;
 		dev_info->chip_rev = adev->rev_id;
 		dev_info->external_rev = adev->external_rev_id;
-		dev_info->pci_rev = dev->pdev->revision;
+		dev_info->pci_rev = adev->pdev->revision;
 		dev_info->family = adev->family;
 		dev_info->num_shader_engines = adev->gfx.config.max_shader_engines;
 		dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index e62cc0e1a5ad..7c11bce4514b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -57,7 +57,6 @@ struct amdgpu_nbio_funcs {
 	u32 (*get_pcie_port_data_offset)(struct amdgpu_device *adev);
 	u32 (*get_rev_id)(struct amdgpu_device *adev);
 	void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
-	void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
 	u32 (*get_memsize)(struct amdgpu_device *adev);
 	void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
 			bool use_doorbell, int doorbell_index, int doorbell_size);
@@ -89,6 +88,7 @@ struct amdgpu_nbio_funcs {
 	int (*ras_late_init)(struct amdgpu_device *adev);
 	void (*enable_aspm)(struct amdgpu_device *adev,
 			    bool enable);
+	void (*program_aspm)(struct amdgpu_device *adev);
 };
 
 struct amdgpu_nbio {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index b4c8e5d5c763..4b29b8205442 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -787,7 +787,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 	if (r < 0)
 		return r;
 
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
 	if (r)
 		return r;
 
@@ -911,10 +911,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 
 	if (bo->tbo.pin_count) {
 		uint32_t mem_type = bo->tbo.mem.mem_type;
+		uint32_t mem_flags = bo->tbo.mem.placement;
 
 		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
 			return -EINVAL;
 
+		if ((mem_type == TTM_PL_VRAM) &&
+		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
+		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
+			return -EINVAL;
+
 		ttm_bo_pin(&bo->tbo);
 
 		if (max_offset != 0) {
@@ -930,7 +936,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	if (bo->tbo.base.import_attach)
 		dma_buf_pin(bo->tbo.base.import_attach);
 
-	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	/* force to pin into visible video ram */
 	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
 		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
@@ -983,6 +988,7 @@ error:
 */
 int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
 {
+	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 79120ec41396..9ac37569823f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -174,12 +174,12 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
 
 static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
 {
-	return bo->tbo.num_pages << PAGE_SHIFT;
+	return bo->tbo.base.size;
 }
 
 static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
 {
-	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+	return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
 }
 
 static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 347fec669424..839917eb7bc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -36,6 +36,7 @@
 #include "psp_v12_0.h"
 
 #include "amdgpu_ras.h"
+#include "amdgpu_securedisplay.h"
 
 static int psp_sysfs_init(struct amdgpu_device *adev);
 static void psp_sysfs_fini(struct amdgpu_device *adev);
@@ -249,7 +250,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
 {
 	int ret;
 	int index;
-	int timeout = 2000;
+	int timeout = 20000;
 	bool ras_intr = false;
 	bool skip_unsupport = false;
 
@@ -282,7 +283,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
 		ras_intr = amdgpu_ras_intr_triggered();
 		if (ras_intr)
 			break;
-		msleep(1);
+		usleep_range(10, 100);
 		amdgpu_asic_invalidate_hdp(psp->adev, NULL);
 	}
 
@@ -1652,6 +1653,175 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 }
 // RAP end
 
+/* securedisplay start */
+static int psp_securedisplay_init_shared_buf(struct psp_context *psp)
+{
+	int ret;
+
+	/*
+	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
+	 * physical) for sa ta <-> Driver
+	 */
+	ret = amdgpu_bo_create_kernel(psp->adev, PSP_SECUREDISPLAY_SHARED_MEM_SIZE,
+				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+				      &psp->securedisplay_context.securedisplay_shared_bo,
+				      &psp->securedisplay_context.securedisplay_shared_mc_addr,
+				      &psp->securedisplay_context.securedisplay_shared_buf);
+
+	return ret;
+}
+
+static int psp_securedisplay_load(struct psp_context *psp)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd;
+
+	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+	memcpy(psp->fw_pri_buf, psp->ta_securedisplay_start_addr, psp->ta_securedisplay_ucode_size);
+
+	psp_prep_ta_load_cmd_buf(cmd,
+				 psp->fw_pri_mc_addr,
+				 psp->ta_securedisplay_ucode_size,
+				 psp->securedisplay_context.securedisplay_shared_mc_addr,
+				 PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+	if (ret)
+		goto failed;
+
+	psp->securedisplay_context.securedisplay_initialized = true;
+	psp->securedisplay_context.session_id = cmd->resp.session_id;
+	mutex_init(&psp->securedisplay_context.mutex);
+
+failed:
+	kfree(cmd);
+	return ret;
+}
+
+static int psp_securedisplay_unload(struct psp_context *psp)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd;
+
+	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.session_id);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+	kfree(cmd);
+
+	return ret;
+}
+
+static int psp_securedisplay_initialize(struct psp_context *psp)
+{
+	int ret;
+	struct securedisplay_cmd *securedisplay_cmd;
+
+	/*
+	 * TODO: bypass the initialize in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
+	if (!psp->adev->psp.ta_securedisplay_ucode_size ||
+	    !psp->adev->psp.ta_securedisplay_start_addr) {
+		dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
+		return 0;
+	}
+
+	if (!psp->securedisplay_context.securedisplay_initialized) {
+		ret = psp_securedisplay_init_shared_buf(psp);
+		if (ret)
+			return ret;
+	}
+
+	ret = psp_securedisplay_load(psp);
+	if (ret)
+		return ret;
+
+	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+				       TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+
+	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+	if (ret) {
+		psp_securedisplay_unload(psp);
+
+		amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
+			      &psp->securedisplay_context.securedisplay_shared_mc_addr,
+			      &psp->securedisplay_context.securedisplay_shared_buf);
+
+		psp->securedisplay_context.securedisplay_initialized = false;
+
+		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
+		return -EINVAL;
+	}
+
+	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
+		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
+			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
+	}
+
+	return 0;
+}
+
+static int psp_securedisplay_terminate(struct psp_context *psp)
+{
+	int ret;
+
+	/*
+	 * TODO: bypass the terminate in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
+	if (!psp->securedisplay_context.securedisplay_initialized)
+		return 0;
+
+	ret = psp_securedisplay_unload(psp);
+	if (ret)
+		return ret;
+
+	psp->securedisplay_context.securedisplay_initialized = false;
+
+	/* free securedisplay shared memory */
+	amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
+			      &psp->securedisplay_context.securedisplay_shared_mc_addr,
+			      &psp->securedisplay_context.securedisplay_shared_buf);
+
+	return ret;
+}
+
+int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+	int ret;
+
+	if (!psp->securedisplay_context.securedisplay_initialized)
+		return -EINVAL;
+
+	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
+	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
+		return -EINVAL;
+
+	mutex_lock(&psp->securedisplay_context.mutex);
+
+	ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.session_id);
+
+	mutex_unlock(&psp->securedisplay_context.mutex);
+
+	return ret;
+}
+/* SECUREDISPLAY end */
+
 static int psp_hw_start(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
@@ -2126,6 +2296,11 @@ skip_memalloc:
 		if (ret)
 			dev_err(psp->adev->dev,
 				"RAP: Failed to initialize RAP\n");
+
+		ret = psp_securedisplay_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
 	}
 
 	return 0;
@@ -2176,6 +2351,7 @@ static int psp_hw_fini(void *handle)
 
 	if (psp->adev->psp.ta_fw) {
 		psp_ras_terminate(psp);
+		psp_securedisplay_terminate(psp);
 		psp_rap_terminate(psp);
psp_dtm_terminate(psp); psp_hdcp_terminate(psp); @@ -2240,6 +2416,11 @@ static int psp_suspend(void *handle) DRM_ERROR("Failed to terminate rap ta\n"); return ret; } + ret = psp_securedisplay_terminate(psp); + if (ret) { + DRM_ERROR("Failed to terminate securedisplay ta\n"); + return ret; + } } ret = psp_asd_unload(psp); @@ -2323,6 +2504,11 @@ static int psp_resume(void *handle) if (ret) dev_err(psp->adev->dev, "RAP: Failed to initialize RAP\n"); + + ret = psp_securedisplay_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); } mutex_unlock(&adev->firmware.mutex); @@ -2629,6 +2815,11 @@ static int parse_ta_bin_descriptor(struct psp_context *psp, psp->ta_rap_ucode_size = le32_to_cpu(desc->size_bytes); psp->ta_rap_start_addr = ucode_start_addr; break; + case TA_FW_TYPE_PSP_SECUREDISPLAY: + psp->ta_securedisplay_ucode_version = le32_to_cpu(desc->fw_version); + psp->ta_securedisplay_ucode_size = le32_to_cpu(desc->size_bytes); + psp->ta_securedisplay_start_addr = ucode_start_addr; + break; default: dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index da250bc1ac57..cb50ba445f8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -30,6 +30,7 @@ #include "ta_xgmi_if.h" #include "ta_ras_if.h" #include "ta_rap_if.h" +#include "ta_secureDisplay_if.h" #define PSP_FENCE_BUFFER_SIZE 0x1000 #define PSP_CMD_BUFFER_SIZE 0x1000 @@ -40,6 +41,7 @@ #define PSP_HDCP_SHARED_MEM_SIZE 0x4000 #define PSP_DTM_SHARED_MEM_SIZE 0x4000 #define PSP_RAP_SHARED_MEM_SIZE 0x4000 +#define PSP_SECUREDISPLAY_SHARED_MEM_SIZE 0x4000 #define PSP_SHARED_MEM_SIZE 0x4000 #define PSP_FW_NAME_LEN 0x24 @@ -171,6 +173,15 @@ struct psp_rap_context { struct mutex mutex; }; +struct psp_securedisplay_context { + bool securedisplay_initialized; + uint32_t session_id; + struct amdgpu_bo *securedisplay_shared_bo; + uint64_t securedisplay_shared_mc_addr; + void *securedisplay_shared_buf; + struct mutex mutex; +}; + #define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942 #define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000 #define GDDR6_MEM_TRAINING_OFFSET 0x8000 @@ -298,12 +309,17 @@ struct psp_context uint32_t ta_rap_ucode_size; uint8_t *ta_rap_start_addr; + uint32_t ta_securedisplay_ucode_version; + uint32_t ta_securedisplay_ucode_size; + uint8_t *ta_securedisplay_start_addr; + struct psp_asd_context asd_context; struct psp_xgmi_context xgmi_context; struct psp_ras_context ras; struct psp_hdcp_context hdcp_context; struct psp_dtm_context dtm_context; struct psp_rap_context rap_context; + struct psp_securedisplay_context securedisplay_context; struct mutex mutex; struct psp_memory_training_context mem_train_ctx; }; @@ -380,6 +396,7 @@ int psp_ras_trigger_error(struct psp_context *psp, int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id); +int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_rlc_autoload_start(struct psp_context *psp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 82e952696d24..1fb2a91ad30a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -846,7 +846,7 @@ static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, if 
(amdgpu_dpm_allow_xgmi_power_down(adev, true)) dev_warn(adev->dev, "Failed to allow XGMI power down"); - if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) dev_warn(adev->dev, "Failed to allow df cstate"); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 1a612f51ecd9..b644c78475fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -166,7 +166,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned int max_dw, struct amdgpu_irq_src *irq_src, unsigned int irq_type, unsigned int hw_prio) { - int r, i; + int r; int sched_hw_submission = amdgpu_sched_hw_submission; u32 *num_sched; u32 hw_ip; @@ -258,8 +258,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, } ring->max_dw = max_dw; - ring->priority = DRM_SCHED_PRIORITY_NORMAL; - mutex_init(&ring->priority_mutex); + ring->hw_prio = hw_prio; if (!ring->no_scheduler) { hw_ip = ring->funcs->type; @@ -268,9 +267,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, &ring->sched; } - for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i) - atomic_set(&ring->num_jobs[i], 0); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 7112137689db..56acec1075ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -197,6 +197,7 @@ struct amdgpu_ring_funcs { void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid); int (*preempt_ib)(struct amdgpu_ring *ring); void (*emit_mem_sync)(struct amdgpu_ring *ring); + void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable); }; struct amdgpu_ring { @@ -242,11 +243,7 @@ struct amdgpu_ring { struct dma_fence *vmid_wait; bool has_compute_vm_bug; bool no_scheduler; - - atomic_t num_jobs[DRM_SCHED_PRIORITY_COUNT]; - struct mutex priority_mutex; - /* protected by priority_mutex */ - int priority; + int hw_prio; #if defined(CONFIG_DEBUG_FS) struct dentry *ent; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c new file mode 100644 index 000000000000..834440ab9ff7 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c @@ -0,0 +1,176 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * + */ +#include <linux/debugfs.h> +#include <linux/pm_runtime.h> + +#include "amdgpu.h" +#include "amdgpu_securedisplay.h" + +/** + * DOC: AMDGPU SECUREDISPLAY debugfs test interface + * + * Usage: + * echo opcode <value> > <debugfs_dir>/dri/xxx/securedisplay_test + * e.g. echo 1 > <debugfs_dir>/dri/xxx/securedisplay_test + * e.g. echo 2 phy_id > <debugfs_dir>/dri/xxx/securedisplay_test + * + * opcode: + * 1: Query whether the TA is responding; used only for validation purposes + * 2: Send Region of Interest and CRC value to I2C. The (uint32)phy_id is + * sent to determine which DIO scratch register should be used to get + * the ROI, and i2c_buf is received as the output. + * + * See the header file ta_securedisplay_if.h for more detail. + * + */ + +void psp_securedisplay_parse_resp_status(struct psp_context *psp, + enum ta_securedisplay_status status) +{ + switch (status) { + case TA_SECUREDISPLAY_STATUS__SUCCESS: + break; + case TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE: + dev_err(psp->adev->dev, "Secure display: Generic Failure.\n"); + break; + case TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER: + dev_err(psp->adev->dev, "Secure display: Invalid Parameter.\n"); + break; + case TA_SECUREDISPLAY_STATUS__NULL_POINTER: + dev_err(psp->adev->dev, "Secure display: Null Pointer.\n"); + break; + case TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR: + dev_err(psp->adev->dev, "Secure display: Failed to write to I2C.\n"); + break; + case TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR: + dev_err(psp->adev->dev, "Secure display: Failed to read DIO scratch register.\n"); + break; + case TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR: + dev_err(psp->adev->dev, "Secure display: Failed to read CRC.\n"); + break; + default: + dev_err(psp->adev->dev, "Secure display: Failed to parse status: %d\n", status); + } +} + +void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd, + enum ta_securedisplay_command command_id) +{ + *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.securedisplay_shared_buf; + memset(*cmd, 0, sizeof(struct securedisplay_cmd)); + (*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE; + (*cmd)->cmd_id = command_id; +} + +static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; + struct psp_context *psp = &adev->psp; + struct securedisplay_cmd *securedisplay_cmd; + struct drm_device *dev = adev_to_drm(adev); + uint32_t phy_id; + uint32_t op; + int i; + char str[64]; + char i2c_output[256]; + int ret; + + if (*pos || size > sizeof(str) - 1) + return -EINVAL; + + memset(str, 0, sizeof(str)); + ret = copy_from_user(str, buf, size); + if (ret) + return -EFAULT; + + ret = pm_runtime_get_sync(dev->dev); + if (ret < 0) { + pm_runtime_put_autosuspend(dev->dev); + return ret; + } + + if (size < 3) + sscanf(str, "%u ", &op); + else + sscanf(str, "%u %u", &op, &phy_id); + + switch (op) { + case 1: + psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, + TA_SECUREDISPLAY_COMMAND__QUERY_TA); + ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); + if (!ret) { + if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS) +
dev_info(adev->dev, "SECUREDISPLAY: query securedisplay TA ret is 0x%X\n", + securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); + else + psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); + } + break; + case 2: + psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, + TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); + securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_id; + ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); + if (!ret) { + if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS) { + memset(i2c_output, 0, sizeof(i2c_output)); + for (i = 0; i < TA_SECUREDISPLAY_I2C_BUFFER_SIZE; i++) + sprintf(i2c_output, "%s 0x%X", i2c_output, + securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf[i]); + dev_info(adev->dev, "SECUREDISPLAY: I2C buffer out put is :%s\n", i2c_output); + } else { + psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); + } + } + break; + default: + dev_err(adev->dev, "Invalid input: %s\n", str); + } + + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + return size; +} + +static const struct file_operations amdgpu_securedisplay_debugfs_ops = { + .owner = THIS_MODULE, + .read = NULL, + .write = amdgpu_securedisplay_debugfs_write, + .llseek = default_llseek +}; + +void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + + if (!adev->psp.securedisplay_context.securedisplay_initialized) + return; + + debugfs_create_file("securedisplay_test", S_IWUSR, adev_to_drm(adev)->primary->debugfs_root, + adev, &amdgpu_securedisplay_debugfs_ops); +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h new file mode 100644 index 000000000000..fe98574748f4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h @@ -0,0 +1,36 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * + */ +#ifndef _AMDGPU_SECUREDISPLAY_H +#define _AMDGPU_SECUREDISPLAY_H + +#include "amdgpu.h" +#include "ta_secureDisplay_if.h" + +void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev); +void psp_securedisplay_parse_resp_status(struct psp_context *psp, + enum ta_securedisplay_status status); +void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd, + enum ta_securedisplay_command command_id); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 6752d8b13118..792d20261846 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -21,7 +21,7 @@ * */ -#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) #define _AMDGPU_TRACE_H_ #include <linux/stringify.h> @@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_bo_create, TP_fast_assign( __entry->bo = bo; - __entry->pages = bo->tbo.num_pages; + __entry->pages = bo->tbo.mem.num_pages; __entry->type = bo->tbo.mem.mem_type; __entry->prefer = bo->preferred_domains; __entry->allow = bo->allowed_domains; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 4d8f19ab1014..9fd2157b133a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -46,7 +46,6 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_module.h> #include <drm/drm_debugfs.h> #include <drm/amdgpu_drm.h> @@ -637,7 +636,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, out: /* update statistics */ - atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); + atomic64_add(bo->base.size, &adev->num_bytes_moved); amdgpu_bo_move_notify(bo, evict, new_mem); return 0; } @@ -918,8 +917,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, goto release_sg; /* convert SG to linear array of pages and dma addresses */ - drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, - gtt->ttm.dma_address, ttm->num_pages); + drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, + ttm->num_pages); return 0; @@ -1265,9 +1264,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev, ttm->sg = sgt; } - drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, - gtt->ttm.dma_address, - ttm->num_pages); + drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, + ttm->num_pages); return 0; } @@ -2124,7 +2122,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, return r; } - num_pages = bo->tbo.num_pages; + num_pages = bo->tbo.mem.num_pages; mm_node = bo->tbo.mem.mm_node; num_loops = 0; while (num_pages) { @@ -2154,7 +2152,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, } } - num_pages = bo->tbo.num_pages; + num_pages = bo->tbo.mem.num_pages; mm_node = bo->tbo.mem.mm_node; while (num_pages) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 0e43b46d3ab5..46449e70348b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -122,6 +122,9 @@ struct ta_firmware_header_v1_0 { uint32_t ta_dtm_ucode_version; uint32_t ta_dtm_offset_bytes; uint32_t ta_dtm_size_bytes; + uint32_t ta_securedisplay_ucode_version; + uint32_t ta_securedisplay_offset_bytes; + uint32_t ta_securedisplay_size_bytes; }; enum ta_fw_type { @@ -132,6 +135,7 @@ enum ta_fw_type { 
TA_FW_TYPE_PSP_HDCP, TA_FW_TYPE_PSP_DTM, TA_FW_TYPE_PSP_RAP, + TA_FW_TYPE_PSP_SECUREDISPLAY, }; struct ta_fw_bin_desc { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 8b989670ed66..e2ed4689118a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1170,7 +1170,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, int r, i; r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_DOMAIN_GTT, &bo, NULL, (void **)&msg); if (r) return r; @@ -1202,7 +1202,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, int r, i; r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_DOMAIN_GTT, &bo, NULL, (void **)&msg); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 0d5284b936e4..ea6a62f67e38 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -1160,6 +1160,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_bo_free_kernel(&bo, NULL, NULL); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 4a77c7424dfc..99b82f3c2617 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -496,6 +496,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_job *job; struct amdgpu_ib *ib; uint64_t addr; + void *msg = NULL; int i, r; r = amdgpu_job_alloc_with_ib(adev, 64, @@ -505,6 +506,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, ib = &job->ibs[0]; addr = amdgpu_bo_gpu_offset(bo); + msg = amdgpu_bo_kptr(bo); ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0); ib->ptr[1] = addr; ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0); @@ -523,7 +525,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, amdgpu_bo_fence(bo, f, false); amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg); if (fence) *fence = dma_fence_get(f); @@ -536,7 +538,7 @@ err_free: err: amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg); return r; } @@ -890,6 +892,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_bo_free_kernel(&bo, NULL, NULL); + return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 2d51b7694d1f..5da04d45b637 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -560,10 +560,14 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev) static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work); + int ret; - amdgpu_virt_read_pf2vf_data(adev); + ret = amdgpu_virt_read_pf2vf_data(adev); + if (ret) + goto out; amdgpu_virt_write_vf2pf_data(adev); +out: schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms); } @@ -571,8 +575,8 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev) { if (adev->virt.vf2pf_update_interval_ms != 0) { DRM_INFO("clean up the vf2pf work 
item\n"); - flush_delayed_work(&adev->virt.vf2pf_work); cancel_delayed_work_sync(&adev->virt.vf2pf_work); + adev->virt.vf2pf_update_interval_ms = 0; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 0768c8686983..ad91c0c3c423 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -653,9 +653,11 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, if (!bo->parent) continue; - ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move); + ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem, + &vm->lru_bulk_move); if (bo->shadow) ttm_bo_move_to_lru_tail(&bo->shadow->tbo, + &bo->shadow->tbo.mem, &vm->lru_bulk_move); } spin_unlock(&ttm_bo_glob.lru_lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index d2de2a720a3d..c89b66bb70e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -473,6 +473,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, for (i = 0; pages_left >= pages_per_node; ++i) { unsigned long pages = rounddown_pow_of_two(pages_left); + /* Limit maximum size to 2GB due to SG table limitations */ + pages = min(pages, (2UL << (30 - PAGE_SHIFT))); + r = drm_mm_insert_node_in_range(mm, &nodes[i], pages, pages_per_node, 0, place->fpfn, lpfn, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 541ef6be390f..659b385b27b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -324,7 +324,7 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev, struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) { - struct amdgpu_hive_info *hive = NULL, *tmp = NULL; + struct amdgpu_hive_info *hive = NULL; int ret; if (!adev->gmc.xgmi.hive_id) @@ -337,11 +337,9 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) mutex_lock(&xgmi_mutex); - if (!list_empty(&xgmi_hive_list)) { - list_for_each_entry_safe(hive, tmp, &xgmi_hive_list, node) { - if (hive->hive_id == adev->gmc.xgmi.hive_id) - goto pro_end; - } + list_for_each_entry(hive, &xgmi_hive_list, node) { + if (hive->hive_id == adev->gmc.xgmi.hive_id) + goto pro_end; } hive = kzalloc(sizeof(*hive), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c index 921a69abda55..5b90efd6f6d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c @@ -27,7 +27,6 @@ #include "athub/athub_2_0_0_offset.h" #include "athub/athub_2_0_0_sh_mask.h" #include "athub/athub_2_0_0_default.h" -#include "navi10_enum.h" #include "soc15_common.h" diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c index 66c183ddd43e..7b1b18350bf9 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c @@ -26,7 +26,6 @@ #include "athub/athub_2_1_0_offset.h" #include "athub/athub_2_1_0_sh_mask.h" -#include "navi10_enum.h" #include "soc15_common.h" diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 13737b317f7c..4d6832cc7fb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1251,13 +1251,22 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev, WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute); } -static int cik_gpu_pci_config_reset(struct 
amdgpu_device *adev) +/** + * cik_asic_pci_config_reset - soft reset GPU + * + * @adev: amdgpu_device pointer + * + * Use PCI Config method to reset the GPU. + * + * Returns 0 for success. + */ +static int cik_asic_pci_config_reset(struct amdgpu_device *adev) { struct kv_reset_save_regs kv_save = { 0 }; u32 i; int r = -EINVAL; - dev_info(adev->dev, "GPU pci config reset\n"); + amdgpu_atombios_scratch_regs_engine_hung(adev, true); if (adev->flags & AMD_IS_APU) kv_save_regs_for_reset(adev, &kv_save); @@ -1285,26 +1294,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) kv_restore_regs_for_reset(adev, &kv_save); - return r; -} - -/** - * cik_asic_pci_config_reset - soft reset GPU - * - * @adev: amdgpu_device pointer - * - * Use PCI Config method to reset the GPU. - * - * Returns 0 for success. - */ -static int cik_asic_pci_config_reset(struct amdgpu_device *adev) -{ - int r; - - amdgpu_atombios_scratch_regs_engine_hung(adev, true); - - r = cik_gpu_pci_config_reset(adev); - amdgpu_atombios_scratch_regs_engine_hung(adev, false); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index da37f8a900af..307c01301c87 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -194,19 +194,30 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); - if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { - wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); - /* When a ring buffer overflow happen start parsing interrupt - * from the last not overwritten vector (wptr + 16). Hopefully - * this should allow us to catchup. - */ - dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", - wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); - ih->rptr = (wptr + 16) & ih->ptr_mask; - tmp = RREG32(mmIH_RB_CNTL); - tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); - WREG32(mmIH_RB_CNTL, tmp); - } + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + /* Double check that the overflow wasn't already cleared. */ + wptr = RREG32(mmIH_RB_WPTR); + + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + + /* When a ring buffer overflow happens, start parsing interrupts + * from the last not-overwritten vector (wptr + 16). Hopefully + * this should allow us to catch up.
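+ * (The ring read/write pointers here are byte offsets and each IH vector on this generation is 16 bytes, so wptr + 16 skips just the single entry that is being overwritten.)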
+ */ + dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); + ih->rptr = (wptr + 16) & ih->ptr_mask; + tmp = RREG32(mmIH_RB_CNTL); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + WREG32(mmIH_RB_CNTL, tmp); + + +out: return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index ffcc64ec6473..9810af712cc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -294,7 +294,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector) static const struct mode_size { int w; int h; - } common_modes[21] = { + } common_modes[] = { { 640, 480}, { 720, 480}, { 800, 600}, @@ -312,13 +312,14 @@ static int dce_virtual_get_modes(struct drm_connector *connector) {1600, 1200}, {1920, 1080}, {1920, 1200}, + {2560, 1440}, {4096, 3112}, {3656, 2664}, {3840, 2160}, {4096, 2160}, }; - for (i = 0; i < 21; i++) { + for (i = 0; i < ARRAY_SIZE(common_modes); i++) { mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); drm_mode_probed_add(connector, mode); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index d86b42a36560..45d1172b7bff 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -38,7 +38,6 @@ #include "smuio/smuio_11_0_0_offset.h" #include "smuio/smuio_11_0_0_sh_mask.h" #include "navi10_enum.h" -#include "hdp/hdp_5_0_0_offset.h" #include "ivsrcid/gfx/irqsrcs_gfx_10_1.h" #include "soc15.h" @@ -71,6 +70,11 @@ #define GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8 #define GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L +#define mmCGTS_TCC_DISABLE_gc_10_3 0x5006 +#define mmCGTS_TCC_DISABLE_gc_10_3_BASE_IDX 1 +#define mmCGTS_USER_TCC_DISABLE_gc_10_3 0x5007 +#define mmCGTS_USER_TCC_DISABLE_gc_10_3_BASE_IDX 1 + #define mmCP_MEC_CNTL_Sienna_Cichlid 0x0f55 #define mmCP_MEC_CNTL_Sienna_Cichlid_BASE_IDX 0 #define mmRLC_SAFE_MODE_Sienna_Cichlid 0x4ca0 @@ -99,10 +103,6 @@ #define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0 -#define mmCGTS_TCC_DISABLE_Vangogh 0x5006 -#define mmCGTS_TCC_DISABLE_Vangogh_BASE_IDX 1 -#define mmCGTS_USER_TCC_DISABLE_Vangogh 0x5007 -#define mmCGTS_USER_TCC_DISABLE_Vangogh_BASE_IDX 1 #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025 #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1 #define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026 @@ -125,6 +125,7 @@ #define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX 1 #define mmGCR_GENERAL_CNTL_Vangogh 0x1580 #define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX 0 +#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh 0x0000FFFFL #define mmCP_HYP_PFP_UCODE_ADDR 0x5814 #define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1 @@ -3782,9 +3783,6 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev) if (!gfx_v10_0_navi10_gfxoff_should_enable(adev)) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; break; - case CHIP_VANGOGH: - adev->pm.pp_feature &= ~PP_GFXOFF_MASK; - break; default: break; } @@ -4494,8 +4492,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + ring->pipe; - hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe, - ring->queue) ? + hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? 
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; /* type-2 packets are deprecated on MEC, use type-3 instead */ r = amdgpu_ring_init(adev, ring, 1024, @@ -4942,15 +4939,12 @@ static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev) /* TCCs are global (not instanced). */ uint32_t tcc_disable; - switch (adev->asic_type) { - case CHIP_VANGOGH: - tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_Vangogh) | - RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_Vangogh); - break; - default: + if (adev->asic_type >= CHIP_SIENNA_CICHLID) { + tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_gc_10_3) | + RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_gc_10_3); + } else { + tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) | - RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE); - break; + RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE); } adev->gfx.config.tcc_disabled_mask = @@ -5715,7 +5709,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio.funcs->hdp_flush(adev, NULL); + adev->hdp.funcs->flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); @@ -5793,7 +5787,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio.funcs->hdp_flush(adev, NULL); + adev->hdp.funcs->flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0); @@ -5870,7 +5864,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio.funcs->hdp_flush(adev, NULL); + adev->hdp.funcs->flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); @@ -6239,7 +6233,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio.funcs->hdp_flush(adev, NULL); + adev->hdp.funcs->flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); @@ -6547,8 +6541,7 @@ static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct struct amdgpu_device *adev = ring->adev; if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { - if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe, - ring->queue)) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) { mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; mqd->cp_hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; @@ -7847,6 +7840,20 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable) data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, data); + + /* + * When CGPG is enabled, the hysteresis register RLC_PG_DELAY_3.CGCG_ACTIVE_BEFORE_CGPG + * must be programmed to the desired CGPG hysteresis value in refclk counts. Note that + * the RLC FW is modified to take 16 bits from RLC_PG_DELAY_3[15:0] as the hysteresis + * instead of just 8 bits. + * + * The recommendation from the RLC team is to set RLC_PG_DELAY_3 to 200us (0x4E20) + * as the starting point for CGPG enablement.
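+ * (Sanity check on the numbers: 0x4E20 is 20000 decimal, and 20000 refclk ticks equal 200 us assuming the 100 MHz reference clock implied by the figures above.)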
+ */ + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && adev->asic_type == CHIP_VANGOGH) { + data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh; + WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data); + } } static void gfx_v10_cntl_pg(struct amdgpu_device *adev, bool enable) @@ -7908,6 +7915,7 @@ static int gfx_v10_0_set_powergating_state(void *handle, break; case CHIP_VANGOGH: gfx_v10_cntl_pg(adev, enable); + amdgpu_gfx_off_ctrl(adev, enable); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 37639214cbbb..84d2eaa38101 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -29,6 +29,7 @@ #include "amdgpu.h" #include "amdgpu_gfx.h" +#include "amdgpu_ring.h" #include "vi.h" #include "vi_structs.h" #include "vid.h" @@ -1923,8 +1924,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + ring->pipe; - hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe, - ring->queue) ? + hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT; /* type-2 packets are deprecated on MEC, use type-3 instead */ r = amdgpu_ring_init(adev, ring, 1024, @@ -4442,8 +4442,7 @@ static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *m struct amdgpu_device *adev = ring->adev; if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { - if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe, - ring->queue)) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) { mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; mqd->cp_hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; @@ -6847,6 +6846,66 @@ static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0x0000000A); /* poll interval */ } + +/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */ +#define mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT 0x0000007f +static void gfx_v8_0_emit_wave_limit_cs(struct amdgpu_ring *ring, + uint32_t pipe, bool enable) +{ + uint32_t val; + uint32_t wcl_cs_reg; + + val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT; + + switch (pipe) { + case 0: + wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS0; + break; + case 1: + wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS1; + break; + case 2: + wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS2; + break; + case 3: + wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS3; + break; + default: + DRM_DEBUG("invalid pipe %d\n", pipe); + return; + } + + amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val); +} + +#define mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT 0x07ffffff +static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t val; + int i; + + /* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to limit + * the number of gfx waves. Setting the low 5 bits makes sure gfx only gets + * around 25% of GPU resources. + */ + val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT; + amdgpu_ring_emit_wreg(ring, mmSPI_WCL_PIPE_PERCENT_GFX, val); + + /* Restrict waves for normal/low priority compute queues as well + * to get the best QoS for high priority compute jobs. + * + * amdgpu controls only the 1st ME (CS pipes 0-3).
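+ * (For scale: 0x1f out of the 7-bit maximum 0x7f is 31/127, i.e. roughly the 25% noted above, while the other CS pipes are clamped all the way down to 0x1.)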
+ */ + for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { + if (i != ring->pipe) + gfx_v8_0_emit_wave_limit_cs(ring, i, enable); + } +} + static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .name = "gfx_v8_0", .early_init = gfx_v8_0_early_init, @@ -6930,7 +6989,9 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */ 7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */ - 7, /* gfx_v8_0_emit_mem_sync_compute */ + 7 + /* gfx_v8_0_emit_mem_sync_compute */ + 5 + /* gfx_v8_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */ + 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */ .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */ .emit_ib = gfx_v8_0_ring_emit_ib_compute, .emit_fence = gfx_v8_0_ring_emit_fence_compute, @@ -6944,6 +7005,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { .pad_ib = amdgpu_ring_generic_pad_ib, .emit_wreg = gfx_v8_0_ring_emit_wreg, .emit_mem_sync = gfx_v8_0_emit_mem_sync_compute, + .emit_wave_limit = gfx_v8_0_emit_wave_limit, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 5f4805e4d04a..65db88bb6cbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -38,7 +38,6 @@ #include "gc/gc_9_0_sh_mask.h" #include "vega10_enum.h" -#include "hdp/hdp_4_0_offset.h" #include "soc15_common.h" #include "clearstate_gfx9.h" @@ -53,6 +52,7 @@ #include "asic_reg/pwr/pwr_10_0_offset.h" #include "asic_reg/pwr/pwr_10_0_sh_mask.h" +#include "asic_reg/gc/gc_9_0_default.h" #define GFX9_NUM_GFX_RINGS 1 #define GFX9_MEC_HPD_SIZE 4096 @@ -2228,8 +2228,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + ring->pipe; - hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe, - ring->queue) ? + hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; /* type-2 packets are deprecated on MEC, use type-3 instead */ return amdgpu_ring_init(adev, ring, 1024, @@ -3391,9 +3390,7 @@ static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *m struct amdgpu_device *adev = ring->adev; if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { - if (amdgpu_gfx_is_high_priority_compute_queue(adev, - ring->pipe, - ring->queue)) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) { mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; mqd->cp_hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; @@ -6671,6 +6668,65 @@ static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ } + +static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring, + uint32_t pipe, bool enable) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t val; + uint32_t wcl_cs_reg; + + /* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */ + val = enable ?
0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT; + + switch (pipe) { + case 0: + wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0); + break; + case 1: + wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1); + break; + case 2: + wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2); + break; + case 3: + wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3); + break; + default: + DRM_DEBUG("invalid pipe %d\n", pipe); + return; + } + + amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val); +} + +static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t val; + int i; + + /* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to limit + * the number of gfx waves. Setting the low 5 bits makes sure gfx only gets + * around 25% of GPU resources. + */ + val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT; + amdgpu_ring_emit_wreg(ring, + SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX), + val); + + /* Restrict waves for normal/low priority compute queues as well + * to get the best QoS for high priority compute jobs. + * + * amdgpu controls only the 1st ME (CS pipes 0-3). + */ + for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { + if (i != ring->pipe) + gfx_v9_0_emit_wave_limit_cs(ring, i, enable); + } +} + static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { .name = "gfx_v9_0", .early_init = gfx_v9_0_early_init, @@ -6760,7 +6816,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v9_0_ring_emit_vm_flush */ 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */ - 7, /* gfx_v9_0_emit_mem_sync */ + 7 + /* gfx_v9_0_emit_mem_sync */ + 5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */ + 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */ .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */ .emit_ib = gfx_v9_0_ring_emit_ib_compute, .emit_fence = gfx_v9_0_ring_emit_fence, @@ -6776,6 +6834,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, .emit_mem_sync = gfx_v9_0_emit_mem_sync, + .emit_wave_limit = gfx_v9_0_emit_wave_limit, }; static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 5648c48be77f..3b7c6c31fce1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -27,8 +27,6 @@ #include "gmc_v10_0.h" #include "umc_v8_7.h" -#include "hdp/hdp_5_0_0_offset.h" -#include "hdp/hdp_5_0_0_sh_mask.h" #include "athub/athub_2_0_0_sh_mask.h" #include "athub/athub_2_0_0_offset.h" #include "dcn/dcn_2_0_0_offset.h" @@ -312,7 +310,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, int r; /* flush hdp cache */ - adev->nbio.funcs->hdp_flush(adev, NULL); + adev->hdp.funcs->flush_hdp(adev, NULL); /* For SRIOV run time, driver shouldn't access the register through MMIO * Directly use kiq to do the vm invalidation instead @@ -995,7 +993,6 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) { int r; bool value; - u32 tmp; if (adev->gart.bo == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); @@ -1014,15 +1011,10 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) if (r) return r; - tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL); - tmp |=
HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK; - WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp); - - tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL); - WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp); + adev->hdp.funcs->init_registers(adev); /* Flush HDP after it is initialized */ - adev->nbio.funcs->hdp_flush(adev, NULL); + adev->hdp.funcs->flush_hdp(adev, NULL); value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? false : true; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index e22268f9dba7..3686e777c76c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -31,8 +31,6 @@ #include "amdgpu_atomfirmware.h" #include "amdgpu_gem.h" -#include "hdp/hdp_4_0_offset.h" -#include "hdp/hdp_4_0_sh_mask.h" #include "gc/gc_9_0_sh_mask.h" #include "dce/dce_12_0_offset.h" #include "dce/dce_12_0_sh_mask.h" @@ -241,60 +239,44 @@ static const char *mmhub_client_ids_vega20[][2] = { }; static const char *mmhub_client_ids_arcturus[][2] = { + [0][0] = "DBGU1", + [1][0] = "XDP", [2][0] = "MP1", - [3][0] = "MP0", - [10][0] = "UTCL2", - [13][0] = "OSS", [14][0] = "HDP", - [15][0] = "SDMA0", - [32+15][0] = "SDMA1", - [64+15][0] = "SDMA2", - [96+15][0] = "SDMA3", - [128+15][0] = "SDMA4", - [160+11][0] = "JPEG", - [160+12][0] = "VCN", - [160+13][0] = "VCNU", - [160+15][0] = "SDMA5", - [192+10][0] = "UTCL2", - [192+11][0] = "JPEG1", - [192+12][0] = "VCN1", - [192+13][0] = "VCN1U", - [192+15][0] = "SDMA6", - [224+15][0] = "SDMA7", + [171][0] = "JPEG", + [172][0] = "VCN", + [173][0] = "VCNU", + [203][0] = "JPEG1", + [204][0] = "VCN1", + [205][0] = "VCN1U", + [256][0] = "SDMA0", + [257][0] = "SDMA1", + [258][0] = "SDMA2", + [259][0] = "SDMA3", + [260][0] = "SDMA4", + [261][0] = "SDMA5", + [262][0] = "SDMA6", + [263][0] = "SDMA7", + [384][0] = "OSS", [0][1] = "DBGU1", [1][1] = "XDP", [2][1] = "MP1", - [3][1] = "MP0", - [13][1] = "OSS", [14][1] = "HDP", - [15][1] = "SDMA0", - [32+15][1] = "SDMA1", - [64+15][1] = "SDMA2", - [96+15][1] = "SDMA3", - [128+15][1] = "SDMA4", - [160+11][1] = "JPEG", - [160+12][1] = "VCN", - [160+13][1] = "VCNU", - [160+15][1] = "SDMA5", - [192+11][1] = "JPEG1", - [192+12][1] = "VCN1", - [192+13][1] = "VCN1U", - [192+15][1] = "SDMA6", - [224+15][1] = "SDMA7", -}; - -static const u32 golden_settings_vega10_hdp[] = -{ - 0xf64, 0x0fffffff, 0x00000000, - 0xf65, 0x0fffffff, 0x00000000, - 0xf66, 0x0fffffff, 0x00000000, - 0xf67, 0x0fffffff, 0x00000000, - 0xf68, 0x0fffffff, 0x00000000, - 0xf6a, 0x0fffffff, 0x00000000, - 0xf6b, 0x0fffffff, 0x00000000, - 0xf6c, 0x0fffffff, 0x00000000, - 0xf6d, 0x0fffffff, 0x00000000, - 0xf6e, 0x0fffffff, 0x00000000, + [171][1] = "JPEG", + [172][1] = "VCN", + [173][1] = "VCNU", + [203][1] = "JPEG1", + [204][1] = "VCN1", + [205][1] = "VCN1U", + [256][1] = "SDMA0", + [257][1] = "SDMA1", + [258][1] = "SDMA2", + [259][1] = "SDMA3", + [260][1] = "SDMA4", + [261][1] = "SDMA5", + [262][1] = "SDMA6", + [263][1] = "SDMA7", + [384][1] = "OSS", }; static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = @@ -1571,7 +1553,6 @@ static int gmc_v9_0_hw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool value; int r, i; - u32 tmp; /* The sequence of these two function calls matters.*/ gmc_v9_0_init_golden_registers(adev); @@ -1583,31 +1564,13 @@ static int gmc_v9_0_hw_init(void *handle) WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); } - amdgpu_device_program_register_sequence(adev, - golden_settings_vega10_hdp, - 
ARRAY_SIZE(golden_settings_vega10_hdp)); - if (adev->mmhub.funcs->update_power_gating) adev->mmhub.funcs->update_power_gating(adev, true); - switch (adev->asic_type) { - case CHIP_ARCTURUS: - WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1); - break; - default: - break; - } - - WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1); - - tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL); - WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp); - - WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8)); - WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40)); + adev->hdp.funcs->init_registers(adev); /* After HDP is initialized, flush HDP.*/ - adev->nbio.funcs->hdp_flush(adev, NULL); + adev->hdp.funcs->flush_hdp(adev, NULL); if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) value = false; diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c new file mode 100644 index 000000000000..e46621fed5b9 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c @@ -0,0 +1,137 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "amdgpu_atombios.h" +#include "hdp_v4_0.h" +#include "amdgpu_ras.h" + +#include "hdp/hdp_4_0_offset.h" +#include "hdp/hdp_4_0_sh_mask.h" +#include <uapi/linux/kfd_ioctl.h> + +/* for Vega20 register name change */ +#define mmHDP_MEM_POWER_CTRL 0x00d4 +#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L +#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L +#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0 + +static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + if (!ring || !ring->funcs->emit_wreg) + WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + else + amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); +} + +static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + if (!ring || !ring->funcs->emit_wreg) + WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1); + else + amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( + HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); +} + +static void hdp_v4_0_reset_ras_error_count(struct amdgpu_device *adev) +{ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP)) + return; + /*read back hdp ras counter to reset it to 0 */ + RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT); +} + +static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + if (adev->asic_type == CHIP_VEGA10 || + adev->asic_type == CHIP_VEGA12 || + adev->asic_type == CHIP_RAVEN) { + def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) + data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK; + else + data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK; + + if (def != data) + WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data); + } else { + def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL)); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) + data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK | + HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK | + HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK | + HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK; + else + data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK | + HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK | + HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK | + HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK); + + if (def != data) + WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data); + } +} + +static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev, + u32 *flags) +{ + int data; + + /* AMD_CG_SUPPORT_HDP_LS */ + data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); + if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK) + *flags |= AMD_CG_SUPPORT_HDP_LS; +} + +static void hdp_v4_0_init_registers(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_ARCTURUS: + WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1); + break; + default: + break; + } + + WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1); + + WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8)); + WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40)); +} + +const struct amdgpu_hdp_funcs hdp_v4_0_funcs = { + .flush_hdp = hdp_v4_0_flush_hdp, + .invalidate_hdp = hdp_v4_0_invalidate_hdp, + .reset_ras_error_count 
= hdp_v4_0_reset_ras_error_count, + .update_clock_gating = hdp_v4_0_update_clock_gating, + .get_clock_gating_state = hdp_v4_0_get_clockgating_state, + .init_registers = hdp_v4_0_init_registers, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h new file mode 100644 index 000000000000..d1e6399e8c46 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h @@ -0,0 +1,31 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __HDP_V4_0_H__ +#define __HDP_V4_0_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_hdp_funcs hdp_v4_0_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c new file mode 100644 index 000000000000..7a15e669b68d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c @@ -0,0 +1,212 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "amdgpu_atombios.h" +#include "hdp_v5_0.h" + +#include "hdp/hdp_5_0_0_offset.h" +#include "hdp/hdp_5_0_0_sh_mask.h" +#include <uapi/linux/kfd_ioctl.h> + +static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + if (!ring || !ring->funcs->emit_wreg) + WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + else + amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); +} + +static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + if (!ring || !ring->funcs->emit_wreg) { + WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1); + } else { + amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( + HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); + } +} + +static void hdp_v5_0_update_mem_power_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t hdp_clk_cntl, hdp_clk_cntl1; + uint32_t hdp_mem_pwr_cntl; + + if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_DS | + AMD_CG_SUPPORT_HDP_SD))) + return; + + hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL); + hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL); + + /* Before doing the clock/power mode switch, + * force the IPH & RC clocks on */ + hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL, + IPH_MEM_CLK_SOFT_OVERRIDE, 1); + hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL, + RC_MEM_CLK_SOFT_OVERRIDE, 1); + WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl); + + /* HDP 5.0 doesn't support dynamic power mode switching, so + * disable clock and power gating before making any changes */ + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_CTRL_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_LS_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_DS_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_SD_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + RC_MEM_POWER_CTRL_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + RC_MEM_POWER_LS_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + RC_MEM_POWER_DS_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + RC_MEM_POWER_SD_EN, 0); + WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl); + + /* only one clock gating mode (LS/DS/SD) can be enabled */ + if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) { + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_LS_EN, enable); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + RC_MEM_POWER_LS_EN, enable); + } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) { + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_DS_EN, enable); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + RC_MEM_POWER_DS_EN, enable); + } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) { + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_SD_EN, enable); + /* RC should not use shutdown mode, fall back to DS */ + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + RC_MEM_POWER_DS_EN, enable); + } + + /* it is confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
+ * be set for SRAM LS/DS/SD */ + if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS | + AMD_CG_SUPPORT_HDP_SD)) { + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_CTRL_EN, 1); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + RC_MEM_POWER_CTRL_EN, 1); + } + + WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl); + + /* restore IPH & RC clock override after clock/power mode changing */ + WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1); +} + +static void hdp_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t hdp_clk_cntl; + + if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG)) + return; + + hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL); + + if (enable) { + hdp_clk_cntl &= + ~(uint32_t) + (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK); + } else { + hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK; + } + + WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl); +} + +static void hdp_v5_0_update_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + hdp_v5_0_update_mem_power_gating(adev, enable); + hdp_v5_0_update_medium_grain_clock_gating(adev, enable); +} + +static void hdp_v5_0_get_clockgating_state(struct amdgpu_device *adev, + u32 *flags) +{ + uint32_t tmp; + + /* AMD_CG_SUPPORT_HDP_MGCG */ + tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL); + if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK))) + *flags |= AMD_CG_SUPPORT_HDP_MGCG; + + /* AMD_CG_SUPPORT_HDP_LS/DS/SD */ + tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL); + if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK) + *flags |= AMD_CG_SUPPORT_HDP_LS; + else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK) + *flags |= AMD_CG_SUPPORT_HDP_DS; + else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK) + *flags |= AMD_CG_SUPPORT_HDP_SD; +} + +static void hdp_v5_0_init_registers(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL); + tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK; + WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp); +} + +const struct amdgpu_hdp_funcs hdp_v5_0_funcs = { + .flush_hdp = hdp_v5_0_flush_hdp, + .invalidate_hdp = hdp_v5_0_invalidate_hdp, + .update_clock_gating = hdp_v5_0_update_clock_gating, + .get_clock_gating_state = hdp_v5_0_get_clockgating_state, + .init_registers = hdp_v5_0_init_registers, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h new file mode 100644 index 000000000000..2d5ec2b419f3 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h @@ -0,0 +1,31 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
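
Nearly every register touch in hdp_v5_0.c above goes through the REG_SET_FIELD()/REG_GET_FIELD() idiom: read the register once, rewrite individual bit-fields by mask and shift, and write the whole word back in a single store. A minimal stand-alone model of those macros follows; the field masks and shifts here are invented for illustration, since the driver takes the real values from the generated hdp_5_0_0_sh_mask.h header included above.

#include <stdint.h>
#include <stdio.h>

/* Invented layout: two single-bit fields of a clock-control register. */
#define HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK	0x00000001u
#define HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE__SHIFT	0
#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK	0x00000002u
#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE__SHIFT	1

/* Same shape as the kernel macros: clear the field, then OR in the
 * new value shifted into position. */
#define REG_SET_FIELD(orig, reg, field, val)				\
	(((orig) & ~reg##__##field##_MASK) |				\
	 (((uint32_t)(val) << reg##__##field##__SHIFT) & reg##__##field##_MASK))
#define REG_GET_FIELD(val, reg, field)					\
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

int main(void)
{
	uint32_t tmp = 0;	/* stands in for an RREG32(...) result */

	tmp = REG_SET_FIELD(tmp, HDP_CLK_CNTL, IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	tmp = REG_SET_FIELD(tmp, HDP_CLK_CNTL, RC_MEM_CLK_SOFT_OVERRIDE, 1);
	/* the driver would now WREG32(...) the rebuilt word */
	printf("cntl=0x%08x iph=%u\n", (unsigned)tmp,
	       (unsigned)REG_GET_FIELD(tmp, HDP_CLK_CNTL, IPH_MEM_CLK_SOFT_OVERRIDE));
	return 0;
}

The payoff of the idiom is visible in hdp_v5_0_update_mem_power_gating(): a long ladder of field updates accumulates in one local variable and hits the hardware with a single write.
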
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __HDP_V5_0_H__ +#define __HDP_V5_0_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_hdp_funcs hdp_v5_0_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 37d8b6ca4dab..cc957471f31e 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -194,19 +194,29 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); - if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { - wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); - /* When a ring buffer overflow happen start parsing interrupt - * from the last not overwritten vector (wptr + 16). Hopefully - * this should allow us to catchup. - */ - dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", - wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); - ih->rptr = (wptr + 16) & ih->ptr_mask; - tmp = RREG32(mmIH_RB_CNTL); - tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); - WREG32(mmIH_RB_CNTL, tmp); - } + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + /* Double check that the overflow wasn't already cleared. */ + wptr = RREG32(mmIH_RB_WPTR); + + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + /* When a ring buffer overflow happen start parsing interrupt + * from the last not overwritten vector (wptr + 16). Hopefully + * this should allow us to catchup. 
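
The new hdp_v5_0.h exposes the whole block through one symbol, the exported amdgpu_hdp_funcs table, following the per-IP vtable convention the driver already uses for the nbio and smuio blocks: SoC code holds a funcs pointer and dispatches through it, so supporting a new HDP revision means supplying a new table rather than touching every caller. A stripped-down user-space model of the convention; the struct layout and names below are illustrative, not the driver's actual definitions.

#include <stdio.h>

/* Per-IP callback table; one const instance per hardware revision. */
struct hdp_funcs {
	void (*flush_hdp)(void *dev);
	void (*invalidate_hdp)(void *dev);
};

static void hdp_v5_0_flush(void *dev)      { (void)dev; puts("v5.0 flush"); }
static void hdp_v5_0_invalidate(void *dev) { (void)dev; puts("v5.0 invalidate"); }

static const struct hdp_funcs hdp_v5_0_funcs = {
	.flush_hdp      = hdp_v5_0_flush,
	.invalidate_hdp = hdp_v5_0_invalidate,
};

struct device {
	const struct hdp_funcs *hdp;	/* chosen once, by ASIC type */
};

int main(void)
{
	struct device dev = { .hdp = &hdp_v5_0_funcs };

	/* Callers never name a revision; they dispatch through the table. */
	dev.hdp->flush_hdp(&dev);
	dev.hdp->invalidate_hdp(&dev);
	return 0;
}
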
+ */ + dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); + ih->rptr = (wptr + 16) & ih->ptr_mask; + tmp = RREG32(mmIH_RB_CNTL); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + WREG32(mmIH_RB_CNTL, tmp); + + +out: return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c index 985e454463e1..7f30629f21a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c @@ -554,7 +554,7 @@ static int mes_v10_1_allocate_eop_buf(struct amdgpu_device *adev) return r; } - memset(eop, 0, adev->mes.eop_gpu_obj->tbo.mem.size); + memset(eop, 0, adev->mes.eop_gpu_obj->tbo.base.size); amdgpu_bo_kunmap(adev->mes.eop_gpu_obj); amdgpu_bo_unreserve(adev->mes.eop_gpu_obj); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c index 1961745e89c7..ab9be5ad5a5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -531,12 +531,12 @@ mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev, if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) { data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK; - data1 &= !(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); - data2 &= !(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 7767ccca526b..3ee481557fc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -255,6 +255,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) if (!down_read_trylock(&adev->reset_sem)) return; + amdgpu_virt_fini_data_exchange(adev); atomic_set(&adev->in_gpu_reset, 1); do { diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index dd5c1e6ce009..48e588d3c409 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -276,6 +276,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) if (!down_read_trylock(&adev->reset_sem)) return; + amdgpu_virt_fini_data_exchange(adev); atomic_set(&adev->in_gpu_reset, 1); do { diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index 7ba229e43799..f4e4040bbd25 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -40,6 +40,53 @@ static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev); /** + * navi10_ih_init_register_offset - Initialize register offset for ih rings + * + * @adev: amdgpu_device pointer + * + * Initialize register offset ih rings (NAVI10). 
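
The reworked iceland_ih_get_wptr() above changes behavior as well as shape: the overflow bit latched in the write-back copy of the write pointer can be stale, so the function now re-reads IH_RB_WPTR and only runs the overflow path, with its warning and rptr skip, if the bit is still set in the live register. A compact user-space model of that double-check flow; the register accessor, bit position, and ring mask are stand-ins.

#include <stdint.h>
#include <stdio.h>

#define RB_OVERFLOW	(1u << 31)	/* stand-in for the IH_RB_WPTR field */
#define PTR_MASK	0x3ffu		/* stand-in ring pointer mask */

static uint32_t mmio_ih_rb_wptr = 64;	/* simulated register, bit already cleared */

static uint32_t read_wptr_reg(void) { return mmio_ih_rb_wptr; }

/* Trust the cheap write-back copy first, then confirm against the
 * register before paying for overflow handling. */
static uint32_t get_wptr(uint32_t wptr_writeback, uint32_t *rptr)
{
	uint32_t wptr = wptr_writeback;

	if (!(wptr & RB_OVERFLOW))
		goto out;

	/* Double check that the overflow wasn't already cleared. */
	wptr = read_wptr_reg();
	if (!(wptr & RB_OVERFLOW))
		goto out;

	wptr &= ~RB_OVERFLOW;
	/* Resume from the oldest vector not yet overwritten. */
	*rptr = (wptr + 16) & PTR_MASK;
out:
	return wptr & PTR_MASK;
}

int main(void)
{
	uint32_t rptr = 0;

	/* Stale overflow bit in the write-back copy: no overflow handling. */
	printf("wptr=%u rptr=%u\n",
	       (unsigned)get_wptr(8 | RB_OVERFLOW, &rptr), (unsigned)rptr);
	return 0;
}
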
+ */
+static void navi10_ih_init_register_offset(struct amdgpu_device *adev)
+{
+	struct amdgpu_ih_regs *ih_regs;
+
+	if (adev->irq.ih.ring_size) {
+		ih_regs = &adev->irq.ih.ih_regs;
+		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
+		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
+		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
+		ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
+		ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
+		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
+	}
+
+	if (adev->irq.ih1.ring_size) {
+		ih_regs = &adev->irq.ih1.ih_regs;
+		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
+		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
+		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
+		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
+	}
+
+	if (adev->irq.ih2.ring_size) {
+		ih_regs = &adev->irq.ih2.ih_regs;
+		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
+		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
+		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
+		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
+	}
+}
+
+/**
  * force_update_wptr_for_self_int - Force update the wptr for self interrupt
  *
  * @adev: amdgpu_device pointer
@@ -82,133 +129,66 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev,
 }
 
 /**
- * navi10_ih_enable_interrupts - Enable the interrupt ring buffer
+ * navi10_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
  *
  * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
  *
- * Enable the interrupt ring buffer (NAVI10).
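
navi10_ih_init_register_offset() is the keystone of this refactor: it records each ring's register offsets once, in the ring's amdgpu_ih_regs, so every routine that follows can take an amdgpu_ih_ring pointer and address the right registers without a per-ring if/else ladder. A sketch of the same caching idea, using invented names and offsets:

#include <stdint.h>
#include <stdio.h>

/* Invented numbers standing in for SOC15_REG_OFFSET() results. */
enum { IH_RB_WPTR = 0x10, IH_RB_WPTR_RING1 = 0x20, IH_RB_WPTR_RING2 = 0x30 };

struct ih_regs { uint32_t rb_wptr; /* ...base, cntl, rptr, ... */ };
struct ih_ring { int ring_size; struct ih_regs regs; };
struct device  { struct ih_ring ih, ih1, ih2; };

/* One-time setup: stash the per-ring offsets... */
static void init_register_offset(struct device *dev)
{
	if (dev->ih.ring_size)
		dev->ih.regs.rb_wptr = IH_RB_WPTR;
	if (dev->ih1.ring_size)
		dev->ih1.regs.rb_wptr = IH_RB_WPTR_RING1;
	if (dev->ih2.ring_size)
		dev->ih2.regs.rb_wptr = IH_RB_WPTR_RING2;
}

/* ...so later code is ring-agnostic: no "which ring is this?" branches. */
static uint32_t wptr_reg(const struct ih_ring *ring)
{
	return ring->regs.rb_wptr;	/* would feed RREG32()/WREG32() */
}

int main(void)
{
	struct device dev = { .ih = { .ring_size = 1 }, .ih1 = { .ring_size = 1 } };

	init_register_offset(&dev);
	printf("ring1 wptr register: 0x%x\n", (unsigned)wptr_reg(&dev.ih1));
	return 0;
}
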
+ * Toggle the interrupt ring buffer (NAVI10) */ -static void navi10_ih_enable_interrupts(struct amdgpu_device *adev) +static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, + bool enable) { - u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL); - - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1); - if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { - DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); - return; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); - } + struct amdgpu_ih_regs *ih_regs; + uint32_t tmp; - adev->irq.ih.enabled = true; + ih_regs = &ih->ih_regs; - if (adev->irq.ih1.ring_size) { - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, - RB_ENABLE, 1); - if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, - ih_rb_cntl)) { - DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); - return; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl); - } - adev->irq.ih1.enabled = true; - } + tmp = RREG32(ih_regs->ih_rb_cntl); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); + /* enable_intr field is only valid in ring0 */ + if (ih == &adev->irq.ih) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0)); + WREG32(ih_regs->ih_rb_cntl, tmp); - if (adev->irq.ih2.ring_size) { - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2, - RB_ENABLE, 1); - if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, - ih_rb_cntl)) { - DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); - return; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); - } - adev->irq.ih2.enabled = true; + if (enable) { + ih->enabled = true; + } else { + /* set rptr, wptr to 0 */ + WREG32(ih_regs->ih_rb_rptr, 0); + WREG32(ih_regs->ih_rb_wptr, 0); + ih->enabled = false; + ih->rptr = 0; } - if (adev->irq.ih_soft.ring_size) - adev->irq.ih_soft.enabled = true; + return 0; } /** - * navi10_ih_disable_interrupts - Disable the interrupt ring buffer + * navi10_ih_toggle_interrupts - Toggle all the available interrupt ring buffers * * @adev: amdgpu_device pointer + * @enable: enable or disable interrupt ring buffers * - * Disable the interrupt ring buffer (NAVI10). + * Toggle all the available interrupt ring buffers (NAVI10). 
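
navi10_ih_toggle_ring_interrupts() folds the former enable and disable paths into one helper: the same read-modify-write sets or clears RB_ENABLE, the ENABLE_INTR field is touched only for ring 0 where it is valid, and a disable also parks rptr/wptr at zero so the ring restarts cleanly. A toy model of that single-helper shape; the masks and struct below are invented, not the real IH_RB_CNTL layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RB_ENABLE_MASK		0x1u
#define ENABLE_INTR_MASK	0x2u	/* only meaningful on ring 0 */

struct ih_ring { uint32_t rb_cntl; uint32_t rptr, wptr; bool enabled; };

/* One read-modify-write covers both enable and disable. */
static void toggle_ring(struct ih_ring *ring, bool is_ring0, bool enable)
{
	uint32_t tmp = ring->rb_cntl;	/* would be RREG32(ih_regs->ih_rb_cntl) */

	tmp = enable ? (tmp | RB_ENABLE_MASK) : (tmp & ~RB_ENABLE_MASK);
	if (is_ring0)
		tmp = enable ? (tmp | ENABLE_INTR_MASK) : (tmp & ~ENABLE_INTR_MASK);
	ring->rb_cntl = tmp;		/* would be WREG32(...) */

	if (enable) {
		ring->enabled = true;
	} else {
		/* park the ring so a later enable starts from a clean state */
		ring->rptr = ring->wptr = 0;
		ring->enabled = false;
	}
}

int main(void)
{
	struct ih_ring ring0 = { 0 };

	toggle_ring(&ring0, true, true);
	printf("rb_cntl=0x%x enabled=%d\n", (unsigned)ring0.rb_cntl, ring0.enabled);
	toggle_ring(&ring0, true, false);
	printf("rb_cntl=0x%x enabled=%d\n", (unsigned)ring0.rb_cntl, ring0.enabled);
	return 0;
}
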
*/ -static void navi10_ih_disable_interrupts(struct amdgpu_device *adev) +static int navi10_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable) { - u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL); - - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0); - if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { - DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); - return; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); - } - - /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0); - adev->irq.ih.enabled = false; - adev->irq.ih.rptr = 0; - - if (adev->irq.ih1.ring_size) { - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, - RB_ENABLE, 0); - if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, - ih_rb_cntl)) { - DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); - return; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl); - } - /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0); - adev->irq.ih1.enabled = false; - adev->irq.ih1.rptr = 0; - } + struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; + int i; + int r; - if (adev->irq.ih2.ring_size) { - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2, - RB_ENABLE, 0); - if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, - ih_rb_cntl)) { - DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); - return; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); + for (i = 0; i < ARRAY_SIZE(ih); i++) { + if (ih[i]->ring_size) { + r = navi10_ih_toggle_ring_interrupts(adev, ih[i], enable); + if (r) + return r; } - /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0); - adev->irq.ih2.enabled = false; - adev->irq.ih2.rptr = 0; } + return 0; } static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl) @@ -253,22 +233,49 @@ static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih) return ih_doorbell_rtpr; } -static void navi10_ih_reroute_ih(struct amdgpu_device *adev) +/** + * navi10_ih_enable_ring - enable an ih ring buffer + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * + * Enable an ih ring buffer (NAVI10) + */ +static int navi10_ih_enable_ring(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) { + struct amdgpu_ih_regs *ih_regs; uint32_t tmp; - /* Reroute to IH ring 1 for VMC */ - WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12); - tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA); - tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1); - tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp); - - /* Reroute IH ring 1 for UMC */ - WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B); - tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA); - tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp); + ih_regs = &ih->ih_regs; + + /* Ring Buffer base. 
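
With the single-ring helper in place, navi10_ih_toggle_interrupts() replaces three near-identical enable/disable blocks with one loop over the array of possible rings, skipping rings with no buffer and returning at the first failure so callers such as navi10_ih_irq_init() can propagate errors; the old void functions could only log through DRM_ERROR. The shape of that consolidation, sketched with stand-in types:

#include <stddef.h>
#include <stdio.h>

struct ih_ring { int ring_size; int enabled; };
struct device  { struct ih_ring ih, ih1, ih2; };

/* Stand-in for the per-ring helper shown above. */
static int toggle_ring_interrupts(struct ih_ring *ring, int enable)
{
	/* would read-modify-write this ring's RB_CNTL via its ih_regs */
	ring->enabled = enable;
	return 0;
}

/* One loop replaces three copy-pasted blocks of register writes. */
static int toggle_interrupts(struct device *dev, int enable)
{
	struct ih_ring *ih[] = { &dev->ih, &dev->ih1, &dev->ih2 };
	size_t i;
	int r;

	for (i = 0; i < sizeof(ih) / sizeof(ih[0]); i++) {
		if (!ih[i]->ring_size)
			continue;	/* ring not allocated on this config */
		r = toggle_ring_interrupts(ih[i], enable);
		if (r)
			return r;	/* callers now see failures */
	}
	return 0;
}

int main(void)
{
	struct device dev = { .ih = { .ring_size = 1 }, .ih1 = { .ring_size = 1 } };

	printf("enable -> %d, ih1 enabled=%d\n",
	       toggle_interrupts(&dev, 1), dev.ih1.enabled);
	return 0;
}
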
[39:8] of 40-bit address of the beginning of the ring buffer*/ + WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8); + WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff); + + tmp = RREG32(ih_regs->ih_rb_cntl); + tmp = navi10_ih_rb_cntl(ih, tmp); + if (ih == &adev->irq.ih) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); + if (ih == &adev->irq.ih1) { + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); + } + WREG32(ih_regs->ih_rb_cntl, tmp); + + if (ih == &adev->irq.ih) { + /* set the ih ring 0 writeback address whether it's enabled or not */ + WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr)); + WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF); + } + + /* set rptr, wptr to 0 */ + WREG32(ih_regs->ih_rb_wptr, 0); + WREG32(ih_regs->ih_rb_rptr, 0); + + WREG32(ih_regs->ih_doorbell_rptr, navi10_ih_doorbell_rptr(ih)); + + return 0; } /** @@ -284,36 +291,21 @@ static void navi10_ih_reroute_ih(struct amdgpu_device *adev) */ static int navi10_ih_irq_init(struct amdgpu_device *adev) { - struct amdgpu_ih_ring *ih = &adev->irq.ih; - u32 ih_rb_cntl, ih_chicken; + struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; + u32 ih_chicken; u32 tmp; + int ret; + int i; /* disable irqs */ - navi10_ih_disable_interrupts(adev); + ret = navi10_ih_toggle_interrupts(adev, false); + if (ret) + return ret; adev->nbio.funcs->ih_control(adev); - /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff); - - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL); - ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, - !!adev->irq.msi_enabled); - if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { - DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); - return -ETIMEDOUT; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); - } - if (adev->irq.ih1.ring_size) - navi10_ih_reroute_ih(adev); - if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) { - if (ih->use_bus_addr) { + if (ih[0]->use_bus_addr) { switch (adev->asic_type) { case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: @@ -334,77 +326,17 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) } } - /* set the writeback address whether it's enabled or not */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, - lower_32_bits(ih->wptr_addr)); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, - upper_32_bits(ih->wptr_addr) & 0xFFFF); - - /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0); - - WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, - navi10_ih_doorbell_rptr(ih)); - - adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell, - ih->doorbell_index); - - ih = &adev->irq.ih1; - if (ih->ring_size) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1, - (ih->gpu_addr >> 40) & 0xff); - - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); - ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, - WPTR_OVERFLOW_ENABLE, 0); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, - RB_FULL_DRAIN_ENABLE, 1); - if 
(amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, - ih_rb_cntl)) { - DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); - return -ETIMEDOUT; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl); + for (i = 0; i < ARRAY_SIZE(ih); i++) { + if (ih[i]->ring_size) { + ret = navi10_ih_enable_ring(adev, ih[i]); + if (ret) + return ret; } - /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0); - - WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1, - navi10_ih_doorbell_rptr(ih)); - } - - ih = &adev->irq.ih2; - if (ih->ring_size) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2, - (ih->gpu_addr >> 40) & 0xff); - - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); - ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl); - - if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, - ih_rb_cntl)) { - DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); - return -ETIMEDOUT; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); - } - /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0); - - WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2, - navi10_ih_doorbell_rptr(ih)); } + /* update doorbell range for ih ring 0*/ + adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell, + ih[0]->doorbell_index); tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL); tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL, @@ -418,10 +350,15 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) pci_set_master(adev->pdev); /* enable interrupts */ - navi10_ih_enable_interrupts(adev); + ret = navi10_ih_toggle_interrupts(adev, true); + if (ret) + return ret; /* enable wptr force update for self int */ force_update_wptr_for_self_int(adev, 0, 8, true); + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + return 0; } @@ -435,7 +372,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) static void navi10_ih_irq_disable(struct amdgpu_device *adev) { force_update_wptr_for_self_int(adev, 0, 8, false); - navi10_ih_disable_interrupts(adev); + navi10_ih_toggle_interrupts(adev, false); /* Wait and acknowledge irq */ mdelay(1); @@ -455,23 +392,16 @@ static void navi10_ih_irq_disable(struct amdgpu_device *adev) static u32 navi10_ih_get_wptr(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { - u32 wptr, reg, tmp; + u32 wptr, tmp; + struct amdgpu_ih_regs *ih_regs; wptr = le32_to_cpu(*ih->wptr_cpu); + ih_regs = &ih->ih_regs; if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; - if (ih == &adev->irq.ih) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR); - else if (ih == &adev->irq.ih1) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1); - else if (ih == &adev->irq.ih2) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2); - else - BUG(); - - wptr = RREG32_NO_KIQ(reg); + wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr); if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); @@ -486,68 +416,14 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev, wptr, ih->rptr, tmp); ih->rptr = tmp; - if (ih == &adev->irq.ih) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL); - else if (ih == &adev->irq.ih1) - reg = SOC15_REG_OFFSET(OSSSYS, 0, 
mmIH_RB_CNTL_RING1); - else if (ih == &adev->irq.ih2) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2); - else - BUG(); - - tmp = RREG32_NO_KIQ(reg); + tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); - WREG32_NO_KIQ(reg, tmp); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); out: return (wptr & ih->ptr_mask); } /** - * navi10_ih_decode_iv - decode an interrupt vector - * - * @adev: amdgpu_device pointer - * @ih: IH ring buffer to decode - * @entry: IV entry to place decoded information into - * - * Decodes the interrupt vector at the current rptr - * position and also advance the position. - */ -static void navi10_ih_decode_iv(struct amdgpu_device *adev, - struct amdgpu_ih_ring *ih, - struct amdgpu_iv_entry *entry) -{ - /* wptr/rptr are in bytes! */ - u32 ring_index = ih->rptr >> 2; - uint32_t dw[8]; - - dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); - dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); - dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); - dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); - dw[4] = le32_to_cpu(ih->ring[ring_index + 4]); - dw[5] = le32_to_cpu(ih->ring[ring_index + 5]); - dw[6] = le32_to_cpu(ih->ring[ring_index + 6]); - dw[7] = le32_to_cpu(ih->ring[ring_index + 7]); - - entry->client_id = dw[0] & 0xff; - entry->src_id = (dw[0] >> 8) & 0xff; - entry->ring_id = (dw[0] >> 16) & 0xff; - entry->vmid = (dw[0] >> 24) & 0xf; - entry->vmid_src = (dw[0] >> 31); - entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32); - entry->timestamp_src = dw[2] >> 31; - entry->pasid = dw[3] & 0xffff; - entry->pasid_src = dw[3] >> 31; - entry->src_data[0] = dw[4]; - entry->src_data[1] = dw[5]; - entry->src_data[2] = dw[6]; - entry->src_data[3] = dw[7]; - - /* wptr/rptr are in bytes! */ - ih->rptr += 32; -} - -/** * navi10_ih_irq_rearm - rearm IRQ if lost * * @adev: amdgpu_device pointer @@ -557,22 +433,15 @@ static void navi10_ih_decode_iv(struct amdgpu_device *adev, static void navi10_ih_irq_rearm(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { - uint32_t reg_rptr = 0; uint32_t v = 0; uint32_t i = 0; + struct amdgpu_ih_regs *ih_regs; - if (ih == &adev->irq.ih) - reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR); - else if (ih == &adev->irq.ih1) - reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1); - else if (ih == &adev->irq.ih2) - reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2); - else - return; + ih_regs = &ih->ih_regs; /* Rearm IRQ / re-write doorbell if doorbell write is lost */ for (i = 0; i < MAX_REARM_RETRY; i++) { - v = RREG32_NO_KIQ(reg_rptr); + v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr); if ((v < ih->ring_size) && (v != ih->rptr)) WDOORBELL32(ih->doorbell_index, ih->rptr); else @@ -591,6 +460,8 @@ static void navi10_ih_irq_rearm(struct amdgpu_device *adev, static void navi10_ih_set_rptr(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { + struct amdgpu_ih_regs *ih_regs; + if (ih->use_doorbell) { /* XXX check if swapping is necessary on BE */ *ih->rptr_cpu = ih->rptr; @@ -598,12 +469,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) navi10_ih_irq_rearm(adev, ih); - } else if (ih == &adev->irq.ih) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr); - } else if (ih == &adev->irq.ih1) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr); - } else if (ih == &adev->irq.ih2) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr); + } else { + ih_regs = &ih->ih_regs; + WREG32(ih_regs->ih_rb_rptr, ih->rptr); } } @@ -685,23 +553,8 @@ static int 
navi10_ih_sw_init(void *handle) adev->irq.ih1.ring_size = 0; adev->irq.ih2.ring_size = 0; - if (adev->asic_type < CHIP_NAVI10) { - r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true); - if (r) - return r; - - adev->irq.ih1.use_doorbell = true; - adev->irq.ih1.doorbell_index = - (adev->doorbell_index.ih + 1) << 1; - - r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true); - if (r) - return r; - - adev->irq.ih2.use_doorbell = true; - adev->irq.ih2.doorbell_index = - (adev->doorbell_index.ih + 2) << 1; - } + /* initialize ih control registers offset */ + navi10_ih_init_register_offset(adev); r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true); if (r) @@ -717,6 +570,7 @@ static int navi10_ih_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_irq_fini(adev); + amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft); amdgpu_ih_ring_fini(adev, &adev->irq.ih2); amdgpu_ih_ring_fini(adev, &adev->irq.ih1); amdgpu_ih_ring_fini(adev, &adev->irq.ih); @@ -848,7 +702,7 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = { static const struct amdgpu_ih_funcs navi10_ih_funcs = { .get_wptr = navi10_ih_get_wptr, - .decode_iv = navi10_ih_decode_iv, + .decode_iv = amdgpu_ih_decode_iv_helper, .set_rptr = navi10_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index b5c3db16c2b0..05ddec7ba7e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -34,6 +34,14 @@ #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 #define smnPCIE_LC_CNTL 0x11140280 +#define smnPCIE_LC_CNTL3 0x111402d4 +#define smnPCIE_LC_CNTL6 0x111402ec +#define smnPCIE_LC_CNTL7 0x111402f0 +#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c +#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123538 +#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324 +#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4 +#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c #define mmBIF_SDMA2_DOORBELL_RANGE 0x01d6 #define mmBIF_SDMA2_DOORBELL_RANGE_BASE_IDX 2 @@ -80,15 +88,6 @@ static void nbio_v2_3_mc_access_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0); } -static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) - WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else - amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); -} - static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev) { return RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE); @@ -359,6 +358,111 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev, WREG32_PCIE(smnPCIE_LC_CNTL, data); } +static void nbio_v2_3_program_ltr(struct amdgpu_device *adev) +{ + uint32_t def, data; + + WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB); + + def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2); + data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK; + if (def != data) + WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2, data); + + def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL); + data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK; + if (def != data) + WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data); + + def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); + data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; + if (def != data) + 
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); +} + +static void nbio_v2_3_program_aspm(struct amdgpu_device *adev) +{ + uint32_t def, data; + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); + data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; + data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL7); + data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL7, data); + + def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK); + data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK; + if (def != data) + WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL3); + data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL3, data); + + def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3); + data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK; + data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK; + if (def != data) + WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data); + + def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5); + data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK; + if (def != data) + WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data); + + def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); + data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; + if (def != data) + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); + + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001); + + def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2); + data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | + PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK; + data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK; + if (def != data) + WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL6); + data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK | + PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL6, data); + + nbio_v2_3_program_ltr(adev); + + def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3); + data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT; + data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT; + if (def != data) + WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data); + + def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5); + data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT; + if (def != data) + WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; + data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; + data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL3); + data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL3, data); +} + const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { .get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset, @@ -366,7 +470,6 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { .get_pcie_data_offset = nbio_v2_3_get_pcie_data_offset, .get_rev_id = nbio_v2_3_get_rev_id, .mc_access_enable = nbio_v2_3_mc_access_enable, - .hdp_flush = nbio_v2_3_hdp_flush, .get_memsize = nbio_v2_3_get_memsize, .sdma_doorbell_range = 
nbio_v2_3_sdma_doorbell_range, .vcn_doorbell_range = nbio_v2_3_vcn_doorbell_range, @@ -380,4 +483,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { .init_registers = nbio_v2_3_init_registers, .remap_hdp_registers = nbio_v2_3_remap_hdp_registers, .enable_aspm = nbio_v2_3_enable_aspm, + .program_aspm = nbio_v2_3_program_aspm, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index d2f1fe55d388..83ea063388fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -29,6 +29,15 @@ #include "nbio/nbio_6_1_sh_mask.h" #include "nbio/nbio_6_1_smn.h" #include "vega10_enum.h" +#include <uapi/linux/kfd_ioctl.h> + +static void nbio_v6_1_remap_hdp_registers(struct amdgpu_device *adev) +{ + WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL); + WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL); +} static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev) { @@ -50,18 +59,6 @@ static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0); } -static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) - WREG32_SOC15_NO_KIQ(NBIO, 0, - mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, - 0); - else - amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( - NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0); -} - static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev) { return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE); @@ -266,7 +263,6 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { .get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset, .get_rev_id = nbio_v6_1_get_rev_id, .mc_access_enable = nbio_v6_1_mc_access_enable, - .hdp_flush = nbio_v6_1_hdp_flush, .get_memsize = nbio_v6_1_get_memsize, .sdma_doorbell_range = nbio_v6_1_sdma_doorbell_range, .enable_doorbell_aperture = nbio_v6_1_enable_doorbell_aperture, @@ -277,4 +273,5 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { .get_clockgating_state = nbio_v6_1_get_clockgating_state, .ih_control = nbio_v6_1_ih_control, .init_registers = nbio_v6_1_init_registers, + .remap_hdp_registers = nbio_v6_1_remap_hdp_registers, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index ae685813c419..3c00666a13e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -60,15 +60,6 @@ static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0); } -static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) - WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else - amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); -} - static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev) { return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE); @@ -292,7 +283,6 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = { .get_pcie_data_offset = nbio_v7_0_get_pcie_data_offset, .get_rev_id = nbio_v7_0_get_rev_id, .mc_access_enable = nbio_v7_0_mc_access_enable, - .hdp_flush = nbio_v7_0_hdp_flush, .get_memsize = nbio_v7_0_get_memsize, .sdma_doorbell_range = nbio_v7_0_sdma_doorbell_range, .vcn_doorbell_range = 
nbio_v7_0_vcn_doorbell_range, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c index aa36022670f9..598ce0e93627 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c @@ -56,15 +56,6 @@ static void nbio_v7_2_mc_access_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0); } -static void nbio_v7_2_hdp_flush(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) - WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else - amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); -} - static u32 nbio_v7_2_get_memsize(struct amdgpu_device *adev) { return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_CONFIG_MEMSIZE); @@ -325,7 +316,6 @@ const struct amdgpu_nbio_funcs nbio_v7_2_funcs = { .get_pcie_port_data_offset = nbio_v7_2_get_pcie_port_data_offset, .get_rev_id = nbio_v7_2_get_rev_id, .mc_access_enable = nbio_v7_2_mc_access_enable, - .hdp_flush = nbio_v7_2_hdp_flush, .get_memsize = nbio_v7_2_get_memsize, .sdma_doorbell_range = nbio_v7_2_sdma_doorbell_range, .vcn_doorbell_range = nbio_v7_2_vcn_doorbell_range, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index eadc9526d33f..4bc1d1434065 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -82,15 +82,6 @@ static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0); } -static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) - WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else - amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); -} - static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev) { return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE); @@ -541,7 +532,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { .get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset, .get_rev_id = nbio_v7_4_get_rev_id, .mc_access_enable = nbio_v7_4_mc_access_enable, - .hdp_flush = nbio_v7_4_hdp_flush, .get_memsize = nbio_v7_4_get_memsize, .sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range, .vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range, diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 6bee3677394a..160fa5f59805 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -38,9 +38,6 @@ #include "gc/gc_10_1_0_offset.h" #include "gc/gc_10_1_0_sh_mask.h" -#include "hdp/hdp_5_0_0_offset.h" -#include "hdp/hdp_5_0_0_sh_mask.h" -#include "smuio/smuio_11_0_0_offset.h" #include "mp/mp_11_0_offset.h" #include "soc15.h" @@ -50,6 +47,7 @@ #include "mmhub_v2_0.h" #include "nbio_v2_3.h" #include "nbio_v7_2.h" +#include "hdp_v5_0.h" #include "nv.h" #include "navi10_ih.h" #include "gfx_v10_0.h" @@ -62,6 +60,8 @@ #include "dce_virtual.h" #include "mes_v10_1.h" #include "mxgpu_nv.h" +#include "smuio_v11_0.h" +#include "smuio_v11_0_6.h" static const struct amd_ip_funcs nv_common_ip_funcs; @@ -203,6 +203,7 @@ static bool nv_read_bios_from_rom(struct amdgpu_device *adev, { u32 *dw_ptr; u32 i, length_dw; + u32 rom_index_offset, rom_data_offset; if (bios == NULL) return false; @@ -215,11 +216,16 @@ static bool nv_read_bios_from_rom(struct 
amdgpu_device *adev, dw_ptr = (u32 *)bios; length_dw = ALIGN(length_bytes, 4) / 4; + rom_index_offset = + adev->smuio.funcs->get_rom_index_offset(adev); + rom_data_offset = + adev->smuio.funcs->get_rom_data_offset(adev); + /* set rom index to 0 */ - WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0); + WREG32(rom_index_offset, 0); /* read out the rom data */ for (i = 0; i < length_dw; i++) - dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA)); + dw_ptr[i] = RREG32(rom_data_offset); return true; } @@ -336,6 +342,38 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev) return ret; } +static int nv_asic_mode2_reset(struct amdgpu_device *adev) +{ + u32 i; + int ret = 0; + + amdgpu_atombios_scratch_regs_engine_hung(adev, true); + + /* disable BM */ + pci_clear_master(adev->pdev); + + amdgpu_device_cache_pci_state(adev->pdev); + + ret = amdgpu_dpm_mode2_reset(adev); + if (ret) + dev_err(adev->dev, "GPU mode2 reset failed\n"); + + amdgpu_device_load_pci_state(adev->pdev); + + /* wait for asic to come out of reset */ + for (i = 0; i < adev->usec_timeout; i++) { + u32 memsize = adev->nbio.funcs->get_memsize(adev); + + if (memsize != 0xffffffff) + break; + udelay(1); + } + + amdgpu_atombios_scratch_regs_engine_hung(adev, false); + + return ret; +} + static bool nv_asic_supports_baco(struct amdgpu_device *adev) { struct smu_context *smu = &adev->smu; @@ -352,7 +390,9 @@ nv_asic_reset_method(struct amdgpu_device *adev) struct smu_context *smu = &adev->smu; if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 || - amdgpu_reset_method == AMD_RESET_METHOD_BACO) + amdgpu_reset_method == AMD_RESET_METHOD_MODE2 || + amdgpu_reset_method == AMD_RESET_METHOD_BACO || + amdgpu_reset_method == AMD_RESET_METHOD_PCI) return amdgpu_reset_method; if (amdgpu_reset_method != -1) @@ -360,6 +400,8 @@ nv_asic_reset_method(struct amdgpu_device *adev) amdgpu_reset_method); switch (adev->asic_type) { + case CHIP_VANGOGH: + return AMD_RESET_METHOD_MODE2; case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: @@ -377,7 +419,16 @@ static int nv_asic_reset(struct amdgpu_device *adev) int ret = 0; struct smu_context *smu = &adev->smu; - if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { + /* skip reset on vangogh for now */ + if (adev->asic_type == CHIP_VANGOGH) + return 0; + + switch (nv_asic_reset_method(adev)) { + case AMD_RESET_METHOD_PCI: + dev_info(adev->dev, "PCI reset\n"); + ret = amdgpu_device_pci_reset(adev); + break; + case AMD_RESET_METHOD_BACO: dev_info(adev->dev, "BACO reset\n"); ret = smu_baco_enter(smu); @@ -386,9 +437,15 @@ static int nv_asic_reset(struct amdgpu_device *adev) ret = smu_baco_exit(smu); if (ret) return ret; - } else { + break; + case AMD_RESET_METHOD_MODE2: + dev_info(adev->dev, "MODE2 reset\n"); + ret = nv_asic_mode2_reset(adev); + break; + default: dev_info(adev->dev, "MODE1 reset\n"); ret = nv_asic_mode1_reset(adev); + break; } return ret; @@ -423,11 +480,14 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev) static void nv_program_aspm(struct amdgpu_device *adev) { - - if (amdgpu_aspm == 0) + if (amdgpu_aspm != 1) return; - /* todo */ + if ((adev->asic_type >= CHIP_SIENNA_CICHLID) && + !(adev->flags & AMD_IS_APU) && + (adev->nbio.funcs->program_aspm)) + adev->nbio.funcs->program_aspm(adev); + } static void nv_enable_doorbell_aperture(struct amdgpu_device *adev, @@ -514,6 +574,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) adev->nbio.funcs = &nbio_v2_3_funcs; adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; } + adev->hdp.funcs = 
&hdp_v5_0_funcs; + + if (adev->asic_type >= CHIP_SIENNA_CICHLID) + adev->smuio.funcs = &smuio_v11_0_6_funcs; + else + adev->smuio.funcs = &smuio_v11_0_funcs; if (adev->asic_type == CHIP_SIENNA_CICHLID) adev->gmc.xgmi.supported = true; @@ -669,22 +735,6 @@ static uint32_t nv_get_rev_id(struct amdgpu_device *adev) return adev->nbio.funcs->get_rev_id(adev); } -static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) -{ - adev->nbio.funcs->hdp_flush(adev, ring); -} - -static void nv_invalidate_hdp(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) { - WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1); - } else { - amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( - HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); - } -} - static bool nv_need_full_reset(struct amdgpu_device *adev) { return true; @@ -768,10 +818,10 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev, * The ASPM function is not fully enabled and verified on * Navi yet. Temporarily skip this until ASPM enabled. */ -#if 0 - if (adev->nbio.funcs->enable_aspm) + if ((adev->asic_type >= CHIP_SIENNA_CICHLID) && + !(adev->flags & AMD_IS_APU) && + (adev->nbio.funcs->enable_aspm)) adev->nbio.funcs->enable_aspm(adev, !enter); -#endif return 0; } @@ -788,8 +838,6 @@ static const struct amdgpu_asic_funcs nv_asic_funcs = .set_uvd_clocks = &nv_set_uvd_clocks, .set_vce_clocks = &nv_set_vce_clocks, .get_config_memsize = &nv_get_config_memsize, - .flush_hdp = &nv_flush_hdp, - .invalidate_hdp = &nv_invalidate_hdp, .init_doorbell_index = &nv_init_doorbell_index, .need_full_reset = &nv_need_full_reset, .need_reset_on_init = &nv_need_reset_on_init, @@ -1080,120 +1128,6 @@ static int nv_common_soft_reset(void *handle) return 0; } -static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev, - bool enable) -{ - uint32_t hdp_clk_cntl, hdp_clk_cntl1; - uint32_t hdp_mem_pwr_cntl; - - if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | - AMD_CG_SUPPORT_HDP_DS | - AMD_CG_SUPPORT_HDP_SD))) - return; - - hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL); - hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL); - - /* Before doing clock/power mode switch, - * forced on IPH & RC clock */ - hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL, - IPH_MEM_CLK_SOFT_OVERRIDE, 1); - hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL, - RC_MEM_CLK_SOFT_OVERRIDE, 1); - WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl); - - /* HDP 5.0 doesn't support dynamic power mode switch, - * disable clock and power gating before any changing */ - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - IPH_MEM_POWER_CTRL_EN, 0); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - IPH_MEM_POWER_LS_EN, 0); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - IPH_MEM_POWER_DS_EN, 0); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - IPH_MEM_POWER_SD_EN, 0); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - RC_MEM_POWER_CTRL_EN, 0); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - RC_MEM_POWER_LS_EN, 0); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - RC_MEM_POWER_DS_EN, 0); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - RC_MEM_POWER_SD_EN, 0); - WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl); - - /* only one clock gating mode (LS/DS/SD) can be enabled */ - 
if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) { - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, - HDP_MEM_POWER_CTRL, - IPH_MEM_POWER_LS_EN, enable); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, - HDP_MEM_POWER_CTRL, - RC_MEM_POWER_LS_EN, enable); - } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) { - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, - HDP_MEM_POWER_CTRL, - IPH_MEM_POWER_DS_EN, enable); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, - HDP_MEM_POWER_CTRL, - RC_MEM_POWER_DS_EN, enable); - } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) { - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, - HDP_MEM_POWER_CTRL, - IPH_MEM_POWER_SD_EN, enable); - /* RC should not use shut down mode, fallback to ds */ - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, - HDP_MEM_POWER_CTRL, - RC_MEM_POWER_DS_EN, enable); - } - - /* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to - * be set for SRAM LS/DS/SD */ - if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS | - AMD_CG_SUPPORT_HDP_SD)) { - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - IPH_MEM_POWER_CTRL_EN, 1); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - RC_MEM_POWER_CTRL_EN, 1); - } - - WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl); - - /* restore IPH & RC clock override after clock/power mode changing */ - WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1); -} - -static void nv_update_hdp_clock_gating(struct amdgpu_device *adev, - bool enable) -{ - uint32_t hdp_clk_cntl; - - if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG)) - return; - - hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL); - - if (enable) { - hdp_clk_cntl &= - ~(uint32_t) - (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK); - } else { - hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK; - } - - WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl); -} - static int nv_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1213,9 +1147,9 @@ static int nv_common_set_clockgating_state(void *handle, state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE); - nv_update_hdp_mem_power_gating(adev, - state == AMD_CG_STATE_GATE); - nv_update_hdp_clock_gating(adev, + adev->hdp.funcs->update_clock_gating(adev, + state == AMD_CG_STATE_GATE); + adev->smuio.funcs->update_rom_clock_gating(adev, state == AMD_CG_STATE_GATE); break; default: @@ -1234,31 +1168,15 @@ static int nv_common_set_powergating_state(void *handle, static void nv_common_get_clockgating_state(void *handle, u32 *flags) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - uint32_t tmp; if (amdgpu_sriov_vf(adev)) *flags = 0; adev->nbio.funcs->get_clockgating_state(adev, flags); - /* AMD_CG_SUPPORT_HDP_MGCG */ - tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL); - if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK | - 
HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK))) - *flags |= AMD_CG_SUPPORT_HDP_MGCG; - - /* AMD_CG_SUPPORT_HDP_LS/DS/SD */ - tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL); - if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK) - *flags |= AMD_CG_SUPPORT_HDP_LS; - else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK) - *flags |= AMD_CG_SUPPORT_HDP_DS; - else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK) - *flags |= AMD_CG_SUPPORT_HDP_SD; + adev->hdp.funcs->get_clock_gating_state(adev, flags); + + adev->smuio.funcs->get_clock_gating_state(adev, flags); return; } diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index d7f92634eba2..4b1cc5e9ee92 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -92,8 +92,6 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) (uint8_t *)ta_hdr + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); - adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - adev->psp.ta_dtm_ucode_version = le32_to_cpu(ta_hdr->ta_dtm_ucode_version); adev->psp.ta_dtm_ucode_size = @@ -101,6 +99,16 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) adev->psp.ta_dtm_start_addr = (uint8_t *)adev->psp.ta_hdcp_start_addr + le32_to_cpu(ta_hdr->ta_dtm_offset_bytes); + + adev->psp.ta_securedisplay_ucode_version = + le32_to_cpu(ta_hdr->ta_securedisplay_ucode_version); + adev->psp.ta_securedisplay_ucode_size = + le32_to_cpu(ta_hdr->ta_securedisplay_size_bytes); + adev->psp.ta_securedisplay_start_addr = + (uint8_t *)adev->psp.ta_hdcp_start_addr + + le32_to_cpu(ta_hdr->ta_securedisplay_offset_bytes); + + adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index bd4248c93c49..c325d6f53a71 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -392,37 +392,6 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) return ret; } -static void psp_v11_0_reroute_ih(struct psp_context *psp) -{ - struct amdgpu_device *adev = psp->adev; - uint32_t tmp; - - /* Change IH ring for VMC */ - tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b); - tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1); - tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1); - - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET); - - mdelay(20); - psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); - - /* Change IH ring for UMC */ - tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b); - tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1); - - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET); - - mdelay(20); - psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); -} - static int psp_v11_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -430,11 +399,6 @@ static int psp_v11_0_ring_init(struct psp_context *psp, struct psp_ring *ring; struct amdgpu_device *adev = psp->adev; - if ((!amdgpu_sriov_vf(adev)) && - 
!(adev->asic_type >= CHIP_SIENNA_CICHLID && - adev->asic_type <= CHIP_DIMGREY_CAVEFISH)) - psp_v11_0_reroute_ih(psp); - ring = &psp->km_ring; ring->ring_type = ring_type; @@ -726,7 +690,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) } memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); - adev->nbio.funcs->hdp_flush(adev, NULL); + adev->hdp.funcs->flush_hdp(adev, NULL); vfree(buf); } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index ce56e93c6886..c8c22c1d1e65 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -46,7 +46,6 @@ #include "sdma6/sdma6_4_2_2_sh_mask.h" #include "sdma7/sdma7_4_2_2_offset.h" #include "sdma7/sdma7_4_2_2_sh_mask.h" -#include "hdp/hdp_4_0_offset.h" #include "sdma0/sdma0_4_1_default.h" #include "soc15_common.h" diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index b208b81005bb..d345e324837d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -32,7 +32,6 @@ #include "gc/gc_10_1_0_offset.h" #include "gc/gc_10_1_0_sh_mask.h" -#include "hdp/hdp_5_0_0_offset.h" #include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h" #include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h" diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index f1ba36a094da..690a5090475a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -119,15 +119,7 @@ static int sdma_v5_2_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst) static void sdma_v5_2_destroy_inst_ctx(struct amdgpu_device *adev) { - int i; - - for (i = 0; i < adev->sdma.num_instances; i++) { - release_firmware(adev->sdma.instance[i].fw); - adev->sdma.instance[i].fw = NULL; - - if (adev->asic_type == CHIP_SIENNA_CICHLID) - break; - } + release_firmware(adev->sdma.instance[0].fw); memset((void *)adev->sdma.instance, 0, sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES); @@ -185,23 +177,10 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev) if (err) goto out; - for (i = 1; i < adev->sdma.num_instances; i++) { - if (adev->asic_type >= CHIP_SIENNA_CICHLID && - adev->asic_type <= CHIP_DIMGREY_CAVEFISH) { - memcpy((void *)&adev->sdma.instance[i], - (void *)&adev->sdma.instance[0], - sizeof(struct amdgpu_sdma_instance)); - } else { - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i); - err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev); - if (err) - goto out; - - err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]); - if (err) - goto out; - } - } + for (i = 1; i < adev->sdma.num_instances; i++) + memcpy((void *)&adev->sdma.instance[i], + (void *)&adev->sdma.instance[0], + sizeof(struct amdgpu_sdma_instance)); DRM_DEBUG("psp_load == '%s'\n", adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? 
"true" : "false"); diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 3cf0589bfea5..6b5cf7882a12 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1270,7 +1270,7 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev) u32 i; int r = -EINVAL; - dev_info(adev->dev, "GPU pci config reset\n"); + amdgpu_atombios_scratch_regs_engine_hung(adev, true); /* set mclk/sclk to bypass */ si_set_clk_bypass_mode(adev); @@ -1294,20 +1294,6 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev) } udelay(1); } - - return r; -} - -static int si_asic_reset(struct amdgpu_device *adev) -{ - int r; - - dev_info(adev->dev, "PCI CONFIG reset\n"); - - amdgpu_atombios_scratch_regs_engine_hung(adev, true); - - r = si_gpu_pci_config_reset(adev); - amdgpu_atombios_scratch_regs_engine_hung(adev, false); return r; @@ -1321,14 +1307,34 @@ static bool si_asic_supports_baco(struct amdgpu_device *adev) static enum amd_reset_method si_asic_reset_method(struct amdgpu_device *adev) { - if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY && - amdgpu_reset_method != -1) + if (amdgpu_reset_method == AMD_RESET_METHOD_PCI) + return amdgpu_reset_method; + else if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY && + amdgpu_reset_method != -1) dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n", - amdgpu_reset_method); + amdgpu_reset_method); return AMD_RESET_METHOD_LEGACY; } +static int si_asic_reset(struct amdgpu_device *adev) +{ + int r; + + switch (si_asic_reset_method(adev)) { + case AMD_RESET_METHOD_PCI: + dev_info(adev->dev, "PCI reset\n"); + r = amdgpu_device_pci_reset(adev); + break; + default: + dev_info(adev->dev, "PCI CONFIG reset\n"); + r = si_gpu_pci_config_reset(adev); + break; + } + + return r; +} + static u32 si_get_config_memsize(struct amdgpu_device *adev) { return RREG32(mmCONFIG_MEMSIZE); diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c new file mode 100644 index 000000000000..3a18dbb55c32 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c @@ -0,0 +1,77 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "smuio_v11_0_6.h" +#include "smuio/smuio_11_0_6_offset.h" +#include "smuio/smuio_11_0_6_sh_mask.h" + +static u32 smuio_v11_0_6_get_rom_index_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX); +} + +static u32 smuio_v11_0_6_get_rom_data_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA); +} + +static void smuio_v11_0_6_update_rom_clock_gating(struct amdgpu_device *adev, bool enable) +{ + u32 def, data; + + /* enable/disable ROM CG is not supported on APU */ + if (adev->flags & AMD_IS_APU) + return; + + def = data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG)) + data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | + CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK); + else + data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | + CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK; + + if (def != data) + WREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0, data); +} + +static void smuio_v11_0_6_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags) +{ + u32 data; + + /* CGTT_ROM_CLK_CTRL0 is not available for APU */ + if (adev->flags & AMD_IS_APU) + return; + + data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0); + if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK)) + *flags |= AMD_CG_SUPPORT_ROM_MGCG; +} + +const struct amdgpu_smuio_funcs smuio_v11_0_6_funcs = { + .get_rom_index_offset = smuio_v11_0_6_get_rom_index_offset, + .get_rom_data_offset = smuio_v11_0_6_get_rom_data_offset, + .update_rom_clock_gating = smuio_v11_0_6_update_rom_clock_gating, + .get_clock_gating_state = smuio_v11_0_6_get_clock_gating_state, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h new file mode 100644 index 000000000000..3c3f4ab0bc9b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h @@ -0,0 +1,30 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __SMUIO_V11_0_6_H__ +#define __SMUIO_V11_0_6_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_smuio_funcs smuio_v11_0_6_funcs; + +#endif /* __SMUIO_V11_0_6_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0b3516c4eefb..1221aa6b40a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -40,8 +40,6 @@ #include "gc/gc_9_0_sh_mask.h" #include "sdma0/sdma0_4_0_offset.h" #include "sdma1/sdma1_4_0_offset.h" -#include "hdp/hdp_4_0_offset.h" -#include "hdp/hdp_4_0_sh_mask.h" #include "nbio/nbio_7_0_default.h" #include "nbio/nbio_7_0_offset.h" #include "nbio/nbio_7_0_sh_mask.h" @@ -59,7 +57,9 @@ #include "nbio_v6_1.h" #include "nbio_v7_0.h" #include "nbio_v7_4.h" +#include "hdp_v4_0.h" #include "vega10_ih.h" +#include "vega20_ih.h" #include "navi10_ih.h" #include "sdma_v4_0.h" #include "uvd_v7_0.h" @@ -83,14 +83,6 @@ #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0 -/* for Vega20 register name change */ -#define mmHDP_MEM_POWER_CTRL 0x00d4 -#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L -#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L -#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L -#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L -#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0 - /* * Indirect registers accessor */ @@ -241,6 +233,8 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev) { u32 reference_clock = adev->clock.spll.reference_freq; + if (adev->asic_type == CHIP_RENOIR) + return 10000; if (adev->asic_type == CHIP_RAVEN) return reference_clock / 4; @@ -487,7 +481,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev) if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 || amdgpu_reset_method == AMD_RESET_METHOD_MODE2 || - amdgpu_reset_method == AMD_RESET_METHOD_BACO) + amdgpu_reset_method == AMD_RESET_METHOD_BACO || + amdgpu_reset_method == AMD_RESET_METHOD_PCI) return amdgpu_reset_method; if (amdgpu_reset_method != -1) @@ -532,15 +527,18 @@ static int soc15_asic_reset(struct amdgpu_device *adev) return 0; switch (soc15_asic_reset_method(adev)) { - case AMD_RESET_METHOD_BACO: - dev_info(adev->dev, "BACO reset\n"); - return soc15_asic_baco_reset(adev); - case AMD_RESET_METHOD_MODE2: - dev_info(adev->dev, "MODE2 reset\n"); - return amdgpu_dpm_mode2_reset(adev); - default: - dev_info(adev->dev, "MODE1 reset\n"); - return soc15_asic_mode1_reset(adev); + case AMD_RESET_METHOD_PCI: + dev_info(adev->dev, "PCI reset\n"); + return amdgpu_device_pci_reset(adev); + case AMD_RESET_METHOD_BACO: + dev_info(adev->dev, "BACO reset\n"); + return soc15_asic_baco_reset(adev); + case AMD_RESET_METHOD_MODE2: + dev_info(adev->dev, "MODE2 reset\n"); + return amdgpu_dpm_mode2_reset(adev); + default: + dev_info(adev->dev, "MODE1 reset\n"); + return soc15_asic_mode1_reset(adev); } } @@ -699,6 +697,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) adev->nbio.funcs = &nbio_v6_1_funcs; adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; } + adev->hdp.funcs = &hdp_v4_0_funcs; if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) adev->df.funcs = &df_v3_6_funcs; @@ -729,12 +728,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); } if (adev->asic_type == CHIP_VEGA20) - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); else amdgpu_device_ip_block_add(adev, 
&vega10_ih_ip_block); } else { if (adev->asic_type == CHIP_VEGA20) - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); else amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { @@ -787,9 +786,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) { if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); } else { - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); } @@ -834,35 +833,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) return 0; } -static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) -{ - adev->nbio.funcs->hdp_flush(adev, ring); -} - -static void soc15_invalidate_hdp(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) - WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1); - else - amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( - HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); -} - static bool soc15_need_full_reset(struct amdgpu_device *adev) { /* change this when we implement soft reset */ return true; } -static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev) -{ - if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP)) - return; - /*read back hdp ras counter to reset it to 0 */ - RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT); -} - static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, uint64_t *count1) { @@ -1011,8 +987,6 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs = .set_uvd_clocks = &soc15_set_uvd_clocks, .set_vce_clocks = &soc15_set_vce_clocks, .get_config_memsize = &soc15_get_config_memsize, - .flush_hdp = &soc15_flush_hdp, - .invalidate_hdp = &soc15_invalidate_hdp, .need_full_reset = &soc15_need_full_reset, .init_doorbell_index = &vega10_doorbell_index_init, .get_pcie_usage = &soc15_get_pcie_usage, @@ -1034,9 +1008,6 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .set_uvd_clocks = &soc15_set_uvd_clocks, .set_vce_clocks = &soc15_set_vce_clocks, .get_config_memsize = &soc15_get_config_memsize, - .flush_hdp = &soc15_flush_hdp, - .invalidate_hdp = &soc15_invalidate_hdp, - .reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count, .need_full_reset = &soc15_need_full_reset, .init_doorbell_index = &vega20_doorbell_index_init, .get_pcie_usage = &vega20_get_pcie_usage, @@ -1294,9 +1265,8 @@ static int soc15_common_late_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_get_irq(adev); - if (adev->asic_funcs && - adev->asic_funcs->reset_hdp_ras_error_count) - adev->asic_funcs->reset_hdp_ras_error_count(adev); + if (adev->hdp.funcs->reset_ras_error_count) + adev->hdp.funcs->reset_ras_error_count(adev); if (adev->nbio.funcs->ras_late_init) r = adev->nbio.funcs->ras_late_init(adev); @@ -1422,41 +1392,6 @@ static int soc15_common_soft_reset(void *handle) return 0; } -static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable) -{ - uint32_t def, data; - - if (adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_ARCTURUS || - adev->asic_type == CHIP_RENOIR) { - def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, 
mmHDP_MEM_POWER_CTRL)); - - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) - data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK | - HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK | - HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK | - HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK; - else - data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK | - HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK | - HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK | - HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK); - - if (def != data) - WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data); - } else { - def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); - - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) - data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK; - else - data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK; - - if (def != data) - WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data); - } -} - static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable) { uint32_t def, data; @@ -1517,7 +1452,7 @@ static int soc15_common_set_clockgating_state(void *handle, state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE); - soc15_update_hdp_light_sleep(adev, + adev->hdp.funcs->update_clock_gating(adev, state == AMD_CG_STATE_GATE); soc15_update_drm_clock_gating(adev, state == AMD_CG_STATE_GATE); @@ -1534,7 +1469,7 @@ static int soc15_common_set_clockgating_state(void *handle, state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE); - soc15_update_hdp_light_sleep(adev, + adev->hdp.funcs->update_clock_gating(adev, state == AMD_CG_STATE_GATE); soc15_update_drm_clock_gating(adev, state == AMD_CG_STATE_GATE); @@ -1542,7 +1477,7 @@ static int soc15_common_set_clockgating_state(void *handle, state == AMD_CG_STATE_GATE); break; case CHIP_ARCTURUS: - soc15_update_hdp_light_sleep(adev, + adev->hdp.funcs->update_clock_gating(adev, state == AMD_CG_STATE_GATE); break; default: @@ -1561,10 +1496,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags) adev->nbio.funcs->get_clockgating_state(adev, flags); - /* AMD_CG_SUPPORT_HDP_LS */ - data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); - if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK) - *flags |= AMD_CG_SUPPORT_HDP_LS; + adev->hdp.funcs->get_clock_gating_state(adev, flags); /* AMD_CG_SUPPORT_DRM_MGCG */ data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0)); diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h new file mode 100644 index 000000000000..5039375bb1d4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h @@ -0,0 +1,154 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _TA_SECUREDISPLAY_IF_H
+#define _TA_SECUREDISPLAY_IF_H
+
+/** Secure Display related enumerations */
+/**********************************************************/
+
+/** @enum ta_securedisplay_command
+ * Secure Display Command ID
+ */
+enum ta_securedisplay_command {
+	/* Query whether the TA is responding; used only for validation purposes */
+	TA_SECUREDISPLAY_COMMAND__QUERY_TA = 1,
+	/* Send region of interest and CRC value to I2C */
+	TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC = 2,
+	/* Maximum command ID */
+	TA_SECUREDISPLAY_COMMAND__MAX_ID = 0x7FFFFFFF,
+};
+
+/** @enum ta_securedisplay_status
+ * Secure Display status returned in the shared buffer status field
+ */
+enum ta_securedisplay_status {
+	TA_SECUREDISPLAY_STATUS__SUCCESS = 0x00,		/* Success */
+	TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE = 0x01,	/* Generic failure */
+	TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER = 0x02,	/* Invalid parameter */
+	TA_SECUREDISPLAY_STATUS__NULL_POINTER = 0x03,		/* Null pointer */
+	TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR = 0x04,	/* Failed to write to I2C */
+	TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR = 0x05,	/* Failed to read DIO scratch register */
+	TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR = 0x06,		/* Failed to read CRC */
+
+	TA_SECUREDISPLAY_STATUS__MAX = 0x7FFFFFFF,		/* Maximum value for status */
+};
+
+/** @enum ta_securedisplay_max_phy
+ * Physical ID number to use for reading the corresponding DIO scratch register for the ROI
+ */
+enum ta_securedisplay_max_phy {
+	TA_SECUREDISPLAY_PHY0 = 0,
+	TA_SECUREDISPLAY_PHY1 = 1,
+	TA_SECUREDISPLAY_PHY2 = 2,
+	TA_SECUREDISPLAY_PHY3 = 3,
+	TA_SECUREDISPLAY_MAX_PHY = 4,
+};
+
+/** @enum ta_securedisplay_ta_query_cmd_ret
+ * A predefined return value of 0xAB, used only to validate that
+ * communication with the Secure Display TA is functional.
+ * This value is used to validate whether the TA is responding successfully.
+ */
+enum ta_securedisplay_ta_query_cmd_ret {
+	/* This is a value to validate whether the TA is loaded successfully */
+	TA_SECUREDISPLAY_QUERY_CMD_RET = 0xAB,
+};
+
+/** @enum ta_securedisplay_buffer_size
+ * I2C buffer size: 8 bytes of ROI (X start, X end, Y start, Y end),
+ * 6 bytes of CRC (R, G, B) and 1 byte for the physical ID
+ */
+enum ta_securedisplay_buffer_size {
+	/* 15 bytes = 8 byte (ROI) + 6 byte (CRC) + 1 byte (phy_id) */
+	TA_SECUREDISPLAY_I2C_BUFFER_SIZE = 15,
+};
+
+/** Input/output structures for Secure Display commands */
+/**********************************************************/
+/**
+ * Input structures
+ */
+
+/** @struct ta_securedisplay_send_roi_crc_input
+ * Physical ID to determine which DIO scratch register should be used to get the ROI
+ */
+struct ta_securedisplay_send_roi_crc_input {
+	uint32_t phy_id;  /* Physical ID */
+};
+
+/** @union ta_securedisplay_cmd_input
+ * Input buffer
+ */
+union ta_securedisplay_cmd_input {
+	/* send ROI and CRC input buffer format */
+	struct ta_securedisplay_send_roi_crc_input send_roi_crc;
+	uint32_t reserved[4];
+};
+
+/**
+ * Output structures
+ */
+
+/** @struct ta_securedisplay_query_ta_output
+ * Output buffer format for the query TA command; used only to validate that the TA is responding
+ */
+struct ta_securedisplay_query_ta_output {
+	/* return value from the TA when it is queried, for validation purposes only */
+	uint32_t query_cmd_ret;
+};
+
+/** @struct ta_securedisplay_send_roi_crc_output
+ * Output buffer format for the send ROI CRC command, which passes back the I2C
+ * buffer created inside the TA and written to I2C; used only for validation purposes
+ */
+struct ta_securedisplay_send_roi_crc_output {
+	uint8_t i2c_buf[TA_SECUREDISPLAY_I2C_BUFFER_SIZE];  /* I2C buffer */
+	uint8_t reserved;
+};
+
+/** @union ta_securedisplay_cmd_output
+ * Output buffer
+ */
+union ta_securedisplay_cmd_output {
+	/* Query TA output buffer format, used only for validation purposes */
+	struct ta_securedisplay_query_ta_output query_ta;
+	/* Send ROI CRC output buffer format, used only for validation purposes */
+	struct ta_securedisplay_send_roi_crc_output send_roi_crc;
+	uint32_t reserved[4];
+};
+
+/** @struct securedisplay_cmd
+ * Secure Display command, passed through shared buffer memory
+ */
+struct securedisplay_cmd {
+	uint32_t cmd_id;                                  /* +0  bytes: command ID */
+	enum ta_securedisplay_status status;              /* +4  bytes: status of the Secure Display TA */
+	uint32_t reserved[2];                             /* +8  bytes: reserved */
+	union ta_securedisplay_cmd_input securedisplay_in_message;   /* +16 bytes: input buffer */
+	union ta_securedisplay_cmd_output securedisplay_out_message; /* +32 bytes: output buffer */
+	/**@note Total 48 bytes */
+};
+
+#endif	//_TA_SECUREDISPLAY_IF_H
+
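As a quick illustration of how this shared buffer is meant to be driven, here is a hypothetical host-side sketch of a QUERY_TA round trip. The submission step is elided and the helper name is made up; in the driver the command would live in the PSP TA shared memory rather than wherever the caller puts it. Only the field usage follows the layout defined above.

	/* Hypothetical sketch only: validate the Secure Display TA via QUERY_TA.
	 * How the command is actually submitted to the TA is elided; this just
	 * shows how the shared-buffer fields defined above fit together.
	 */
	static int securedisplay_query_ta_sketch(struct securedisplay_cmd *cmd)
	{
		memset(cmd, 0, sizeof(*cmd));
		cmd->cmd_id = TA_SECUREDISPLAY_COMMAND__QUERY_TA;

		/* ... invoke the TA here (e.g. through the PSP) ... */

		if (cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
			return -EIO;

		/* the TA proves it is alive by echoing the predefined 0xAB value */
		if (cmd->securedisplay_out_message.query_ta.query_cmd_ret !=
		    TA_SECUREDISPLAY_QUERY_CMD_RET)
			return -EINVAL;

		return 0;
	}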
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index ce3319993b4b..249fcbee7871 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -196,19 +196,30 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
 
 	wptr = le32_to_cpu(*ih->wptr_cpu);
 
-	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
-		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
-		/* When a ring buffer overflow happen start parsing interrupt
-		 * from the last not overwritten vector (wptr + 16). Hopefully
-		 * this should allow us to catchup.
-		 */
-		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
-			 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
-		ih->rptr = (wptr + 16) & ih->ptr_mask;
-		tmp = RREG32(mmIH_RB_CNTL);
-		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
-		WREG32(mmIH_RB_CNTL, tmp);
-	}
+	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+		goto out;
+
+	/* Double check that the overflow wasn't already cleared. */
+	wptr = RREG32(mmIH_RB_WPTR);
+
+	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+		goto out;
+
+	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+	/* When a ring buffer overflow happens, start parsing interrupts
+	 * from the last not-overwritten vector (wptr + 16). Hopefully
+	 * this should allow us to catch up.
+	 */
+
+	dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+		 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+	ih->rptr = (wptr + 16) & ih->ptr_mask;
+	tmp = RREG32(mmIH_RB_CNTL);
+	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+	WREG32(mmIH_RB_CNTL, tmp);
+
+out:
 	return (wptr & ih->ptr_mask);
 }
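The reworked tonga_ih_get_wptr above is worth a second look: the overflow bit is first observed in the cheap writeback copy of WPTR and then confirmed by reading the register itself, because the writeback value can be stale and the overflow may already have been handled. A stand-alone toy version of that control flow follows; the struct, the helpers and the overflow bit position are all assumptions made for the example, not the amdgpu definitions.

	#include <stdint.h>

	struct toy_ih {
		uint32_t rptr;
		uint32_t ptr_mask;
		uint32_t writeback_wptr;   /* cheap, possibly stale copy */
		uint32_t hw_wptr;          /* authoritative MMIO value   */
	};

	#define TOY_OVERFLOW 0x1u          /* overflow flag bit, assumed */

	static uint32_t toy_get_wptr(struct toy_ih *ih)
	{
		uint32_t wptr = ih->writeback_wptr;

		if (!(wptr & TOY_OVERFLOW))
			goto out;                  /* fast path: no MMIO read */

		wptr = ih->hw_wptr;                /* double check the register */
		if (!(wptr & TOY_OVERFLOW))
			goto out;                  /* overflow already handled */

		/* real overflow: resume just past the last overwritten entry */
		ih->rptr = (wptr + 16) & ih->ptr_mask;
	out:
		return wptr & ih->ptr_mask;
	}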
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 312ecf6d24a0..7cd67cb2ac5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -36,7 +36,6 @@
 #include "vce/vce_4_0_default.h"
 #include "vce/vce_4_0_sh_mask.h"
 #include "nbif/nbif_6_1_offset.h"
-#include "hdp/hdp_4_0_offset.h"
 #include "mmhub/mmhub_1_0_offset.h"
 #include "mmhub/mmhub_1_0_sh_mask.h"
 #include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index c734e31a9e65..6117931fa8d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -32,7 +32,6 @@
 
 #include "vcn/vcn_1_0_offset.h"
 #include "vcn/vcn_1_0_sh_mask.h"
-#include "hdp/hdp_4_0_offset.h"
 #include "mmhub/mmhub_9_1_offset.h"
 #include "mmhub/mmhub_9_1_sh_mask.h"
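These two hunks, like the earlier sdma and soc15 ones, can drop hdp_4_0_offset.h because HDP accesses now go through a per-ASIC callback table (see adev->hdp.funcs->flush_hdp in the psp_v11_0 hunk and the update_clock_gating / get_clock_gating_state / reset_ras_error_count calls in the soc15 and nv hunks). A reduced sketch of that shape follows; the callback names mirror the calls visible in this diff, but the struct layout and the stub body are illustrative, not the real amdgpu_hdp.h.

	#include <stdbool.h>

	struct amdgpu_device;
	struct amdgpu_ring;

	/* Illustrative callback table; fields follow the calls in this diff. */
	struct toy_hdp_funcs {
		void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
		void (*update_clock_gating)(struct amdgpu_device *adev, bool enable);
		void (*get_clock_gating_state)(struct amdgpu_device *adev, unsigned int *flags);
		void (*reset_ras_error_count)(struct amdgpu_device *adev);
	};

	static void toy_hdp_v4_0_flush(struct amdgpu_device *adev, struct amdgpu_ring *ring)
	{
		/* a real implementation would touch the HDP registers here */
		(void)adev;
		(void)ring;
	}

	static const struct toy_hdp_funcs toy_hdp_v4_0_funcs = {
		.flush_hdp = toy_hdp_v4_0_flush,
		/* remaining callbacks omitted in this sketch */
	};

Callers then dispatch through the table instead of hard-coding generation-specific register offsets, which is exactly why the hdp_4_0 header includes above become unnecessary.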
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index e5ae31eb744e..88626d83e07b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -38,132 +38,120 @@ static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
 
 /**
- * vega10_ih_enable_interrupts - Enable the interrupt ring buffer
+ * vega10_ih_init_register_offset - Initialize register offset for ih rings
  *
  * @adev: amdgpu_device pointer
  *
- * Enable the interrupt ring buffer (VEGA10).
+ * Initialize register offset ih rings (VEGA10).
  */
-static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
+static void vega10_ih_init_register_offset(struct amdgpu_device *adev)
 {
-	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-
-	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
-	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
-	if (amdgpu_sriov_vf(adev)) {
-		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
-			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
-			return;
-		}
-	} else {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+	struct amdgpu_ih_regs *ih_regs;
+
+	if (adev->irq.ih.ring_size) {
+		ih_regs = &adev->irq.ih.ih_regs;
+		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
+		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
+		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
+		ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
+		ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
+		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
 	}
-	adev->irq.ih.enabled = true;
 
 	if (adev->irq.ih1.ring_size) {
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
-					   RB_ENABLE, 1);
-		if (amdgpu_sriov_vf(adev)) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
-				return;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
-		}
-		adev->irq.ih1.enabled = true;
+		ih_regs = &adev->irq.ih1.ih_regs;
+		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
+		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
+		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
+		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
 	}
 
 	if (adev->irq.ih2.ring_size) {
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
-					   RB_ENABLE, 1);
-		if (amdgpu_sriov_vf(adev)) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
-				return;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
-		}
-		adev->irq.ih2.enabled = true;
+		ih_regs = &adev->irq.ih2.ih_regs;
+		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
+		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
+		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
+		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
 	}
-
-	if (adev->irq.ih_soft.ring_size)
-		adev->irq.ih_soft.enabled = true;
 }
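vega10_ih_init_register_offset above records each ring's register offsets once, in amdgpu_ih_regs, and that one-time table is what lets the rest of this file collapse its per-ring if/else chains. The stand-alone toy below shows the payoff; the types and the MMIO stub are stand-ins, not the real amdgpu code.

	#include <stdint.h>

	struct toy_ih_regs {
		uint32_t ih_rb_rptr;               /* register offset, filled in once */
	};

	struct toy_ih_ring {
		uint32_t rptr;
		struct toy_ih_regs ih_regs;
	};

	static void toy_wreg32(uint32_t reg, uint32_t val)
	{
		/* MMIO write stub */
		(void)reg;
		(void)val;
	}

	/* Before: if (ih == &adev->irq.ih) ... else if (ih == &adev->irq.ih1)
	 * ... else if (ih == &adev->irq.ih2) ... else BUG();
	 * After: one line, no per-ring branching.
	 */
	static void toy_set_rptr(struct toy_ih_ring *ih)
	{
		toy_wreg32(ih->ih_regs.ih_rb_rptr, ih->rptr);
	}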
 
 /**
- * vega10_ih_disable_interrupts - Disable the interrupt ring buffer
+ * vega10_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
  *
  * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
  *
- * Disable the interrupt ring buffer (VEGA10).
+ * Toggle the interrupt ring buffer (VEGA10)
  */
-static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
+static int vega10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
+					    struct amdgpu_ih_ring *ih,
+					    bool enable)
 {
-	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
+	struct amdgpu_ih_regs *ih_regs;
+	uint32_t tmp;
 
-	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
-	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
+	ih_regs = &ih->ih_regs;
+
+	tmp = RREG32(ih_regs->ih_rb_cntl);
+	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+	/* enable_intr field is only valid in ring0 */
+	if (ih == &adev->irq.ih)
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
 	if (amdgpu_sriov_vf(adev)) {
-		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
-			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
-			return;
+		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+			dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+			return -ETIMEDOUT;
 		}
 	} else {
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+		WREG32(ih_regs->ih_rb_cntl, tmp);
 	}
 
-	/* set rptr, wptr to 0 */
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
-	adev->irq.ih.enabled = false;
-	adev->irq.ih.rptr = 0;
-
-	if (adev->irq.ih1.ring_size) {
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
-					   RB_ENABLE, 0);
-		if (amdgpu_sriov_vf(adev)) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
-				return;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
-		}
+	if (enable) {
+		ih->enabled = true;
+	} else {
 		/* set rptr, wptr to 0 */
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
-		adev->irq.ih1.enabled = false;
-		adev->irq.ih1.rptr = 0;
+		WREG32(ih_regs->ih_rb_rptr, 0);
+		WREG32(ih_regs->ih_rb_wptr, 0);
+		ih->enabled = false;
+		ih->rptr = 0;
 	}
 
-	if (adev->irq.ih2.ring_size) {
-		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
-		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
-					   RB_ENABLE, 0);
-		if (amdgpu_sriov_vf(adev)) {
-			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
-						ih_rb_cntl)) {
-				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
-				return;
-			}
-		} else {
-			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
-		}
+	return 0;
+}
 
-		/* set rptr, wptr to 0 */
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
-		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
-		adev->irq.ih2.enabled = false;
-		adev->irq.ih2.rptr = 0;
+/**
+ * vega10_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
+ *
+ * Toggle all the available interrupt ring buffers (VEGA10).
+ */ +static int vega10_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable) +{ + struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; + int i; + int r; + + for (i = 0; i < ARRAY_SIZE(ih); i++) { + if (ih[i]->ring_size) { + r = vega10_ih_toggle_ring_interrupts(adev, ih[i], enable); + if (r) + return r; + } } + + return 0; } static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl) @@ -209,6 +197,58 @@ static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih) } /** + * vega10_ih_enable_ring - enable an ih ring buffer + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * + * Enable an ih ring buffer (VEGA10) + */ +static int vega10_ih_enable_ring(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + struct amdgpu_ih_regs *ih_regs; + uint32_t tmp; + + ih_regs = &ih->ih_regs; + + /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/ + WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8); + WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff); + + tmp = RREG32(ih_regs->ih_rb_cntl); + tmp = vega10_ih_rb_cntl(ih, tmp); + if (ih == &adev->irq.ih) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); + if (ih == &adev->irq.ih1) { + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); + } + if (amdgpu_sriov_vf(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { + dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n"); + return -ETIMEDOUT; + } + } else { + WREG32(ih_regs->ih_rb_cntl, tmp); + } + + if (ih == &adev->irq.ih) { + /* set the ih ring 0 writeback address whether it's enabled or not */ + WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr)); + WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF); + } + + /* set rptr, wptr to 0 */ + WREG32(ih_regs->ih_rb_wptr, 0); + WREG32(ih_regs->ih_rb_rptr, 0); + + WREG32(ih_regs->ih_doorbell_rptr, vega10_ih_doorbell_rptr(ih)); + + return 0; +} + +/** * vega10_ih_irq_init - init and enable the interrupt ring * * @adev: amdgpu_device pointer @@ -221,116 +261,34 @@ static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih) */ static int vega10_ih_irq_init(struct amdgpu_device *adev) { - struct amdgpu_ih_ring *ih; - u32 ih_rb_cntl, ih_chicken; - int ret = 0; + struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; + u32 ih_chicken; + int ret; + int i; u32 tmp; /* disable irqs */ - vega10_ih_disable_interrupts(adev); + ret = vega10_ih_toggle_interrupts(adev, false); + if (ret) + return ret; adev->nbio.funcs->ih_control(adev); - ih = &adev->irq.ih; - /* Ring Buffer base. 
[39:8] of 40-bit address of the beginning of the ring buffer*/ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff); - - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL); - ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, - !!adev->irq.msi_enabled); - if (amdgpu_sriov_vf(adev)) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { - DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); - return -ETIMEDOUT; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); - } - - if ((adev->asic_type == CHIP_ARCTURUS && - adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) || - adev->asic_type == CHIP_RENOIR) { + if (adev->asic_type == CHIP_RENOIR) { ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN); if (adev->irq.ih.use_bus_addr) { ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1); - } else { - ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, - MC_SPACE_FBPA_ENABLE, 1); } WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken); } - /* set the writeback address whether it's enabled or not */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, - lower_32_bits(ih->wptr_addr)); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, - upper_32_bits(ih->wptr_addr) & 0xFFFF); - - /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); - - WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, - vega10_ih_doorbell_rptr(ih)); - - ih = &adev->irq.ih1; - if (ih->ring_size) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1, - (ih->gpu_addr >> 40) & 0xff); - - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); - ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, - WPTR_OVERFLOW_ENABLE, 0); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, - RB_FULL_DRAIN_ENABLE, 1); - if (amdgpu_sriov_vf(adev)) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, - ih_rb_cntl)) { - DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); - return -ETIMEDOUT; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl); + for (i = 0; i < ARRAY_SIZE(ih); i++) { + if (ih[i]->ring_size) { + ret = vega10_ih_enable_ring(adev, ih[i]); + if (ret) + return ret; } - - /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0); - - WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1, - vega10_ih_doorbell_rptr(ih)); - } - - ih = &adev->irq.ih2; - if (ih->ring_size) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2, - (ih->gpu_addr >> 40) & 0xff); - - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); - ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl); - - if (amdgpu_sriov_vf(adev)) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, - ih_rb_cntl)) { - DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); - return -ETIMEDOUT; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); - } - - /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0); - - WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2, - vega10_ih_doorbell_rptr(ih)); } tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL); @@ -345,9 +303,14 @@ static int vega10_ih_irq_init(struct amdgpu_device 
*adev) pci_set_master(adev->pdev); /* enable interrupts */ - vega10_ih_enable_interrupts(adev); + ret = vega10_ih_toggle_interrupts(adev, true); + if (ret) + return ret; - return ret; + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + + return 0; } /** @@ -359,7 +322,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) */ static void vega10_ih_irq_disable(struct amdgpu_device *adev) { - vega10_ih_disable_interrupts(adev); + vega10_ih_toggle_interrupts(adev, false); /* Wait and acknowledge irq */ mdelay(1); @@ -379,25 +342,17 @@ static void vega10_ih_irq_disable(struct amdgpu_device *adev) static u32 vega10_ih_get_wptr(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { - u32 wptr, reg, tmp; + u32 wptr, tmp; + struct amdgpu_ih_regs *ih_regs; wptr = le32_to_cpu(*ih->wptr_cpu); + ih_regs = &ih->ih_regs; if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; /* Double check that the overflow wasn't already cleared. */ - - if (ih == &adev->irq.ih) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR); - else if (ih == &adev->irq.ih1) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1); - else if (ih == &adev->irq.ih2) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2); - else - BUG(); - - wptr = RREG32_NO_KIQ(reg); + wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr); if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; @@ -413,69 +368,15 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev, wptr, ih->rptr, tmp); ih->rptr = tmp; - if (ih == &adev->irq.ih) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL); - else if (ih == &adev->irq.ih1) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1); - else if (ih == &adev->irq.ih2) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2); - else - BUG(); - - tmp = RREG32_NO_KIQ(reg); + tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); - WREG32_NO_KIQ(reg, tmp); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); out: return (wptr & ih->ptr_mask); } /** - * vega10_ih_decode_iv - decode an interrupt vector - * - * @adev: amdgpu_device pointer - * @ih: IH ring buffer to decode - * @entry: IV entry to place decoded information into - * - * Decodes the interrupt vector at the current rptr - * position and also advance the position. - */ -static void vega10_ih_decode_iv(struct amdgpu_device *adev, - struct amdgpu_ih_ring *ih, - struct amdgpu_iv_entry *entry) -{ - /* wptr/rptr are in bytes! */ - u32 ring_index = ih->rptr >> 2; - uint32_t dw[8]; - - dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); - dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); - dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); - dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); - dw[4] = le32_to_cpu(ih->ring[ring_index + 4]); - dw[5] = le32_to_cpu(ih->ring[ring_index + 5]); - dw[6] = le32_to_cpu(ih->ring[ring_index + 6]); - dw[7] = le32_to_cpu(ih->ring[ring_index + 7]); - - entry->client_id = dw[0] & 0xff; - entry->src_id = (dw[0] >> 8) & 0xff; - entry->ring_id = (dw[0] >> 16) & 0xff; - entry->vmid = (dw[0] >> 24) & 0xf; - entry->vmid_src = (dw[0] >> 31); - entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32); - entry->timestamp_src = dw[2] >> 31; - entry->pasid = dw[3] & 0xffff; - entry->pasid_src = dw[3] >> 31; - entry->src_data[0] = dw[4]; - entry->src_data[1] = dw[5]; - entry->src_data[2] = dw[6]; - entry->src_data[3] = dw[7]; - - /* wptr/rptr are in bytes! 
*/ - ih->rptr += 32; -} - -/** * vega10_ih_irq_rearm - rearm IRQ if lost * * @adev: amdgpu_device pointer @@ -485,22 +386,14 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev, static void vega10_ih_irq_rearm(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { - uint32_t reg_rptr = 0; uint32_t v = 0; uint32_t i = 0; + struct amdgpu_ih_regs *ih_regs; - if (ih == &adev->irq.ih) - reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR); - else if (ih == &adev->irq.ih1) - reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1); - else if (ih == &adev->irq.ih2) - reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2); - else - return; - + ih_regs = &ih->ih_regs; /* Rearm IRQ / re-wwrite doorbell if doorbell write is lost */ for (i = 0; i < MAX_REARM_RETRY; i++) { - v = RREG32_NO_KIQ(reg_rptr); + v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr); if ((v < ih->ring_size) && (v != ih->rptr)) WDOORBELL32(ih->doorbell_index, ih->rptr); else @@ -519,6 +412,8 @@ static void vega10_ih_irq_rearm(struct amdgpu_device *adev, static void vega10_ih_set_rptr(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { + struct amdgpu_ih_regs *ih_regs; + if (ih->use_doorbell) { /* XXX check if swapping is necessary on BE */ *ih->rptr_cpu = ih->rptr; @@ -526,12 +421,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) vega10_ih_irq_rearm(adev, ih); - } else if (ih == &adev->irq.ih) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr); - } else if (ih == &adev->irq.ih1) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr); - } else if (ih == &adev->irq.ih2) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr); + } else { + ih_regs = &ih->ih_regs; + WREG32(ih_regs->ih_rb_rptr, ih->rptr); } } @@ -600,19 +492,23 @@ static int vega10_ih_sw_init(void *handle) adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; - r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true); - if (r) - return r; + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true); + if (r) + return r; - adev->irq.ih1.use_doorbell = true; - adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; + adev->irq.ih1.use_doorbell = true; + adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; - r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true); - if (r) - return r; + r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true); + if (r) + return r; - adev->irq.ih2.use_doorbell = true; - adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1; + adev->irq.ih2.use_doorbell = true; + adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1; + } + /* initialize ih control registers offset */ + vega10_ih_init_register_offset(adev); r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true); if (r) @@ -628,6 +524,7 @@ static int vega10_ih_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_irq_fini(adev); + amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft); amdgpu_ih_ring_fini(adev, &adev->irq.ih2); amdgpu_ih_ring_fini(adev, &adev->irq.ih1); amdgpu_ih_ring_fini(adev, &adev->irq.ih); @@ -698,15 +595,11 @@ static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev, def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL); field_val = enable ? 0 : 1; /** - * Vega10 does not have IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE - * and IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field. 
+ * Vega10/12 and RAVEN don't have IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field. */ - if (adev->asic_type > CHIP_VEGA10) { - data = REG_SET_FIELD(data, IH_CLK_CTRL, - IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val); + if (adev->asic_type == CHIP_RENOIR) data = REG_SET_FIELD(data, IH_CLK_CTRL, IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val); - } data = REG_SET_FIELD(data, IH_CLK_CTRL, DBUS_MUX_CLK_SOFT_OVERRIDE, field_val); @@ -759,7 +652,7 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = { static const struct amdgpu_ih_funcs vega10_ih_funcs = { .get_wptr = vega10_ih_get_wptr, - .decode_iv = vega10_ih_decode_iv, + .decode_iv = amdgpu_ih_decode_iv_helper, .set_rptr = vega10_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c new file mode 100644 index 000000000000..5a3c867d5881 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -0,0 +1,703 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/pci.h> + +#include "amdgpu.h" +#include "amdgpu_ih.h" +#include "soc15.h" + +#include "oss/osssys_4_2_0_offset.h" +#include "oss/osssys_4_2_0_sh_mask.h" + +#include "soc15_common.h" +#include "vega20_ih.h" + +#define MAX_REARM_RETRY 10 + +static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev); + +/** + * vega20_ih_init_register_offset - Initialize register offset for ih rings + * + * @adev: amdgpu_device pointer + * + * Initialize register offset ih rings (VEGA20). 
+ */ +static void vega20_ih_init_register_offset(struct amdgpu_device *adev) +{ + struct amdgpu_ih_regs *ih_regs; + + if (adev->irq.ih.ring_size) { + ih_regs = &adev->irq.ih.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR); + ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO); + ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL; + } + + if (adev->irq.ih1.ring_size) { + ih_regs = &adev->irq.ih1.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1; + } + + if (adev->irq.ih2.ring_size) { + ih_regs = &adev->irq.ih2.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2; + } +} + +/** + * vega20_ih_toggle_ring_interrupts - toggle the interrupt ring buffer + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * @enable: true - enable the interrupts, false - disable the interrupts + * + * Toggle the interrupt ring buffer (VEGA20) + */ +static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, + bool enable) +{ + struct amdgpu_ih_regs *ih_regs; + uint32_t tmp; + + ih_regs = &ih->ih_regs; + + tmp = RREG32(ih_regs->ih_rb_cntl); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); + /* enable_intr field is only valid in ring0 */ + if (ih == &adev->irq.ih) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0)); + if (amdgpu_sriov_vf(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { + dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n"); + return -ETIMEDOUT; + } + } else { + WREG32(ih_regs->ih_rb_cntl, tmp); + } + + if (enable) { + ih->enabled = true; + } else { + /* set rptr, wptr to 0 */ + WREG32(ih_regs->ih_rb_rptr, 0); + WREG32(ih_regs->ih_rb_wptr, 0); + ih->enabled = false; + ih->rptr = 0; + } + + return 0; +} + +/** + * vega20_ih_toggle_interrupts - Toggle all the available interrupt ring buffers + * + * @adev: amdgpu_device pointer + * @enable: enable or disable interrupt ring buffers + * + * Toggle all the available interrupt ring buffers (VEGA20). 
+ */ +static int vega20_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable) +{ + struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; + int i; + int r; + + for (i = 0; i < ARRAY_SIZE(ih); i++) { + if (ih[i]->ring_size) { + r = vega20_ih_toggle_ring_interrupts(adev, ih[i], enable); + if (r) + return r; + } + } + + return 0; +} + +static uint32_t vega20_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl) +{ + int rb_bufsz = order_base_2(ih->ring_size / 4); + + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + MC_SPACE, ih->use_bus_addr ? 1 : 4); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_OVERFLOW_CLEAR, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_OVERFLOW_ENABLE, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz); + /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register + * value is written to memory + */ + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_WRITEBACK_ENABLE, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0); + + return ih_rb_cntl; +} + +static uint32_t vega20_ih_doorbell_rptr(struct amdgpu_ih_ring *ih) +{ + u32 ih_doorbell_rtpr = 0; + + if (ih->use_doorbell) { + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, OFFSET, + ih->doorbell_index); + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, + ENABLE, 1); + } else { + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, + ENABLE, 0); + } + return ih_doorbell_rtpr; +} + +/** + * vega20_ih_enable_ring - enable an ih ring buffer + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * + * Enable an ih ring buffer (VEGA20) + */ +static int vega20_ih_enable_ring(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + struct amdgpu_ih_regs *ih_regs; + uint32_t tmp; + + ih_regs = &ih->ih_regs; + + /* Ring Buffer base. 
[39:8] of 40-bit address of the beginning of the ring buffer*/
+	WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+	WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+	tmp = RREG32(ih_regs->ih_rb_cntl);
+	tmp = vega20_ih_rb_cntl(ih, tmp);
+	if (ih == &adev->irq.ih)
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+	if (ih == &adev->irq.ih1) {
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+	}
+	if (amdgpu_sriov_vf(adev)) {
+		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+			dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+			return -ETIMEDOUT;
+		}
+	} else {
+		WREG32(ih_regs->ih_rb_cntl, tmp);
+	}
+
+	if (ih == &adev->irq.ih) {
+		/* set the ih ring 0 writeback address whether it's enabled or not */
+		WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+		WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+	}
+
+	/* set rptr, wptr to 0 */
+	WREG32(ih_regs->ih_rb_wptr, 0);
+	WREG32(ih_regs->ih_rb_rptr, 0);
+
+	WREG32(ih_regs->ih_doorbell_rptr, vega20_ih_doorbell_rptr(ih));
+
+	return 0;
+}
+
+/**
+ * vega20_ih_reroute_ih - reroute VMC/UTCL2 ih to an ih ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Reroute VMC and UTCL2 interrupts on the primary ih ring to
+ * ih ring 1, so they won't be lost when bursts of page fault
+ * interrupts overwhelm the interrupt handler (VEGA20).
+ */
+static void vega20_ih_reroute_ih(struct amdgpu_device *adev)
+{
+	uint32_t tmp;
+
+	/* The vega20 ih reroute goes through the psp;
+	 * this function is only used for arcturus.
+	 */
+	if (adev->asic_type == CHIP_ARCTURUS) {
+		/* Reroute to IH ring 1 for VMC */
+		WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
+		tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+		tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+		tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+		WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+
+		/* Reroute IH ring 1 for UTCL2 */
+		WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
+		tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+		tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+		WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+	}
+}
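The reroute above uses a classic index/data register pair: the client ID (0x12 for VMC, 0x1B for UTCL2, per the code above) is written to IH_CLIENT_CFG_INDEX, and the matching entry is then read-modify-written through IH_CLIENT_CFG_DATA. A toy version of that flow follows; the register offsets and the RING_ID bit encoding are assumptions made purely for the sketch.

	#include <stdint.h>

	#define TOY_CFG_INDEX 0x00u   /* assumed offsets */
	#define TOY_CFG_DATA  0x04u
	#define TOY_RING_ID_1 0x01u   /* assumed field encoding */

	static uint32_t toy_rreg32(uint32_t reg) { (void)reg; return 0; }
	static void toy_wreg32(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

	static void toy_reroute_client_to_ring1(uint32_t client_id)
	{
		uint32_t tmp;

		toy_wreg32(TOY_CFG_INDEX, client_id);  /* select the client entry */
		tmp = toy_rreg32(TOY_CFG_DATA);        /* read-modify-write       */
		tmp |= TOY_RING_ID_1;                  /* steer it to IH ring 1   */
		toy_wreg32(TOY_CFG_DATA, tmp);
	}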
+/**
+ * vega20_ih_irq_init - init and enable the interrupt ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate a ring buffer for the interrupt controller,
+ * enable the RLC, disable interrupts, enable the IH
+ * ring buffer and enable it (VEGA20).
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int vega20_ih_irq_init(struct amdgpu_device *adev)
+{
+	struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+	u32 ih_chicken;
+	int ret;
+	int i;
+	u32 tmp;
+
+	/* disable irqs */
+	ret = vega20_ih_toggle_interrupts(adev, false);
+	if (ret)
+		return ret;
+
+	adev->nbio.funcs->ih_control(adev);
+
+	if (adev->asic_type == CHIP_ARCTURUS &&
+	    adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+		ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
+		if (adev->irq.ih.use_bus_addr) {
+			ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+						   MC_SPACE_GPA_ENABLE, 1);
+		}
+		WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ih); i++) {
+		if (ih[i]->ring_size) {
+			if (i == 1)
+				vega20_ih_reroute_ih(adev);
+			ret = vega20_ih_enable_ring(adev, ih[i]);
+			if (ret)
+				return ret;
+		}
+	}
+
+	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
+	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
+			    CLIENT18_IS_STORM_CLIENT, 1);
+	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
+
+	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
+	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
+	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
+
+	pci_set_master(adev->pdev);
+
+	/* enable interrupts */
+	ret = vega20_ih_toggle_interrupts(adev, true);
+	if (ret)
+		return ret;
+
+	if (adev->irq.ih_soft.ring_size)
+		adev->irq.ih_soft.enabled = true;
+
+	return 0;
+}
+
+/**
+ * vega20_ih_irq_disable - disable interrupts
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts on the hw (VEGA20).
+ */
+static void vega20_ih_irq_disable(struct amdgpu_device *adev)
+{
+	vega20_ih_toggle_interrupts(adev, false);
+
+	/* Wait and acknowledge irq */
+	mdelay(1);
+}
+
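The wptr handler that follows recovers from a ring overflow by advancing the read pointer to (wptr + 32) & ptr_mask, i.e. one 32-byte IV past the current write position, which only works because the ring size is a power of two. A stand-alone sketch of just that arithmetic (the ring size matches vega20_ih_sw_init() below; the sample wptr is invented):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE (256 * 1024)		/* ring 0 size from vega20_ih_sw_init() */
#define PTR_MASK  (RING_SIZE - 1)	/* power of two, so AND works as modulo */

static uint32_t recover_rptr(uint32_t wptr)
{
	/*
	 * wptr has wrapped past rptr; everything behind it was overwritten.
	 * The oldest still-valid vector starts one 32-byte IV past the
	 * current write position.
	 */
	return (wptr + 32) & PTR_MASK;
}

int main(void)
{
	uint32_t wptr = 0x3FFF0;	/* example write pointer near the wrap point */

	printf("resume parsing at rptr = 0x%05x\n", recover_rptr(wptr));
	return 0;
}

Near the top of the ring the addition wraps cleanly: 0x3FFF0 + 32 masks down to 0x00010.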
+/**
+ * vega20_ih_get_wptr - get the IH ring buffer wptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer (VEGA20). Also check for
+ * ring buffer overflow and deal with it.
+ * Returns the value of the wptr.
+ */
+static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
+			      struct amdgpu_ih_ring *ih)
+{
+	u32 wptr, tmp;
+	struct amdgpu_ih_regs *ih_regs;
+
+	wptr = le32_to_cpu(*ih->wptr_cpu);
+	ih_regs = &ih->ih_regs;
+
+	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+		goto out;
+
+	/* Double check that the overflow wasn't already cleared. */
+	wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
+	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+		goto out;
+
+	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+	/* When a ring buffer overflow happens, start parsing interrupts
+	 * from the last not-overwritten vector (wptr + 32). Hopefully
+	 * this should allow us to catch up.
+	 */
+	tmp = (wptr + 32) & ih->ptr_mask;
+	dev_warn(adev->dev, "IH ring buffer overflow "
+		 "(0x%08X, 0x%08X, 0x%08X)\n",
+		 wptr, ih->rptr, tmp);
+	ih->rptr = tmp;
+
+	tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
+	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+out:
+	return (wptr & ih->ptr_mask);
+}
+
+/**
+ * vega20_ih_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ */
+static void vega20_ih_irq_rearm(struct amdgpu_device *adev,
+				struct amdgpu_ih_ring *ih)
+{
+	uint32_t v = 0;
+	uint32_t i = 0;
+	struct amdgpu_ih_regs *ih_regs;
+
+	ih_regs = &ih->ih_regs;
+
+	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
+	for (i = 0; i < MAX_REARM_RETRY; i++) {
+		v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
+		if ((v < ih->ring_size) && (v != ih->rptr))
+			WDOORBELL32(ih->doorbell_index, ih->rptr);
+		else
+			break;
+	}
+}
+
+/**
+ * vega20_ih_set_rptr - set the IH ring buffer rptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Set the IH ring buffer rptr.
+ */
+static void vega20_ih_set_rptr(struct amdgpu_device *adev,
+			       struct amdgpu_ih_ring *ih)
+{
+	struct amdgpu_ih_regs *ih_regs;
+
+	if (ih->use_doorbell) {
+		/* XXX check if swapping is necessary on BE */
+		*ih->rptr_cpu = ih->rptr;
+		WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+		if (amdgpu_sriov_vf(adev))
+			vega20_ih_irq_rearm(adev, ih);
+	} else {
+		ih_regs = &ih->ih_regs;
+		WREG32(ih_regs->ih_rb_rptr, ih->rptr);
+	}
+}
+
+/**
+ * vega20_ih_self_irq - dispatch work for ring 1 and 2
+ *
+ * @adev: amdgpu_device pointer
+ * @source: irq source
+ * @entry: IV with WPTR update
+ *
+ * Update the WPTR from the IV and schedule work to handle the entries.
+ */
+static int vega20_ih_self_irq(struct amdgpu_device *adev,
+			      struct amdgpu_irq_src *source,
+			      struct amdgpu_iv_entry *entry)
+{
+	uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+
+	switch (entry->ring_id) {
+	case 1:
+		*adev->irq.ih1.wptr_cpu = wptr;
+		schedule_work(&adev->irq.ih1_work);
+		break;
+	case 2:
+		*adev->irq.ih2.wptr_cpu = wptr;
+		schedule_work(&adev->irq.ih2_work);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static const struct amdgpu_irq_src_funcs vega20_ih_self_irq_funcs = {
+	.process = vega20_ih_self_irq,
+};
+
+static void vega20_ih_set_self_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->irq.self_irq.num_types = 0;
+	adev->irq.self_irq.funcs = &vega20_ih_self_irq_funcs;
+}
+
+static int vega20_ih_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	vega20_ih_set_interrupt_funcs(adev);
+	vega20_ih_set_self_irq_funcs(adev);
+	return 0;
+}
+
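vega20_ih_sw_init() below assigns each ring a doorbell index of the form (doorbell_index.ih + n) << 1; the allocated doorbell slots are 64-bit wide while the IH rings post 32-bit doorbell writes, so the qword slot number is doubled into a dword index. A toy illustration of that math (the base slot value here is invented, not the real VEGA20 assignment):

#include <stdio.h>

#define DOORBELL_IH 0x178	/* hypothetical qword slot for IH ring 0 */

int main(void)
{
	unsigned int ring;

	/* rings 0..2 occupy consecutive qword slots; "<< 1" converts
	 * each slot into the dword index used by WDOORBELL32() */
	for (ring = 0; ring < 3; ring++)
		printf("ih%u doorbell dword index = 0x%x\n",
		       ring, (DOORBELL_IH + ring) << 1);
	return 0;
}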
+static int vega20_ih_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
+			      &adev->irq.self_irq);
+	if (r)
+		return r;
+
+	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
+	if (r)
+		return r;
+
+	adev->irq.ih.use_doorbell = true;
+	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
+
+	r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+	if (r)
+		return r;
+
+	adev->irq.ih1.use_doorbell = true;
+	adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+
+	r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+	if (r)
+		return r;
+
+	adev->irq.ih2.use_doorbell = true;
+	adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+
+	/* initialize ih control registers offset */
+	vega20_ih_init_register_offset(adev);
+
+	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+	if (r)
+		return r;
+
+	r = amdgpu_irq_init(adev);
+
+	return r;
+}
+
+static int vega20_ih_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_irq_fini(adev);
+	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
+	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
+	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
+	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+
+	return 0;
+}
+
+static int vega20_ih_hw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = vega20_ih_irq_init(adev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+static int vega20_ih_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	vega20_ih_irq_disable(adev);
+
+	return 0;
+}
+
+static int vega20_ih_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return vega20_ih_hw_fini(adev);
+}
+
+static int vega20_ih_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return vega20_ih_hw_init(adev);
+}
+
+static bool vega20_ih_is_idle(void *handle)
+{
+	/* todo */
+	return true;
+}
+
+static int vega20_ih_wait_for_idle(void *handle)
+{
+	/* todo */
+	return -ETIMEDOUT;
+}
+
+static int vega20_ih_soft_reset(void *handle)
+{
+	/* todo */
+
+	return 0;
+}
+
+static void vega20_ih_update_clockgating_state(struct amdgpu_device *adev,
+					       bool enable)
+{
+	uint32_t data, def, field_val;
+
+	if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
+		def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
+		field_val = enable ? 0 : 1;
+		data = REG_SET_FIELD(data, IH_CLK_CTRL,
+				     IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
+		data = REG_SET_FIELD(data, IH_CLK_CTRL,
+				     IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
+		data = REG_SET_FIELD(data, IH_CLK_CTRL,
+				     DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
+		data = REG_SET_FIELD(data, IH_CLK_CTRL,
+				     OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
+		data = REG_SET_FIELD(data, IH_CLK_CTRL,
+				     LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
+		data = REG_SET_FIELD(data, IH_CLK_CTRL,
+				     DYN_CLK_SOFT_OVERRIDE, field_val);
+		data = REG_SET_FIELD(data, IH_CLK_CTRL,
+				     REG_CLK_SOFT_OVERRIDE, field_val);
+		if (def != data)
+			WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
+	}
+}
+
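The clock-gating update above follows the usual "def = data = read; set fields; write back only if def != data" idiom, so toggling gating to a state it is already in costs no register write. A simulated stand-alone version of the idiom (register storage, field bits and the write counter are all invented for the example):

#include <stdint.h>
#include <stdio.h>

static uint32_t ih_clk_ctrl = 0xff;	/* pretend all soft overrides start set */
static unsigned int reg_writes;

static uint32_t rreg(void) { return ih_clk_ctrl; }
static void wreg(uint32_t v) { ih_clk_ctrl = v; reg_writes++; }

#define ALL_SOFT_OVERRIDES 0x7fU	/* hypothetical union of the seven fields */

static void update_clockgating(int enable)
{
	uint32_t def, data;

	def = data = rreg();
	if (enable)
		data &= ~ALL_SOFT_OVERRIDES;	/* field_val = 0: let clocks gate */
	else
		data |= ALL_SOFT_OVERRIDES;	/* field_val = 1: force clocks on */

	if (def != data)	/* skip the write when nothing changed */
		wreg(data);
}

int main(void)
{
	update_clockgating(1);
	update_clockgating(1);	/* second call is a no-op, no extra write */
	printf("register writes: %u\n", reg_writes);
	return 0;
}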
+static int vega20_ih_set_clockgating_state(void *handle,
+					   enum amd_clockgating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	vega20_ih_update_clockgating_state(adev,
+				state == AMD_CG_STATE_GATE);
+	return 0;
+}
+
+static int vega20_ih_set_powergating_state(void *handle,
+					   enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs vega20_ih_ip_funcs = {
+	.name = "vega20_ih",
+	.early_init = vega20_ih_early_init,
+	.late_init = NULL,
+	.sw_init = vega20_ih_sw_init,
+	.sw_fini = vega20_ih_sw_fini,
+	.hw_init = vega20_ih_hw_init,
+	.hw_fini = vega20_ih_hw_fini,
+	.suspend = vega20_ih_suspend,
+	.resume = vega20_ih_resume,
+	.is_idle = vega20_ih_is_idle,
+	.wait_for_idle = vega20_ih_wait_for_idle,
+	.soft_reset = vega20_ih_soft_reset,
+	.set_clockgating_state = vega20_ih_set_clockgating_state,
+	.set_powergating_state = vega20_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs vega20_ih_funcs = {
+	.get_wptr = vega20_ih_get_wptr,
+	.decode_iv = amdgpu_ih_decode_iv_helper,
+	.set_rptr = vega20_ih_set_rptr
+};
+
+static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+	adev->irq.ih_funcs = &vega20_ih_funcs;
+}
+
+const struct amdgpu_ip_block_version vega20_ih_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_IH,
+	.major = 4,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &vega20_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.h b/drivers/gpu/drm/amd/amdgpu/vega20_ih.h
new file mode 100644
index 000000000000..7af6d8758ee3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VEGA20_IH_H__
+#define __VEGA20_IH_H__
+
+extern const struct amd_ip_funcs vega20_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version vega20_ih_ip_block;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index d56b474b3a21..eafb76aebd00 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -642,11 +642,21 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
 	return -EINVAL;
 }
 
-static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
+/**
+ * vi_asic_pci_config_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use PCI Config method to reset the GPU.
+ *
+ * Returns 0 for success.
+ */
+static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
 {
 	u32 i;
+	int r = -EINVAL;
 
-	dev_info(adev->dev, "GPU pci config reset\n");
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
 
 	/* disable BM */
 	pci_clear_master(adev->pdev);
@@ -661,29 +671,11 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
 			/* enable BM */
 			pci_set_master(adev->pdev);
 			adev->has_hw_reset = true;
-			return 0;
+			r = 0;
+			break;
 		}
 		udelay(1);
 	}
-	return -EINVAL;
-}
-
-/**
- * vi_asic_pci_config_reset - soft reset GPU
- *
- * @adev: amdgpu_device pointer
- *
- * Use PCI Config method to reset the GPU.
- *
- * Returns 0 for success.
- */
-static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
-{
-	int r;
-
-	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
-	r = vi_gpu_pci_config_reset(adev);
 
 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
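The vi.c hunk folds the old vi_gpu_pci_config_reset() into vi_asic_pci_config_reset() and swaps the early "return 0" for "r = 0; break;", so the function always falls through to the amdgpu_atombios_scratch_regs_engine_hung(adev, false) call after the poll loop. A stand-alone sketch of that bounded poll-and-break shape (timeout value, device model and delay are all simulated, not the driver's):

#include <stdio.h>

#define TIMEOUT_US 100000	/* plays the role of adev->usec_timeout */

static int device_ready(unsigned int elapsed_us)
{
	return elapsed_us >= 1234;	/* pretend the reset completes here */
}

static int pci_config_reset(void)
{
	int r = -22;	/* -EINVAL: assume failure until proven otherwise */
	unsigned int i;

	for (i = 0; i < TIMEOUT_US; i++) {
		if (device_ready(i)) {
			r = 0;		/* success: record it and stop polling */
			break;
		}
		/* udelay(1) would go here on real hardware */
	}
	/* cleanup shared by success and timeout paths runs here */
	return r;
}

int main(void)
{
	printf("reset returned %d\n", pci_config_reset());
	return 0;
}

Initializing r to the failure code and breaking on success is what lets the refactored function share one exit path for both outcomes.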