Diffstat (limited to 'drivers/gpu/drm/amd')
277 files changed, 9470 insertions, 4360 deletions
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index 13340f353ea8..216d932a7831 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: MIT
 menu "ACP (Audio CoProcessor) Configuration"
+	depends on DRM_AMDGPU
 
 config DRM_AMD_ACP
 	bool "Enable AMD Audio CoProcessor IP support"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index da3bcff61b97..8ac1581a6b53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -579,6 +579,7 @@ struct amdgpu_asic_funcs {
 	/* invalidate hdp read cache */
 	void (*invalidate_hdp)(struct amdgpu_device *adev,
 			       struct amdgpu_ring *ring);
+	void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev);
 	/* check if the asic needs a full reset of if soft reset will work */
 	bool (*need_full_reset)(struct amdgpu_device *adev);
 	/* initialize doorbell layout for specific asic*/
@@ -944,6 +945,7 @@ struct amdgpu_device {
 
 	/* s3/s4 mask */
 	bool				in_suspend;
+	bool				in_hibernate;
 
 	/* record last mm index being written through WREG32*/
 	unsigned long last_mm_index;
@@ -969,6 +971,7 @@ struct amdgpu_device {
 	int				pstate;
 	/* enable runtime pm on the device */
 	bool				runpm;
+	bool				in_runpm;
 
 	bool				pm_sysfs_en;
 	bool				ucode_sysfs_en;
@@ -992,6 +995,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 			uint32_t acc_flags);
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 		    uint32_t acc_flags);
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+			     uint32_t acc_flags);
 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
 
@@ -1174,9 +1179,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
 int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
-int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
-void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
+int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
+void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
 long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
 			     unsigned long arg);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 8609287620ea..abfbe89e805e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/dma-buf.h>
 #include "amdgpu_xgmi.h"
+#include <uapi/linux/kfd_ioctl.h>
 
 static const unsigned int compute_vmid_bitmap = 0xFF00;
 
@@ -126,7 +127,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 		/* this is going to have a few of the MSBs set that we need to
 		 * clear
 		 */
-		bitmap_complement(gpu_resources.queue_bitmap,
+		bitmap_complement(gpu_resources.cp_queue_bitmap,
 				  adev->gfx.mec.queue_bitmap,
 				  KGD_MAX_QUEUES);
 
@@ -137,7 +138,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 				* adev->gfx.mec.num_pipe_per_mec
 				* adev->gfx.mec.num_queue_per_pipe;
 		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
-			clear_bit(i, gpu_resources.queue_bitmap);
+			clear_bit(i, gpu_resources.cp_queue_bitmap);
 		amdgpu_doorbell_get_kfd_info(adev,
 				&gpu_resources.doorbell_physical_address,
@@ -178,18 +179,18 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
 }
 
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
 {
 	if (adev->kfd.dev)
-		kgd2kfd_suspend(adev->kfd.dev);
+		kgd2kfd_suspend(adev->kfd.dev, run_pm);
 }
 
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
 {
 	int r = 0;
 
 	if (adev->kfd.dev)
-		r = kgd2kfd_resume(adev->kfd.dev);
+		r = kgd2kfd_resume(adev->kfd.dev, run_pm);
 
 	return r;
 }
 
@@ -224,7 +225,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
 
 int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 				void **mem_obj, uint64_t *gpu_addr,
-				void **cpu_ptr, bool mqd_gfx9)
+				void **cpu_ptr, bool cp_mqd_gfx9)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_bo *bo = NULL;
@@ -240,8 +241,8 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 	bp.type = ttm_bo_type_kernel;
 	bp.resv = NULL;
 
-	if (mqd_gfx9)
-		bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
+	if (cp_mqd_gfx9)
+		bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;
 
 	r = amdgpu_bo_create(adev, &bp, &bo);
 	if (r) {
@@ -402,7 +403,7 @@ void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
 
 	if (amdgpu_sriov_vf(adev))
 		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
-	else if (adev->powerplay.pp_funcs) {
+	else if (adev->pm.dpm_enabled) {
 		if (amdgpu_emu_mode == 1)
 			mem_info->mem_clk_max = 0;
 		else
@@ -427,7 +428,7 @@ uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
 	/* the sclk is in quantas of 10kHz */
 	if (amdgpu_sriov_vf(adev))
 		return adev->clock.default_sclk / 100;
-	else if (adev->powerplay.pp_funcs)
+	else if (adev->pm.dpm_enabled)
 		return amdgpu_dpm_get_sclk(adev, false) / 100;
 	else
 		return 100;
@@ -501,10 +502,11 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
 					    metadata_size, &metadata_flags);
 	if (flags) {
 		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
-				ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+				KFD_IOC_ALLOC_MEM_FLAGS_VRAM
+				: KFD_IOC_ALLOC_MEM_FLAGS_GTT;
 
 		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
-			*flags |= ALLOC_MEM_FLAGS_PUBLIC;
+			*flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
 	}
 
 out_put:
@@ -525,6 +527,14 @@ uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
 	return adev->gmc.xgmi.hive_id;
 }
 
+uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+	return adev->unique_id;
+}
+
 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
 {
 	struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
@@ -647,13 +657,9 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
 int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-	uint32_t flush_type = 0;
+	const uint32_t flush_type = 0;
 	bool all_hub = false;
 
-	if (adev->gmc.xgmi.num_physical_nodes &&
-	    adev->asic_type == CHIP_VEGA20)
-		flush_type = 2;
-
 	if (adev->family == AMDGPU_FAMILY_AI)
 		all_hub = true;
 
@@ -677,6 +683,11 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
 {
 }
 
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+	return 0;
+}
+
 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm)
 {
@@ -713,11 +724,11 @@ void kgd2kfd_exit(void)
 {
 }
 
-void kgd2kfd_suspend(struct kfd_dev *kfd)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 {
 }
 
-int kgd2kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 {
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 47b0f2957d1f..13feb313e9b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
 						       struct mm_struct *mm);
 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
 struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
 
 struct amdkfd_process_info {
 	/* List head of all VMs that belong to a KFD process */
@@ -122,8 +123,8 @@ struct amdkfd_process_info {
 int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);
 
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 			const void *ih_ring_entry);
 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -171,6 +172,7 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
 				  uint32_t *flags);
 uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
+uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
 uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
@@ -240,6 +242,9 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
 
+int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+				  struct tile_config *config);
+
 /* KGD2KFD callbacks */
 int kgd2kfd_init(void);
 void kgd2kfd_exit(void);
@@ -249,8 +254,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 			 struct drm_device *ddev,
 			 const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd);
-int kgd2kfd_resume(struct kfd_dev *kfd);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 4bcc175a149d..6529caca88fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -79,7 +79,7 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
 		dev_warn(adev->dev,
 			 "Invalid sdma engine id (%d), using engine id 0\n",
 			 engine_id);
-		/* fall through */
+		fallthrough;
 	case 0:
 		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
 				mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
@@ -319,7 +319,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
 	.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
 	.get_atc_vmid_pasid_mapping_info =
 				kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
-	.get_tile_config = kgd_gfx_v9_get_tile_config,
 	.set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index a7b17c8deb00..4ec6d0c03201 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -42,38 +42,6 @@ enum hqd_dequeue_request_type {
 	SAVE_WAVES
 };
 
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
-					struct tile_config *config)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
-	config->gb_addr_config = adev->gfx.config.gb_addr_config;
-#if 0
-/* TODO - confirm REG_GET_FIELD x2, should be OK as is... but
- * MC_ARB_RAMCFG register doesn't exist on Vega10 - initial amdgpu
- * changes commented out related code, doing the same here for now but
- * need to sync with Ken et al
- */
-	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
-					MC_ARB_RAMCFG, NOOFBANK);
-	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
-					MC_ARB_RAMCFG, NOOFRANKS);
-#endif
-
-	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
-	config->num_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
-	config->macro_tile_config_ptr =
-			adev->gfx.config.macrotile_mode_array;
-	config->num_macro_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
-	return 0;
-}
-
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 {
 	return (struct amdgpu_device *)kgd;
@@ -805,7 +773,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
 	.address_watch_get_offset = kgd_address_watch_get_offset,
 	.get_atc_vmid_pasid_mapping_info =
 			get_atc_vmid_pasid_mapping_info,
-	.get_tile_config = amdgpu_amdkfd_get_tile_config,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
+	.get_unique_id = amdgpu_amdkfd_get_unique_id,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 8f052e98a3c6..0b7e78748540 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -84,31 +84,6 @@ union TCP_WATCH_CNTL_BITS {
 	float f32All;
 };
 
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int get_tile_config(struct kgd_dev *kgd,
-		struct tile_config *config)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
-	config->gb_addr_config = adev->gfx.config.gb_addr_config;
-	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
-				MC_ARB_RAMCFG, NOOFBANK);
-	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
-				MC_ARB_RAMCFG, NOOFRANKS);
-
-	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
-	config->num_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
-	config->macro_tile_config_ptr =
-			adev->gfx.config.macrotile_mode_array;
-	config->num_macro_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
-	return 0;
-}
-
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 {
 	return (struct amdgpu_device *)kgd;
@@ -730,7 +705,6 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
 	.address_watch_get_offset = kgd_address_watch_get_offset,
 	.get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
 	.set_scratch_backing_va = set_scratch_backing_va,
-	.get_tile_config = get_tile_config,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
 	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 19a10db93d68..ccd635b812b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -41,31 +41,6 @@ enum hqd_dequeue_request_type {
 	RESET_WAVES
 };
 
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int get_tile_config(struct kgd_dev *kgd,
-		struct tile_config *config)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
-	config->gb_addr_config = adev->gfx.config.gb_addr_config;
-	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
-				MC_ARB_RAMCFG, NOOFBANK);
-	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
-				MC_ARB_RAMCFG, NOOFRANKS);
-
-	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
-	config->num_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
-	config->macro_tile_config_ptr =
-			adev->gfx.config.macrotile_mode_array;
-	config->num_macro_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
-	return 0;
-}
-
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 {
 	return (struct amdgpu_device *)kgd;
@@ -676,6 +651,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
 	.get_atc_vmid_pasid_mapping_info =
 			get_atc_vmid_pasid_mapping_info,
 	.set_scratch_backing_va = set_scratch_backing_va,
-	.get_tile_config = get_tile_config,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 8562afe5b761..df841c2ac5e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -48,28 +48,6 @@ enum hqd_dequeue_request_type {
 	RESET_WAVES
 };
 
-
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
-		struct tile_config *config)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
-	config->gb_addr_config = adev->gfx.config.gb_addr_config;
-
-	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
-	config->num_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
-	config->macro_tile_config_ptr =
-			adev->gfx.config.macrotile_mode_array;
-	config->num_macro_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
-	return 0;
-}
-
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 {
 	return (struct amdgpu_device *)kgd;
@@ -736,7 +714,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
 	.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
 	.get_atc_vmid_pasid_mapping_info =
 			kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
-	.get_tile_config = kgd_gfx_v9_get_tile_config,
 	.set_vm_context_page_table_base =
 			kgd_gfx_v9_set_vm_context_page_table_base,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
+	.get_unique_id = amdgpu_amdkfd_get_unique_id,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index 63d3e6683dfe..aedf67d57449 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -60,5 +60,3 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
 
 bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
 					uint8_t vmid, uint16_t *p_pasid);
-int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
-		struct tile_config *config);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index fa8ac9d19a7a..6a5b91d23fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -29,6 +29,7 @@
 #include "amdgpu_vm.h"
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_dma_buf.h"
+#include <uapi/linux/kfd_ioctl.h>
 
 /* BO flag to indicate a KFD userptr BO */
 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
@@ -276,6 +277,42 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	return 0;
 }
 
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+	struct amdgpu_bo *root = bo;
+	struct amdgpu_vm_bo_base *vm_bo;
+	struct amdgpu_vm *vm;
+	struct amdkfd_process_info *info;
+	struct amdgpu_amdkfd_fence *ef;
+	int ret;
+
+	/* we can always get vm_bo from root PD bo.*/
+	while (root->parent)
+		root = root->parent;
+
+	vm_bo = root->vm_bo;
+	if (!vm_bo)
+		return 0;
+
+	vm = vm_bo->vm;
+	if (!vm)
+		return 0;
+
+	info = vm->process_info;
+	if (!info || !info->eviction_fence)
+		return 0;
+
+	ef = container_of(dma_fence_get(&info->eviction_fence->base),
+			struct amdgpu_amdkfd_fence, base);
+
+	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
+	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
+	dma_resv_unlock(bo->tbo.base.resv);
+
+	dma_fence_put(&ef->base);
+	return ret;
+}
+
 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
 				     bool wait)
 {
@@ -364,18 +401,18 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
 {
 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
-	bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
+	bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
 	uint32_t mapping_flags;
 
 	mapping_flags = AMDGPU_VM_PAGE_READABLE;
-	if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
+	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
 		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
-	if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
+	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
 
 	switch (adev->asic_type) {
 	case CHIP_ARCTURUS:
-		if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
+		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
 			if (bo_adev == adev)
 				mapping_flags |= coherent ?
 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -847,9 +884,9 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
 			    vm_list_node) {
 		struct amdgpu_bo *pd = peer_vm->root.base.bo;
 
-		ret = amdgpu_sync_resv(NULL,
-					sync, pd->tbo.base.resv,
-					AMDGPU_FENCE_OWNER_KFD, false);
+		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
+				       AMDGPU_SYNC_NE_OWNER,
+				       AMDGPU_FENCE_OWNER_KFD);
 		if (ret)
 			return ret;
 	}
@@ -1044,6 +1081,8 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 	list_del(&vm->vm_list_node);
 	mutex_unlock(&process_info->lock);
 
+	vm->process_info = NULL;
+
 	/* Release per-process resources when last compute VM is destroyed */
 	if (!process_info->n_vms) {
 		WARN_ON(!list_empty(&process_info->kfd_bo_list));
@@ -1122,24 +1161,24 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	/*
 	 * Check on which domain to allocate BO
 	 */
-	if (flags & ALLOC_MEM_FLAGS_VRAM) {
+	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
 		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
-		alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
+		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
 			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
-	} else if (flags & ALLOC_MEM_FLAGS_GTT) {
+	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
 		alloc_flags = 0;
-	} else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
+	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
 		domain = AMDGPU_GEM_DOMAIN_GTT;
 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
 		alloc_flags = 0;
 		if (!offset || !*offset)
 			return -EINVAL;
 		user_addr = untagged_addr(*offset);
-	} else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
-			ALLOC_MEM_FLAGS_MMIO_REMAP)) {
+	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+			KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
 		domain = AMDGPU_GEM_DOMAIN_GTT;
 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
 		bo_type = ttm_bo_type_sg;
@@ -1160,7 +1199,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	}
 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
 	mutex_init(&(*mem)->lock);
-	(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
+	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
 
 	/* Workaround for AQL queue wraparound bug. Map the same
 	 * memory twice. That means we only actually allocate half
@@ -1304,7 +1343,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	}
 
 	/* Free the BO*/
-	amdgpu_bo_unref(&mem->bo);
+	drm_gem_object_put_unlocked(&mem->bo->tbo.base);
 	mutex_destroy(&mem->lock);
 	kfree(mem);
 
@@ -1642,12 +1681,15 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
 
 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
 	mutex_init(&(*mem)->lock);
+
 	(*mem)->alloc_flags =
 		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
-		ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
-		ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
+		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
+		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
+		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
 
-	(*mem)->bo = amdgpu_bo_ref(bo);
+	drm_gem_object_get(&bo->tbo.base);
+	(*mem)->bo = bo;
 	(*mem)->va = va;
 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
 		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
@@ -2204,3 +2246,25 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
 	kfree(mem);
 	return 0;
 }
+
+/* Returns GPU-specific tiling mode information */
+int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+				struct tile_config *config)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+	config->gb_addr_config = adev->gfx.config.gb_addr_config;
+	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
+	config->num_tile_configs =
+			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+	config->macro_tile_config_ptr =
+			adev->gfx.config.macrotile_mode_array;
+	config->num_macro_tile_configs =
+			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+
+	/* Those values are not set from GFX9 onwards */
+	config->num_banks = adev->gfx.config.num_banks;
+	config->num_ranks = adev->gfx.config.num_ranks;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 50dff69a0f6e..b1172d93c99c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -192,30 +192,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
 
 static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
 {
-	uint8_t __iomem *bios;
-	size_t size;
+	phys_addr_t rom = adev->pdev->rom;
+	size_t romlen = adev->pdev->romlen;
+	void __iomem *bios;
 
 	adev->bios = NULL;
 
-	bios = pci_platform_rom(adev->pdev, &size);
-	if (!bios) {
+	if (!rom || romlen == 0)
 		return false;
-	}
 
-	adev->bios = kzalloc(size, GFP_KERNEL);
-	if (adev->bios == NULL)
+	adev->bios = kzalloc(romlen, GFP_KERNEL);
+	if (!adev->bios)
 		return false;
 
-	memcpy_fromio(adev->bios, bios, size);
+	bios = ioremap(rom, romlen);
+	if (!bios)
+		goto free_bios;
 
-	if (!check_atom_bios(adev->bios, size)) {
-		kfree(adev->bios);
-		return false;
-	}
+	memcpy_fromio(adev->bios, bios, romlen);
+	iounmap(bios);
 
-	adev->bios_size = size;
+	if (!check_atom_bios(adev->bios, romlen))
+		goto free_bios;
+
+	adev->bios_size = romlen;
 
 	return true;
+free_bios:
+	kfree(adev->bios);
+	return false;
 }
 
 #ifdef CONFIG_ACPI
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index a62cbc8199de..f355d9a752d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1461,6 +1461,20 @@ static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector
 	return MODE_OK;
 }
 
+static int
+amdgpu_connector_late_register(struct drm_connector *connector)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	int r = 0;
+
+	if (amdgpu_connector->ddc_bus->has_aux) {
+		amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
+		r = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
+	}
+
+	return r;
+}
+
 static const struct drm_connector_helper_funcs amdgpu_connector_dp_helper_funcs = {
 	.get_modes = amdgpu_connector_dp_get_modes,
 	.mode_valid = amdgpu_connector_dp_mode_valid,
@@ -1475,6 +1489,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
 	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.force = amdgpu_connector_dvi_force,
+	.late_register = amdgpu_connector_late_register,
 };
 
 static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
@@ -1485,6 +1500,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
 	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.force = amdgpu_connector_dvi_force,
+	.late_register = amdgpu_connector_late_register,
 };
 
 void
@@ -1931,7 +1947,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 
 	connector->display_info.subpixel_order = subpixel_order;
-	drm_connector_register(connector);
 
 	if (has_aux)
 		amdgpu_atombios_dp_aux_init(amdgpu_connector);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a52a084158b1..af91627b19b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -28,6 +28,7 @@
 #include <linux/file.h>
 #include <linux/pagemap.h>
 #include <linux/sync_file.h>
+#include <linux/dma-buf.h>
 
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_syncobj.h>
@@ -415,7 +416,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 	/* Don't move this buffer if we have depleted our allowance
 	 * to move it. Don't move anything if the threshold is zero.
 	 */
-	if (p->bytes_moved < p->bytes_moved_threshold) {
+	if (p->bytes_moved < p->bytes_moved_threshold &&
+	    (!bo->tbo.base.dma_buf ||
+	     list_empty(&bo->tbo.base.dma_buf->attachments))) {
 		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
 			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
@@ -651,16 +654,19 @@ out:
 
 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 {
+	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_bo_list_entry *e;
 	int r;
 
 	list_for_each_entry(e, &p->validated, tv.head) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 		struct dma_resv *resv = bo->tbo.base.resv;
+		enum amdgpu_sync_mode sync_mode;
 
-		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
-				     amdgpu_bo_explicit_sync(bo));
-
+		sync_mode = amdgpu_bo_explicit_sync(bo) ?
+			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
+		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
+				     &fpriv->vm);
 		if (r)
 			return r;
 	}
@@ -1202,7 +1208,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct drm_sched_entity *entity = p->entity;
 	enum drm_sched_priority priority;
-	struct amdgpu_ring *ring;
 	struct amdgpu_bo_list_entry *e;
 	struct amdgpu_job *job;
 	uint64_t seq;
@@ -1211,7 +1216,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	job = p->job;
 	p->job = NULL;
 
-	r = drm_sched_job_init(&job->base, entity, p->filp);
+	r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
 	if (r)
 		goto error_unlock;
 
@@ -1255,9 +1260,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	priority = job->base.s_priority;
 	drm_sched_entity_push_job(&job->base, entity);
 
-	ring = to_amdgpu_ring(entity->rq->sched);
-	amdgpu_ring_priority_get(ring, priority);
-
 	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
 
 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 94a6c42f29ea..6ed36a2c5f73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -61,12 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 	return -EACCES;
 }
 
+static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+{
+	switch (prio) {
+	case DRM_SCHED_PRIORITY_HIGH_HW:
+	case DRM_SCHED_PRIORITY_KERNEL:
+		return AMDGPU_GFX_PIPE_PRIO_HIGH;
+	default:
+		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
+	}
+}
+
 static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
 {
 	struct amdgpu_device *adev = ctx->adev;
 	struct amdgpu_ctx_entity *entity;
 	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
 	unsigned num_scheds = 0;
+	enum gfx_pipe_priority hw_prio;
 	enum drm_sched_priority priority;
 	int r;
 
@@ -79,46 +91,51 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
 	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
 			ctx->init_priority : ctx->override_priority;
 	switch (hw_ip) {
-	case AMDGPU_HW_IP_GFX:
-		sched = &adev->gfx.gfx_ring[0].sched;
-		scheds = &sched;
-		num_scheds = 1;
-		break;
-	case AMDGPU_HW_IP_COMPUTE:
-		scheds = adev->gfx.compute_sched;
-		num_scheds = adev->gfx.num_compute_sched;
-		break;
-	case AMDGPU_HW_IP_DMA:
-		scheds = adev->sdma.sdma_sched;
-		num_scheds = adev->sdma.num_sdma_sched;
-		break;
-	case AMDGPU_HW_IP_UVD:
-		sched = &adev->uvd.inst[0].ring.sched;
-		scheds = &sched;
-		num_scheds = 1;
-		break;
-	case AMDGPU_HW_IP_VCE:
-		sched = &adev->vce.ring[0].sched;
-		scheds = &sched;
-		num_scheds = 1;
-		break;
-	case AMDGPU_HW_IP_UVD_ENC:
-		sched = &adev->uvd.inst[0].ring_enc[0].sched;
-		scheds = &sched;
-		num_scheds = 1;
-		break;
-	case AMDGPU_HW_IP_VCN_DEC:
-		scheds = adev->vcn.vcn_dec_sched;
-		num_scheds = adev->vcn.num_vcn_dec_sched;
-		break;
-	case AMDGPU_HW_IP_VCN_ENC:
-		scheds = adev->vcn.vcn_enc_sched;
-		num_scheds = adev->vcn.num_vcn_enc_sched;
-		break;
-	case AMDGPU_HW_IP_VCN_JPEG:
-		scheds = adev->jpeg.jpeg_sched;
-		num_scheds = adev->jpeg.num_jpeg_sched;
-		break;
+	case AMDGPU_HW_IP_GFX:
+		sched = &adev->gfx.gfx_ring[0].sched;
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_COMPUTE:
+		hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
+		scheds = adev->gfx.compute_prio_sched[hw_prio];
+		num_scheds = adev->gfx.num_compute_sched[hw_prio];
+		break;
+	case AMDGPU_HW_IP_DMA:
+		scheds = adev->sdma.sdma_sched;
+		num_scheds = adev->sdma.num_sdma_sched;
+		break;
+	case AMDGPU_HW_IP_UVD:
+		sched = &adev->uvd.inst[0].ring.sched;
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_VCE:
+		sched = &adev->vce.ring[0].sched;
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_UVD_ENC:
+		sched = &adev->uvd.inst[0].ring_enc[0].sched;
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_VCN_DEC:
+		sched = drm_sched_pick_best(adev->vcn.vcn_dec_sched,
+					    adev->vcn.num_vcn_dec_sched);
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_VCN_ENC:
+		sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched,
+					    adev->vcn.num_vcn_enc_sched);
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_VCN_JPEG:
+		scheds = adev->jpeg.jpeg_sched;
+		num_scheds = adev->jpeg.num_jpeg_sched;
+		break;
 	}
 
 	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
@@ -502,6 +519,29 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 	return fence;
 }
 
+static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
+					   struct amdgpu_ctx_entity *aentity,
+					   int hw_ip,
+					   enum drm_sched_priority priority)
+{
+	struct amdgpu_device *adev = ctx->adev;
+	enum gfx_pipe_priority hw_prio;
+	struct drm_gpu_scheduler **scheds = NULL;
+	unsigned num_scheds;
+
+	/* set sw priority */
+	drm_sched_entity_set_priority(&aentity->entity, priority);
+
+	/* set hw priority */
+	if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
+		hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
+		scheds = adev->gfx.compute_prio_sched[hw_prio];
+		num_scheds = adev->gfx.num_compute_sched[hw_prio];
+		drm_sched_entity_modify_sched(&aentity->entity, scheds,
+					      num_scheds);
+	}
+}
+
 void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
 				  enum drm_sched_priority priority)
 {
@@ -514,13 +554,11 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
 			ctx->init_priority : ctx->override_priority;
 	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
 		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
-			struct drm_sched_entity *entity;
-
 			if (!ctx->entities[i][j])
 				continue;
 
-			entity = &ctx->entities[i][j]->entity;
-			drm_sched_entity_set_priority(entity, ctx_prio);
+			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
+						       i, ctx_prio);
 		}
 	}
 }
@@ -628,20 +666,53 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 	mutex_destroy(&mgr->lock);
 }
 
+
+static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
+{
+	int num_compute_sched_normal = 0;
+	int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
+	int i;
+
+	/* use one drm sched array, gfx.compute_sched to store both high and
+	 * normal priority drm compute schedulers */
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+		if (!adev->gfx.compute_ring[i].has_high_prio)
+			adev->gfx.compute_sched[num_compute_sched_normal++] =
+				&adev->gfx.compute_ring[i].sched;
+		else
+			adev->gfx.compute_sched[num_compute_sched_high--] =
+				&adev->gfx.compute_ring[i].sched;
+	}
+
+	/* compute ring only has two priority for now */
+	i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
+	adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+	adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+
+	i = AMDGPU_GFX_PIPE_PRIO_HIGH;
+	if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
+		/* When compute has no high priority rings then use */
+		/* normal priority sched array */
+		adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+		adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+	} else {
+		adev->gfx.compute_prio_sched[i] =
+			&adev->gfx.compute_sched[num_compute_sched_high - 1];
+		adev->gfx.num_compute_sched[i] =
+			adev->gfx.num_compute_rings - num_compute_sched_normal;
+	}
+}
+
 void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
 {
 	int i, j;
 
+	amdgpu_ctx_init_compute_sched(adev);
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 		adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
 		adev->gfx.num_gfx_sched++;
 	}
 
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
-		adev->gfx.num_compute_sched++;
-	}
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
 		adev->sdma.num_sdma_sched++;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 337d7cdce8e9..c0f9a651dc06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -31,6 +31,9 @@
 #include <drm/drm_debugfs.h>
 
 #include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_dm_debugfs.h"
+#include "amdgpu_ras.h"
 
 /**
  * amdgpu_debugfs_add_files - Add simple debugfs entries
@@ -176,7 +179,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
 	} else {
 		r = get_user(value, (uint32_t *)buf);
 		if (!r)
-			WREG32(*pos >> 2, value);
+			amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
 	}
 	if (r) {
 		result = r;
@@ -840,6 +843,55 @@ err:
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_regs_gfxoff_write - Enable/disable GFXOFF
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos:  Offset to seek to
+ *
+ * Write a 32-bit zero to disable or a 32-bit non-zero to enable
+ */
+static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
+					 size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = file_inode(f)->i_private;
+	ssize_t result = 0;
+	int r;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
+	while (size) {
+		uint32_t value;
+
+		r = get_user(value, (uint32_t *)buf);
+		if (r) {
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
+			return r;
+		}
+
+		amdgpu_gfx_off_ctrl(adev, value ? true : false);
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
+	return result;
+}
+
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
 	.owner = THIS_MODULE,
 	.read = amdgpu_debugfs_regs_read,
@@ -888,6 +940,11 @@ static const struct file_operations amdgpu_debugfs_gpr_fops = {
 	.llseek = default_llseek
 };
 
+static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
+	.owner = THIS_MODULE,
+	.write = amdgpu_debugfs_gfxoff_write,
+};
+
 static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_regs_fops,
 	&amdgpu_debugfs_regs_didt_fops,
@@ -897,6 +954,7 @@ static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_sensors_fops,
 	&amdgpu_debugfs_wave_fops,
 	&amdgpu_debugfs_gpr_fops,
+	&amdgpu_debugfs_gfxoff_fops,
 };
 
 static const char *debugfs_regs_names[] = {
@@ -908,6 +966,7 @@ static const char *debugfs_regs_names[] = {
 	"amdgpu_sensors",
 	"amdgpu_wave",
 	"amdgpu_gpr",
+	"amdgpu_gfxoff",
 };
 
 /**
@@ -934,18 +993,6 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
 	return 0;
 }
 
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
-{
-	unsigned i;
-
-	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
-		if (adev->debugfs_regs[i]) {
-			debugfs_remove(adev->debugfs_regs[i]);
-			adev->debugfs_regs[i] = NULL;
-		}
-	}
-}
-
 static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1211,11 +1258,47 @@ failure:
 	return 0;
 }
 
+static int amdgpu_debugfs_sclk_set(void *data, u64 val)
+{
+	int ret = 0;
+	uint32_t max_freq, min_freq;
+	struct amdgpu_device *adev = (struct amdgpu_device *)data;
+
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return -EINVAL;
+
+	ret = pm_runtime_get_sync(adev->ddev->dev);
+	if (ret < 0)
+		return ret;
+
+	if (is_support_sw_smu(adev)) {
+		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true);
+		if (ret || val > max_freq || val < min_freq)
+			return -EINVAL;
+		ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val, true);
+	} else {
+		return 0;
+	}
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
+	if (ret)
+		return -EINVAL;
+
+	return 0;
+}
+
 DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
 			amdgpu_debugfs_ib_preempt, "%llu\n");
 
+DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
+			amdgpu_debugfs_sclk_set, "%llu\n");
+
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
+	int r, i;
+
 	adev->debugfs_preempt =
 		debugfs_create_file("amdgpu_preempt_ib", 0600,
 				    adev->ddev->primary->debugfs_root, adev,
@@ -1225,24 +1308,78 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 		return -EIO;
 	}
 
+	adev->smu.debugfs_sclk =
+		debugfs_create_file("amdgpu_force_sclk", 0200,
+				    adev->ddev->primary->debugfs_root, adev,
+				    &fops_sclk_set);
+	if (!(adev->smu.debugfs_sclk)) {
+		DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
+		return -EIO;
+	}
+
+	/* Register debugfs entries for amdgpu_ttm */
+	r = amdgpu_ttm_debugfs_init(adev);
+	if (r) {
+		DRM_ERROR("Failed to init debugfs\n");
+		return r;
+	}
+
+	r = amdgpu_debugfs_pm_init(adev);
+	if (r) {
+		DRM_ERROR("Failed to register debugfs file for dpm!\n");
+		return r;
+	}
+
+	if (amdgpu_debugfs_sa_init(adev)) {
+		dev_err(adev->dev, "failed to register debugfs file for SA\n");
+	}
+
+	if (amdgpu_debugfs_fence_init(adev))
+		dev_err(adev->dev, "fence debugfs file creation failed\n");
+
+	r = amdgpu_debugfs_gem_init(adev);
+	if (r)
+		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+
+	r = amdgpu_debugfs_regs_init(adev);
+	if (r)
+		DRM_ERROR("registering register debugfs failed (%d).\n", r);
+
+	r = amdgpu_debugfs_firmware_init(adev);
+	if (r)
+		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+
+#if defined(CONFIG_DRM_AMD_DC)
+	if (amdgpu_device_has_dc_support(adev)) {
+		if (dtn_debugfs_init(adev))
+			DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
+	}
+#endif
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_ring *ring = adev->rings[i];
+
+		if (!ring)
+			continue;
+
+		if (amdgpu_debugfs_ring_init(adev, ring)) {
+			DRM_ERROR("Failed to register debugfs file for rings !\n");
+		}
+	}
+
+	amdgpu_ras_debugfs_create_all(adev);
+
 	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
 					ARRAY_SIZE(amdgpu_debugfs_list));
 }
 
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
-{
-	debugfs_remove(adev->debugfs_preempt);
-}
-
 #else
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
 	return 0;
 }
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev) { }
 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
 {
 	return 0;
 }
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index f289d28ad6b2..de12d1101526 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -32,9 +32,8 @@ struct amdgpu_debugfs {
 };
 
 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
 int amdgpu_debugfs_init(struct amdgpu_device *adev);
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev);
+void amdgpu_debugfs_fini(struct amdgpu_device *adev);
 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
 			     const struct drm_info_list *files,
 			     unsigned nfiles);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b8975857d60d..affde2de2a0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -183,20 +183,51 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 			       uint32_t *buf, size_t size, bool write)
 {
-	uint64_t last;
 	unsigned long flags;
+	uint32_t hi = ~0;
+	uint64_t last;
+
+
+#ifdef CONFIG_64BIT
+	last = min(pos + size, adev->gmc.visible_vram_size);
+	if (last > pos) {
+		void __iomem *addr = adev->mman.aper_base_kaddr + pos;
+		size_t count = last - pos;
+
+		if (write) {
+			memcpy_toio(addr, buf, count);
+			mb();
+			amdgpu_asic_flush_hdp(adev, NULL);
+		} else {
+			amdgpu_asic_invalidate_hdp(adev, NULL);
+			mb();
+			memcpy_fromio(buf, addr, count);
+		}
+
+		if (count == size)
+			return;
+
+		pos += count;
+		buf += count / 4;
+		size -= count;
+	}
+#endif
+
+	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+	for (last = pos + size; pos < last; pos += 4) {
+		uint32_t tmp = pos >> 31;
 
-	last = size - 4;
-	for (last += pos; pos <= last; pos += 4) {
-		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
-		WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
+		if (tmp != hi) {
+			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+			hi = tmp;
+		}
 		if (write)
 			WREG32_NO_KIQ(mmMM_DATA, *buf++);
 		else
 			*buf++ = RREG32_NO_KIQ(mmMM_DATA);
-		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 	}
+	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 }
 
 /*
@@ -275,6 +306,26 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
 		BUG();
 }
 
+void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
+{
+	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+
+	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+	else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
+		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
+		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+	}
+
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+		udelay(500);
+	}
+}
+
 /**
  * amdgpu_mm_wreg - write to a memory mapped IO register
  *
@@ -288,8 +339,6 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 		    uint32_t acc_flags)
 {
-	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-
 	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
 		adev->last_mm_index = v;
 	}
@@ -297,20 +346,26 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 	if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
 		return amdgpu_kiq_wreg(adev, reg, v);
 
-	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
-		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
-	else {
-		unsigned long flags;
+	amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+}
 
-		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
-		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
-		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-	}
+/*
+ * amdgpu_mm_wreg_mmio_rlc -  write register either with mmio or with RLC path if in range
+ *
+ * this function is invoked only the debugfs register access
+ * */
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+		    uint32_t acc_flags)
+{
+	if (amdgpu_sriov_fullaccess(adev) &&
+		adev->gfx.rlc.funcs &&
+		adev->gfx.rlc.funcs->is_rlcg_access_range) {
 
-	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
-		udelay(500);
+		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
+			return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
 	}
+
+	amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
 }
 
 /**
@@ -1136,7 +1191,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
 	 * locking inversion with the driver load path. And the access here is
 	 * completely racy anyway. So don't bother with locking for now.
 	 */
-	return dev->open_count == 0;
+	return atomic_read(&dev->open_count) == 0;
 }
 
 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
@@ -1953,8 +2008,24 @@ static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
  */
 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
 {
-	return !!memcmp(adev->gart.ptr, adev->reset_magic,
-			AMDGPU_RESET_MAGIC_NUM);
+	if (memcmp(adev->gart.ptr, adev->reset_magic,
+			AMDGPU_RESET_MAGIC_NUM))
+		return true;
+
+	if (!adev->in_gpu_reset)
+		return false;
+
+	/*
+	 * For all ASICs with baco/mode1 reset, the VRAM is
+	 * always assumed to be lost.
+	 */
+	switch (amdgpu_asic_reset_method(adev)) {
+	case AMD_RESET_METHOD_BACO:
+	case AMD_RESET_METHOD_MODE1:
+		return true;
+	default:
+		return false;
+	}
 }
 
 /**
@@ -2344,15 +2415,16 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 		}
 		adev->ip_blocks[i].status.hw = false;
 		/* handle putting the SMC in the appropriate state */
-		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
-			r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
-			if (r) {
-				DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
-					  adev->mp1_state, r);
-				return r;
+		if(!amdgpu_sriov_vf(adev)){
+			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
+				if (r) {
+					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
+						  adev->mp1_state, r);
+					return r;
+				}
 			}
 		}
-
 		adev->ip_blocks[i].status.hw = false;
 	}
 
@@ -2686,6 +2758,9 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
 
 		if (adev->asic_reset_res)
 			goto fail;
+
+		if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
+			adev->mmhub.funcs->reset_ras_error_count(adev);
 	} else {
 
 		task_barrier_full(&hive->tb);
@@ -2800,7 +2875,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
 	if (amdgpu_emu_mode == 1)
-		adev->usec_timeout *= 2;
+		adev->usec_timeout *= 10;
 	adev->gmc.gart_size = 512 * 1024 * 1024;
 	adev->accel_working = false;
 	adev->num_rings = 0;
@@ -3088,22 +3163,6 @@ fence_driver_init:
 	} else
 		adev->ucode_sysfs_en = true;
 
-	r = amdgpu_debugfs_gem_init(adev);
-	if (r)
-		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
-
-	r = amdgpu_debugfs_regs_init(adev);
-	if (r)
-		DRM_ERROR("registering register debugfs failed (%d).\n", r);
-
-	r = amdgpu_debugfs_firmware_init(adev);
-	if (r)
-		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
-
-	r = amdgpu_debugfs_init(adev);
-	if (r)
-		DRM_ERROR("Creating debugfs files failed (%d).\n", r);
-
 	if ((amdgpu_testing & 1)) {
 		if (adev->accel_working)
 			amdgpu_test_moves(adev);
@@ -3177,6 +3236,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	flush_delayed_work(&adev->delayed_init_work);
 	adev->shutdown = true;
 
+	/* make sure IB test finished before entering exclusive mode
+	 * to avoid preemption on IB test
+	 * */
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_request_full_gpu(adev, false);
+
 	/* disable all interrupts */
 	amdgpu_irq_disable_all(adev);
 	if (adev->mode_info.mode_config_initialized){
@@ -3219,13 +3284,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	adev->rmmio = NULL;
 	amdgpu_device_doorbell_fini(adev);
 
-	amdgpu_debugfs_regs_cleanup(adev);
 	device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
 	if (adev->ucode_sysfs_en)
 		amdgpu_ucode_sysfs_fini(adev);
 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
 		amdgpu_pmu_fini(adev);
-	amdgpu_debugfs_preempt_cleanup(adev);
 	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
amdgpu_discovery_fini(adev); } @@ -3309,12 +3372,12 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) } } - amdgpu_amdkfd_suspend(adev); - amdgpu_ras_suspend(adev); r = amdgpu_device_ip_suspend_phase1(adev); + amdgpu_amdkfd_suspend(adev, !fbcon); + /* evict vram memory */ amdgpu_bo_evict_vram(adev); @@ -3393,7 +3456,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) } } } - r = amdgpu_amdkfd_resume(adev); + r = amdgpu_amdkfd_resume(adev, !fbcon); if (r) return r; @@ -3866,8 +3929,15 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, } } - if (!r && amdgpu_ras_intr_triggered()) + if (!r && amdgpu_ras_intr_triggered()) { + list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + if (tmp_adev->mmhub.funcs && + tmp_adev->mmhub.funcs->reset_ras_error_count) + tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev); + } + amdgpu_ras_intr_cleared(); + } list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { if (need_full_reset) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index f95092741c38..27d8ae19a7a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -307,7 +307,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) { if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) { - DRM_INFO("set register base offset for %s\n", + DRM_DEBUG("set register base offset for %s\n", hw_id_names[le16_to_cpu(ip->hw_id)]); adev->reg_offset[hw_ip][ip->number_instance] = ip->base_address; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 6d520a3eec40..84cee27cd7ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -99,7 +99,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work) & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && (int)(work->target_vblank - - amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) { + amdgpu_get_vblank_counter_kms(crtc)) > 0) { schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000)); return; } @@ -219,7 +219,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, if (!adev->enable_virtual_display) work->base = amdgpu_bo_gpu_offset(new_abo); work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) + - amdgpu_get_vblank_counter_kms(dev, work->crtc_id); + amdgpu_get_vblank_counter_kms(crtc); /* we borrow the event spin lock for protecting flip_wrok */ spin_lock_irqsave(&crtc->dev->event_lock, flags); @@ -924,3 +924,15 @@ int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc) return AMDGPU_CRTC_IRQ_NONE; } } + +bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc, + bool in_vblank_irq, int *vpos, + int *hpos, ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + unsigned int pipe = crtc->index; + + return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos, + stime, etime, mode); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index a59cd47aa6c1..ffeb20f11c07 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -223,6 +223,37 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf, } /** + * 
amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation + * + * @attach: attachment to pin down + * + * Pin the BO which is backing the DMA-buf so that it can't move any more. + */ +static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach) +{ + struct drm_gem_object *obj = attach->dmabuf->priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + + /* pin buffer into GTT */ + return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); +} + +/** + * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation + * + * @attach: attachment to unpin + * + * Unpin a previously pinned BO to make it movable again. + */ +static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach) +{ + struct drm_gem_object *obj = attach->dmabuf->priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + + amdgpu_bo_unpin(bo); +} + +/** * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation * @attach: DMA-buf attachment * @dir: DMA direction @@ -244,9 +275,19 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, struct sg_table *sgt; long r; - r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); - if (r) - return ERR_PTR(r); + if (!bo->pin_count) { + /* move buffer into GTT */ + struct ttm_operation_ctx ctx = { false, false }; + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (r) + return ERR_PTR(r); + + } else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) & + AMDGPU_GEM_DOMAIN_GTT)) { + return ERR_PTR(-EBUSY); + } sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages); if (IS_ERR(sgt)) @@ -277,13 +318,9 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir) { - struct drm_gem_object *obj = attach->dmabuf->priv; - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); sg_free_table(sgt); kfree(sgt); - amdgpu_bo_unpin(bo); } /** @@ -327,9 +364,10 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, } const struct dma_buf_ops amdgpu_dmabuf_ops = { - .dynamic_mapping = true, .attach = amdgpu_dma_buf_attach, .detach = amdgpu_dma_buf_detach, + .pin = amdgpu_dma_buf_pin, + .unpin = amdgpu_dma_buf_unpin, .map_dma_buf = amdgpu_dma_buf_map, .unmap_dma_buf = amdgpu_dma_buf_unmap, .release = drm_gem_dmabuf_release, @@ -413,6 +451,73 @@ error: } /** + * amdgpu_dma_buf_move_notify - &attach.move_notify implementation + * + * @attach: the DMA-buf attachment + * + * Invalidate the DMA-buf attachment, making sure that the we re-create the + * mapping before the next use. 
+ */ +static void +amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach) +{ + struct drm_gem_object *obj = attach->importer_priv; + struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv); + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct ttm_operation_ctx ctx = { false, false }; + struct ttm_placement placement = {}; + struct amdgpu_vm_bo_base *bo_base; + int r; + + if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM) + return; + + r = ttm_bo_validate(&bo->tbo, &placement, &ctx); + if (r) { + DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r); + return; + } + + for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { + struct amdgpu_vm *vm = bo_base->vm; + struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; + + if (ticket) { + /* When we get an error here it means that somebody + * else is holding the VM lock and updating page tables. + * So we can just continue here. + */ + r = dma_resv_lock(resv, ticket); + if (r) + continue; + + } else { + /* TODO: This is more problematic and we actually need + * to allow page table updates without holding the + * lock. + */ + if (!dma_resv_trylock(resv)) + continue; + } + + r = amdgpu_vm_clear_freed(adev, vm, NULL); + if (!r) + r = amdgpu_vm_handle_moved(adev, vm); + + if (r && r != -EBUSY) + DRM_ERROR("Failed to invalidate VM page tables (%d)\n", + r); + + dma_resv_unlock(resv); + } +} + +static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = { + .move_notify = amdgpu_dma_buf_move_notify +}; + +/** * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation * @dev: DRM device * @dma_buf: Shared DMA buffer @@ -444,7 +549,8 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, if (IS_ERR(obj)) return obj; - attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true); + attach = dma_buf_dynamic_attach(dma_buf, dev->dev, + &amdgpu_dma_buf_attach_ops, obj); if (IS_ERR(attach)) { drm_gem_object_put(obj); return ERR_CAST(attach); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index a2e8c3dfb4f1..ba1bb95a3cf9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -1171,3 +1171,20 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, return ret; } + +int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, + uint32_t cstate) +{ + int ret = 0; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + void *pp_handle = adev->powerplay.pp_handle; + struct smu_context *smu = &adev->smu; + + if (is_support_sw_smu(adev)) + ret = smu_set_df_cstate(smu, cstate); + else if (pp_funcs && + pp_funcs->set_df_cstate) + ret = pp_funcs->set_df_cstate(pp_handle, cstate); + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index 902ca6c00cca..936d85aa0fbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h @@ -448,6 +448,8 @@ struct amdgpu_pm { /* powerplay feature */ uint32_t pp_feature; + /* Used for I2C access to various EEPROMs on relevant ASICs */ + struct i2c_adapter smu_i2c; }; #define R600_SSTU_DFLT 0 @@ -533,4 +535,7 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device *adev); int amdgpu_dpm_baco_enter(struct amdgpu_device *adev); +int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, + uint32_t cstate); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index
42f4febe24c6..a735d79a717b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -85,9 +85,10 @@ * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask * - 3.36.0 - Allow reading more status registers on si/cik + * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 36 +#define KMS_DRIVER_MINOR 37 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; @@ -1021,6 +1022,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct drm_device *dev; + struct amdgpu_device *adev; unsigned long flags = ent->driver_data; int ret, retry = 0; bool supports_atomic = false; @@ -1090,6 +1092,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, dev); + amdgpu_driver_load_kms(dev, ent->driver_data); + retry_init: ret = drm_dev_register(dev, ent->driver_data); if (ret == -EAGAIN && ++retry <= 3) { @@ -1100,6 +1104,11 @@ retry_init: } else if (ret) goto err_pci; + adev = dev->dev_private; + ret = amdgpu_debugfs_init(adev); + if (ret) + DRM_ERROR("Creating debugfs files failed (%d).\n", ret); + return 0; err_pci: @@ -1119,9 +1128,10 @@ amdgpu_pci_remove(struct pci_dev *pdev) #endif DRM_ERROR("Hotplug removal is not supported\n"); drm_dev_unplug(dev); - drm_dev_put(dev); + amdgpu_driver_unload_kms(dev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); + drm_dev_put(dev); } static void @@ -1171,7 +1181,9 @@ static int amdgpu_pmops_freeze(struct device *dev) struct amdgpu_device *adev = drm_dev->dev_private; int r; + adev->in_hibernate = true; r = amdgpu_device_suspend(drm_dev, true); + adev->in_hibernate = false; if (r) return r; return amdgpu_asic_reset(adev); @@ -1220,11 +1232,15 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) } } + adev->in_runpm = true; if (amdgpu_device_supports_boco(drm_dev)) drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; drm_kms_helper_poll_disable(drm_dev); ret = amdgpu_device_suspend(drm_dev, false); + if (ret) + return ret; + if (amdgpu_device_supports_boco(drm_dev)) { /* Only need to handle PCI state in the driver for ATPX * PCI core handles it for _PR3. 
@@ -1278,6 +1294,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) drm_kms_helper_poll_enable(drm_dev); if (amdgpu_device_supports_boco(drm_dev)) drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; + adev->in_runpm = false; return 0; } @@ -1285,24 +1302,55 @@ static int amdgpu_pmops_runtime_idle(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_dev->dev_private; - struct drm_crtc *crtc; + /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ + int ret = 1; if (!adev->runpm) { pm_runtime_forbid(dev); return -EBUSY; } - list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) { - if (crtc->enabled) { - DRM_DEBUG_DRIVER("failing to power off - crtc active\n"); - return -EBUSY; + if (amdgpu_device_has_dc_support(adev)) { + struct drm_crtc *crtc; + + drm_modeset_lock_all(drm_dev); + + drm_for_each_crtc(crtc, drm_dev) { + if (crtc->state->active) { + ret = -EBUSY; + break; + } + } + + drm_modeset_unlock_all(drm_dev); + + } else { + struct drm_connector *list_connector; + struct drm_connector_list_iter iter; + + mutex_lock(&drm_dev->mode_config.mutex); + drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL); + + drm_connector_list_iter_begin(drm_dev, &iter); + drm_for_each_connector_iter(list_connector, &iter) { + if (list_connector->dpms == DRM_MODE_DPMS_ON) { + ret = -EBUSY; + break; + } } + + drm_connector_list_iter_end(&iter); + + drm_modeset_unlock(&drm_dev->mode_config.connection_mutex); + mutex_unlock(&drm_dev->mode_config.mutex); } + if (ret == -EBUSY) + DRM_DEBUG_DRIVER("failing to power off - crtc active\n"); + pm_runtime_mark_last_busy(dev); pm_runtime_autosuspend(dev); - /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ - return 1; + return ret; } long amdgpu_drm_ioctl(struct file *filp, @@ -1377,32 +1425,15 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) return 0; } -static bool -amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe, - bool in_vblank_irq, int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode) -{ - return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos, - stime, etime, mode); -} - static struct drm_driver kms_driver = { .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE, - .load = amdgpu_driver_load_kms, .open = amdgpu_driver_open_kms, .postclose = amdgpu_driver_postclose_kms, .lastclose = amdgpu_driver_lastclose_kms, - .unload = amdgpu_driver_unload_kms, - .get_vblank_counter = amdgpu_get_vblank_counter_kms, - .enable_vblank = amdgpu_enable_vblank_kms, - .disable_vblank = amdgpu_disable_vblank_kms, - .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos, - .get_scanout_position = amdgpu_get_crtc_scanout_position, .irq_handler = amdgpu_irq_handler, .ioctls = amdgpu_ioctls_kms, .gem_free_object_unlocked = amdgpu_gem_object_free, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 2672dc64a310..25ddb482466a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -133,8 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, u32 cpp; u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | - AMDGPU_GEM_CREATE_VRAM_CLEARED | - AMDGPU_GEM_CREATE_CPU_GTT_USWC; + AMDGPU_GEM_CREATE_VRAM_CLEARED; info = 
drm_get_format_info(adev->ddev, mode_cmd); cpp = info->cpp[0]; @@ -336,15 +335,12 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev) drm_fb_helper_prepare(adev->ddev, &rfbdev->helper, &amdgpu_fb_helper_funcs); - ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper, - AMDGPUFB_CONN_LIMIT); + ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper); if (ret) { kfree(rfbdev); return ret; } - drm_fb_helper_single_add_all_connectors(&rfbdev->helper); - /* disable all the possible outputs/crtcs before entering KMS mode */ if (!amdgpu_device_has_dc_support(adev)) drm_helper_disable_unused_functions(adev->ddev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 3c01252b1e0e..7531527067df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -503,9 +503,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, */ int amdgpu_fence_driver_init(struct amdgpu_device *adev) { - if (amdgpu_debugfs_fence_init(adev)) - dev_err(adev->dev, "fence debugfs file creation failed\n"); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 0f960b498792..6b9c9193cdfa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -192,6 +192,14 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev) return adev->gfx.mec.num_mec > 1; } +bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, + int queue) +{ + /* Policy: make queue 0 of each pipe a high priority compute queue */ + return (queue == 0); + +} + void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) { int i, queue, pipe, mec; @@ -477,7 +485,7 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev) kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i], RESET_QUEUES, 0, 0); - return amdgpu_ring_test_ring(kiq_ring); + return amdgpu_ring_test_helper(kiq_ring); } int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) @@ -565,7 +573,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev) int r; struct ras_fs_if fs_info = { .sysfs_name = "gfx_err_count", - .debugfs_name = "gfx_err_inject", }; struct ras_ih_if ih_info = { .cb = amdgpu_gfx_process_ras_data_cb, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index ca17ffb01301..5825692d07e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -41,6 +41,15 @@ #define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES +enum gfx_pipe_priority { + AMDGPU_GFX_PIPE_PRIO_NORMAL = 1, + AMDGPU_GFX_PIPE_PRIO_HIGH, + AMDGPU_GFX_PIPE_PRIO_MAX +}; + +#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0 +#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15 + struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; @@ -151,6 +160,8 @@ struct amdgpu_gfx_config { unsigned num_gpus; unsigned multi_gpu_tile_size; unsigned mc_arb_ramcfg; + unsigned num_banks; + unsigned num_ranks; unsigned gb_addr_config; unsigned num_rbs; unsigned gs_vgt_table_depth; @@ -204,6 +215,7 @@ struct amdgpu_gfx_funcs { u32 queue, u32 vmid); int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if); int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status); + void (*reset_ras_error_count) (struct amdgpu_device *adev); }; struct sq_work { @@ -278,8 +290,9 @@ struct amdgpu_gfx { uint32_t num_gfx_sched; unsigned
num_gfx_rings; struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; + struct drm_gpu_scheduler **compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX]; struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS]; - uint32_t num_compute_sched; + uint32_t num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX]; unsigned num_compute_rings; struct amdgpu_irq_src eop_irq; struct amdgpu_irq_src priv_reg_irq; @@ -361,6 +374,8 @@ void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit, int *mec, int *pipe, int *queue); bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec, int pipe, int queue); +bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, + int queue); int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me, int pipe, int queue); void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 60655834d649..ccbd7acfc4cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -48,7 +48,6 @@ * produce command buffers which are sent to the kernel and * put in IBs for execution by the requested ring. */ -static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev); /** * amdgpu_ib_get - request an IB (Indirect Buffer) @@ -295,9 +294,7 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev) } adev->ib_pool_ready = true; - if (amdgpu_debugfs_sa_init(adev)) { - dev_err(adev->dev, "failed to register debugfs file for SA\n"); - } + return 0; } @@ -421,7 +418,7 @@ static const struct drm_info_list amdgpu_debugfs_sa_list[] = { #endif -static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev) +int amdgpu_debugfs_sa_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index d42be880a236..4981e443a884 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -117,12 +117,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) static void amdgpu_job_free_cb(struct drm_sched_job *s_job) { - struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); struct amdgpu_job *job = to_amdgpu_job(s_job); drm_sched_job_cleanup(s_job); - amdgpu_ring_priority_put(ring, s_job->s_priority); dma_fence_put(job->fence); amdgpu_sync_free(&job->sync); amdgpu_sync_free(&job->sched_sync); @@ -143,7 +141,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, void *owner, struct dma_fence **f) { enum drm_sched_priority priority; - struct amdgpu_ring *ring; int r; if (!f) @@ -158,9 +155,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, priority = job->base.s_priority; drm_sched_entity_push_job(&job->base, entity); - ring = to_amdgpu_ring(entity->rq->sched); - amdgpu_ring_priority_get(ring, priority); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 60591dbc2097..fd1dc3236eca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -88,9 +88,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) if (adev->rmmio == NULL) goto done_free; - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_request_full_gpu(adev, false); - if (adev->runpm) { pm_runtime_get_sync(dev->dev); pm_runtime_forbid(dev->dev); @@ -170,10 +167,17 @@ int
amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) } if (amdgpu_device_supports_boco(dev) && - (amdgpu_runtime_pm != 0)) /* enable runpm by default */ + (amdgpu_runtime_pm != 0)) /* enable runpm by default for boco */ + adev->runpm = true; + else if (amdgpu_device_supports_baco(dev) && + (amdgpu_runtime_pm != 0) && + (adev->asic_type >= CHIP_TOPAZ) && + (adev->asic_type != CHIP_VEGA10) && + (adev->asic_type != CHIP_VEGA20) && + (adev->asic_type != CHIP_ARCTURUS)) /* enable runpm on VI+ */ adev->runpm = true; else if (amdgpu_device_supports_baco(dev) && - (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 */ + (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 on CI */ adev->runpm = true; /* Call ACPI methods: require modeset init @@ -1110,14 +1114,15 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, /** * amdgpu_get_vblank_counter_kms - get frame count * - * @dev: drm dev pointer - * @pipe: crtc to get the frame count from + * @crtc: crtc to get the frame count from * * Gets the frame count on the requested crtc (all asics). * Returns frame count on success, -EINVAL on failure. */ -u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe) +u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc) { + struct drm_device *dev = crtc->dev; + unsigned int pipe = crtc->index; struct amdgpu_device *adev = dev->dev_private; int vpos, hpos, stat; u32 count; @@ -1177,14 +1182,15 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe) /** * amdgpu_enable_vblank_kms - enable vblank interrupt * - * @dev: drm dev pointer - * @pipe: crtc to enable vblank interrupt for + * @crtc: crtc to enable vblank interrupt for * * Enable the interrupt on the requested crtc (all asics). * Returns 0 on success, -EINVAL on failure. */ -int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe) +int amdgpu_enable_vblank_kms(struct drm_crtc *crtc) { + struct drm_device *dev = crtc->dev; + unsigned int pipe = crtc->index; struct amdgpu_device *adev = dev->dev_private; int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe); @@ -1194,13 +1200,14 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe) /** * amdgpu_disable_vblank_kms - disable vblank interrupt * - * @dev: drm dev pointer - * @pipe: crtc to disable vblank interrupt for + * @crtc: crtc to disable vblank interrupt for * * Disable the interrupt on the requested crtc (all asics). 
*/ -void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe) +void amdgpu_disable_vblank_kms(struct drm_crtc *crtc) { + struct drm_device *dev = crtc->dev; + unsigned int pipe = crtc->index; struct amdgpu_device *adev = dev->dev_private; int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c index 676c48c02d77..ead3dc572ec5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c @@ -32,7 +32,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev) }; struct ras_fs_if fs_info = { .sysfs_name = "mmhub_err_count", - .debugfs_name = "mmhub_err_inject", }; if (!adev->mmhub.ras_if) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index 1cd78940cf82..e89fb35fec71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -26,6 +26,7 @@ struct amdgpu_mmhub_funcs { int (*ras_late_init)(struct amdgpu_device *adev); void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); + void (*reset_ras_error_count)(struct amdgpu_device *adev); }; struct amdgpu_mmhub { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index eb9975f4decb..37ba07e2feb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -612,6 +612,11 @@ void amdgpu_panel_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode); int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc); +bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc, + bool in_vblank_irq, int *vpos, + int *hpos, ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode); + /* fbdev layer */ int amdgpu_fbdev_init(struct amdgpu_device *adev); void amdgpu_fbdev_fini(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c index 7d5c3a9de9ea..6201a5f4b4fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -30,7 +30,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev) }; struct ras_fs_if fs_info = { .sysfs_name = "pcie_bif_err_count", - .debugfs_name = "pcie_bif_err_inject", }; if (!adev->nbio.ras_if) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index e3f16b49e970..c687f5415b3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -31,6 +31,7 @@ */ #include <linux/list.h> #include <linux/slab.h> +#include <linux/dma-buf.h> #include <drm/amdgpu_drm.h> #include <drm/drm_cache.h> @@ -925,6 +926,9 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return 0; } + if (bo->tbo.base.import_attach) + dma_buf_pin(bo->tbo.base.import_attach); + bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; /* force to pin into visible video ram */ if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) @@ -1008,6 +1012,9 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) amdgpu_bo_subtract_pin_size(bo); + if (bo->tbo.base.import_attach) + dma_buf_unpin(bo->tbo.base.import_attach); + for (i = 0; i < bo->placement.num_placement; i++) { bo->placements[i].lpfn = 0; bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; @@ -1274,6 +1281,10 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, amdgpu_bo_kunmap(abo); 
+ if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach && + bo->mem.mem_type != TTM_PL_SYSTEM) + dma_buf_move_notify(abo->tbo.base.dma_buf); + /* remember the eviction */ if (evict) atomic64_inc(&adev->num_evictions); @@ -1307,6 +1318,12 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) if (abo->kfd_bo) amdgpu_amdkfd_unreserve_memory_limit(abo); + /* We only remove the fence if the resv has been individualized. */ + WARN_ON_ONCE(bo->type == ttm_bo_type_kernel + && bo->base.resv != &bo->base._resv); + if (bo->base.resv == &bo->base._resv) + amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo); + if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node || !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) return; @@ -1403,30 +1420,52 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, } /** - * amdgpu_sync_wait_resv - Wait for BO reservation fences + * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences * - * @bo: buffer object + * @adev: amdgpu device pointer + * @resv: reservation object to sync to + * @sync_mode: synchronization mode * @owner: fence owner * @intr: Whether the wait is interruptible * + * Extracts the fences from the reservation object and waits for them to finish. + * * Returns: * 0 on success, errno otherwise. */ -int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr) +int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv, + enum amdgpu_sync_mode sync_mode, void *owner, + bool intr) { - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct amdgpu_sync sync; int r; amdgpu_sync_create(&sync); - amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false); + amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner); r = amdgpu_sync_wait(&sync, intr); amdgpu_sync_free(&sync); - return r; } /** + * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv + * @bo: buffer object to wait for + * @owner: fence owner + * @intr: Whether the wait is interruptible + * + * Wrapper to wait for fences in a BO. + * Returns: + * 0 on success, errno otherwise.
+ */ +int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + + return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv, + AMDGPU_SYNC_NE_OWNER, owner, intr); +} + +/** * amdgpu_bo_gpu_offset - return GPU offset of bo * @bo: amdgpu object for which we query the offset * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 36dec51d1ef1..5e39ecd8cc28 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -277,6 +277,9 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo); int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, bool shared); +int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv, + enum amdgpu_sync_mode sync_mode, void *owner, + bool intr); int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); int amdgpu_bo_validate(struct amdgpu_bo *bo); @@ -316,6 +319,7 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, struct seq_file *m); #endif +int amdgpu_debugfs_sa_init(struct amdgpu_device *adev); bool amdgpu_bo_support_uswc(u64 bo_flags); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index b03b1eb7ba04..abe94a55ecad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -41,8 +41,6 @@ #include "hwmgr.h" #define WIDTH_4K 3840 -static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); - static const struct cg_flag_name clocks[] = { {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, @@ -91,9 +89,13 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) adev->pm.ac_power = true; else adev->pm.ac_power = false; - if (adev->powerplay.pp_funcs->enable_bapm) + if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->enable_bapm) amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); mutex_unlock(&adev->pm.mutex); + + if (is_support_sw_smu(adev)) + smu_set_ac_dc(&adev->smu); } } @@ -3398,11 +3400,6 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) DRM_ERROR("failed to create device file unique_id\n"); return ret; } - ret = amdgpu_debugfs_pm_init(adev); - if (ret) { - DRM_ERROR("Failed to register debugfs file for dpm!\n"); - return ret; - } if ((adev->asic_type >= CHIP_VEGA10) && !(adev->flags & AMD_IS_APU)) { @@ -3669,7 +3666,7 @@ static const struct drm_info_list amdgpu_pm_info_list[] = { }; #endif -static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev) +int amdgpu_debugfs_pm_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h index 3da1da277805..5db0ef86e84c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h @@ -43,4 +43,6 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable); +int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); + #endif diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 146f96661b6b..deaa26808841 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -24,6 +24,7 @@ */ #include <linux/firmware.h> +#include <linux/dma-mapping.h> #include "amdgpu.h" #include "amdgpu_psp.h" @@ -38,6 +39,42 @@ static void psp_set_funcs(struct amdgpu_device *adev); +static int psp_sysfs_init(struct amdgpu_device *adev); +static void psp_sysfs_fini(struct amdgpu_device *adev); + +/* + * Due to DF Cstate management being centralized in PMFW, the firmware + * loading sequence is updated as below: + * - Load KDB + * - Load SYS_DRV + * - Load tOS + * - Load PMFW + * - Setup TMR + * - Load other non-psp fw + * - Load ASD + * - Load XGMI/RAS/HDCP/DTM TA if any + * + * This new sequence is required for + * - Arcturus + * - Navi12 and onwards + */ +static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + + psp->pmfw_centralized_cstate_management = false; + + if (amdgpu_sriov_vf(adev)) + return; + + if (adev->flags & AMD_IS_APU) + return; + + if ((adev->asic_type == CHIP_ARCTURUS) || + (adev->asic_type >= CHIP_NAVI12)) + psp->pmfw_centralized_cstate_management = true; +} + static int psp_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -75,6 +112,8 @@ static int psp_early_init(void *handle) psp->adev = adev; + psp_check_pmfw_centralized_cstate_management(psp); + return 0; } @@ -101,6 +140,13 @@ static int psp_sw_init(void *handle) return ret; } + if (adev->asic_type == CHIP_NAVI10) { + ret = psp_sysfs_init(adev); + if (ret) { + return ret; + } + } + return 0; } @@ -117,6 +163,10 @@ static int psp_sw_fini(void *handle) release_firmware(adev->psp.ta_fw); adev->psp.ta_fw = NULL; } + + if (adev->asic_type == CHIP_NAVI10) + psp_sysfs_fini(adev); + return 0; } @@ -150,6 +200,7 @@ psp_cmd_submit_buf(struct psp_context *psp, int ret; int index; int timeout = 2000; + bool ras_intr = false; mutex_lock(&psp->mutex); @@ -174,7 +225,8 @@ psp_cmd_submit_buf(struct psp_context *psp, * because gpu reset thread triggered and lock resource should * be released for psp resume sequence. */ - if (amdgpu_ras_intr_triggered()) + ras_intr = amdgpu_ras_intr_triggered(); + if (ras_intr) break; msleep(1); amdgpu_asic_invalidate_hdp(psp->adev, NULL); @@ -187,7 +239,7 @@ psp_cmd_submit_buf(struct psp_context *psp, * during psp initialization to avoid breaking hw_init and it doesn't * return -EINVAL.
*/ - if (psp->cmd_buf_mem->resp.status || !timeout) { + if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { if (ucode) DRM_WARN("failed to load ucode id (%d) ", ucode->ucode_id); @@ -558,7 +610,7 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id); } -static int psp_xgmi_terminate(struct psp_context *psp) +int psp_xgmi_terminate(struct psp_context *psp) { int ret; @@ -579,7 +631,7 @@ static int psp_xgmi_terminate(struct psp_context *psp) return 0; } -static int psp_xgmi_initialize(struct psp_context *psp) +int psp_xgmi_initialize(struct psp_context *psp) { struct ta_xgmi_shared_memory *xgmi_cmd; int ret; @@ -766,7 +818,7 @@ static int psp_ras_initialize(struct psp_context *psp) if (!psp->adev->psp.ta_ras_ucode_size || !psp->adev->psp.ta_ras_start_addr) { - dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n"); + dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n"); return 0; } @@ -850,7 +902,7 @@ static int psp_hdcp_initialize(struct psp_context *psp) if (!psp->adev->psp.ta_hdcp_ucode_size || !psp->adev->psp.ta_hdcp_start_addr) { - dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n"); + dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); return 0; } @@ -996,7 +1048,7 @@ static int psp_dtm_initialize(struct psp_context *psp) if (!psp->adev->psp.ta_dtm_ucode_size || !psp->adev->psp.ta_dtm_start_addr) { - dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n"); + dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); return 0; } @@ -1081,7 +1133,7 @@ static int psp_hw_start(struct psp_context *psp) struct amdgpu_device *adev = psp->adev; int ret; - if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) { + if (!amdgpu_sriov_vf(adev)) { if (psp->kdb_bin_size && (psp->funcs->bootloader_load_kdb != NULL)) { ret = psp_bootloader_load_kdb(psp); @@ -1116,10 +1168,17 @@ static int psp_hw_start(struct psp_context *psp) return ret; } - ret = psp_tmr_load(psp); - if (ret) { - DRM_ERROR("PSP load tmr failed!\n"); - return ret; + /* + * For those ASICs with DF Cstate management centralized + * to PMFW, TMR setup should be performed after PMFW + * loaded and before other non-psp firmware loaded. 
+ */ + if (!psp->pmfw_centralized_cstate_management) { + ret = psp_tmr_load(psp); + if (ret) { + DRM_ERROR("PSP load tmr failed!\n"); + return ret; + } } return 0; @@ -1316,9 +1375,10 @@ static int psp_np_fw_load(struct psp_context *psp) struct amdgpu_firmware_info *ucode; struct amdgpu_device* adev = psp->adev; - if (psp->autoload_supported) { + if (psp->autoload_supported || + psp->pmfw_centralized_cstate_management) { ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; - if (!ucode->fw) + if (!ucode->fw || amdgpu_sriov_vf(adev)) goto out; ret = psp_execute_np_fw_load(psp, ucode); @@ -1326,6 +1386,14 @@ static int psp_np_fw_load(struct psp_context *psp) return ret; } + if (psp->pmfw_centralized_cstate_management) { + ret = psp_tmr_load(psp); + if (ret) { + DRM_ERROR("PSP load tmr failed!\n"); + return ret; + } + } + out: for (i = 0; i < adev->firmware.max_ucodes; i++) { ucode = &adev->firmware.ucode[i]; @@ -1333,7 +1401,9 @@ out: continue; if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && - (psp_smu_reload_quirk(psp) || psp->autoload_supported)) + (psp_smu_reload_quirk(psp) || + psp->autoload_supported || + psp->pmfw_centralized_cstate_management)) continue; if (amdgpu_sriov_vf(adev) && @@ -1444,16 +1514,6 @@ skip_memalloc: return ret; } - if (adev->gmc.xgmi.num_physical_nodes > 1) { - ret = psp_xgmi_initialize(psp); - /* Warning the XGMI seesion initialize failure - * Instead of stop driver initialization - */ - if (ret) - dev_err(psp->adev->dev, - "XGMI: Failed to initialize XGMI session\n"); - } - if (psp->adev->psp.ta_fw) { ret = psp_ras_initialize(psp); if (ret) @@ -1518,10 +1578,6 @@ static int psp_hw_fini(void *handle) void *tmr_buf; void **pptr; - if (adev->gmc.xgmi.num_physical_nodes > 1 && - psp->xgmi_context.initialized == 1) - psp_xgmi_terminate(psp); - if (psp->adev->psp.ta_fw) { psp_ras_terminate(psp); psp_dtm_terminate(psp); @@ -1777,6 +1833,97 @@ static int psp_set_powergating_state(void *handle, return 0; } +static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + uint32_t fw_ver; + int ret; + + if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { + DRM_INFO("PSP block is not ready yet."); + return -EBUSY; + } + + mutex_lock(&adev->psp.mutex); + ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); + mutex_unlock(&adev->psp.mutex); + + if (ret) { + DRM_ERROR("Failed to read USBC PD FW, err = %d", ret); + return ret; + } + + return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver); +} + +static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + void *cpu_addr; + dma_addr_t dma_addr; + int ret; + char fw_name[100]; + const struct firmware *usbc_pd_fw; + + if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { + DRM_INFO("PSP block is not ready yet."); + return -EBUSY; + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); + ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); + if (ret) + goto fail; + + /* We need contiguous physical mem to place the FW for psp to access */ + cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL); + + ret = dma_mapping_error(adev->dev, dma_addr); + if (ret) + goto rel_buf; + + memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); + + /* + * x86 specific 
workaround. + * Without it the buffer is invisible in PSP. + * + * TODO Remove once PSP starts snooping CPU cache + */ +#ifdef CONFIG_X86 + clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1))); +#endif + + mutex_lock(&adev->psp.mutex); + ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr); + mutex_unlock(&adev->psp.mutex); + +rel_buf: + dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr); + release_firmware(usbc_pd_fw); + +fail: + if (ret) { + DRM_ERROR("Failed to load USBC PD FW, err = %d", ret); + return ret; + } + + return count; +} + +static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR, + psp_usbc_pd_fw_sysfs_read, + psp_usbc_pd_fw_sysfs_write); + + + const struct amd_ip_funcs psp_ip_funcs = { .name = "psp", .early_init = psp_early_init, @@ -1795,6 +1942,21 @@ const struct amd_ip_funcs psp_ip_funcs = { .set_powergating_state = psp_set_powergating_state, }; +static int psp_sysfs_init(struct amdgpu_device *adev) +{ + int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw); + + if (ret) + DRM_ERROR("Failed to create USBC PD FW control file!"); + + return ret; +} + +static void psp_sysfs_fini(struct amdgpu_device *adev) +{ + device_remove_file(adev->dev, &dev_attr_usbc_pd_fw); +} + static const struct amdgpu_psp_funcs psp_funcs = { .check_fw_loading_status = psp_check_fw_loading_status, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 611021514c52..297435c0c7c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -114,6 +114,8 @@ struct psp_funcs int (*mem_training)(struct psp_context *psp, uint32_t ops); uint32_t (*ring_get_wptr)(struct psp_context *psp); void (*ring_set_wptr)(struct psp_context *psp, uint32_t value); + int (*load_usbc_pd_fw)(struct psp_context *psp, dma_addr_t dma_addr); + int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver); }; #define AMDGPU_XGMI_MAX_CONNECTED_NODES 64 @@ -264,6 +266,8 @@ struct psp_context atomic_t fence_value; /* flag to mark whether gfx fw autoload is supported or not */ bool autoload_supported; + /* flag to mark whether df cstate management centralized to PMFW */ + bool pmfw_centralized_cstate_management; /* xgmi ta firmware and buffer */ const struct firmware *ta_fw; @@ -349,6 +353,14 @@ struct amdgpu_psp_funcs { #define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp)) #define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value)) +#define psp_load_usbc_pd_fw(psp, dma_addr) \ + ((psp)->funcs->load_usbc_pd_fw ? \ + (psp)->funcs->load_usbc_pd_fw((psp), (dma_addr)) : -EINVAL) + +#define psp_read_usbc_pd_fw(psp, fw_ver) \ + ((psp)->funcs->read_usbc_pd_fw ? 
\ + (psp)->funcs->read_usbc_pd_fw((psp), fw_ver) : -EINVAL) + extern const struct amd_ip_funcs psp_ip_funcs; extern const struct amdgpu_ip_block_version psp_v3_1_ip_block; @@ -362,6 +374,8 @@ int psp_gpu_reset(struct amdgpu_device *adev); int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx, uint64_t cmd_gpu_addr, int cmd_size); +int psp_xgmi_initialize(struct psp_context *psp); +int psp_xgmi_terminate(struct psp_context *psp); int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index cef94e2169fe..ab379b44679c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -31,6 +31,7 @@ #include "amdgpu.h" #include "amdgpu_ras.h" #include "amdgpu_atomfirmware.h" +#include "amdgpu_xgmi.h" #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" const char *ras_error_string[] = { @@ -280,6 +281,11 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * struct ras_debug_if data; int ret = 0; + if (amdgpu_ras_intr_triggered()) { + DRM_WARN("RAS WARN: error injection currently inaccessible\n"); + return size; + } + ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data); if (ret) return -EINVAL; @@ -393,6 +399,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, .head = obj->head, }; + if (amdgpu_ras_intr_triggered()) + return snprintf(buf, PAGE_SIZE, + "Query currently inaccessible\n"); + if (amdgpu_ras_error_query(obj->adev, &info)) return -EINVAL; @@ -720,6 +730,9 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, if (adev->nbio.funcs->query_ras_error_count) adev->nbio.funcs->query_ras_error_count(adev, &err_data); break; + case AMDGPU_RAS_BLOCK__XGMI_WAFL: + amdgpu_xgmi_query_ras_error_count(adev, &err_data); + break; default: break; } @@ -742,20 +755,6 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, return 0; } -uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr) -{ - uint32_t df_inst_id; - - if ((!adev->df.funcs) || - (!adev->df.funcs->get_df_inst_id) || - (!adev->df.funcs->get_dram_base_addr)) - return addr; - - df_inst_id = adev->df.funcs->get_df_inst_id(adev); - - return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id); -} - /* wrapper of psp_ras_trigger_error */ int amdgpu_ras_error_inject(struct amdgpu_device *adev, struct ras_inject_if *info) @@ -775,8 +774,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, /* Calculate XGMI relative offset */ if (adev->gmc.xgmi.num_physical_nodes > 1) { - block_info.address = get_xgmi_relative_phy_addr(adev, - block_info.address); + block_info.address = + amdgpu_xgmi_get_relative_phy_addr(adev, + block_info.address); } switch (info->head.block) { @@ -1122,6 +1122,32 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, &amdgpu_ras_debugfs_ops); } +void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_manager *obj; + struct ras_fs_if fs_info; + + /* + * it won't be called in resume path, no need to check + * suspend and gpu reset status + */ + if (!con) + return; + + amdgpu_ras_debugfs_create_ctrl_node(adev); + + list_for_each_entry(obj, &con->head, node) { + if (amdgpu_ras_is_supported(adev, obj->head.block) && + (obj->attr_inuse == 1)) { + sprintf(fs_info.debugfs_name, "%s_err_inject", + ras_block_str(obj->head.block)); + fs_info.head = 
obj->head; + amdgpu_ras_debugfs_create(adev, &fs_info); + } + } +} + void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, struct ras_common_if *head) { @@ -1154,7 +1180,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) static int amdgpu_ras_fs_init(struct amdgpu_device *adev) { amdgpu_ras_sysfs_create_feature_node(adev); - amdgpu_ras_debugfs_create_ctrl_node(adev); return 0; } @@ -1319,6 +1344,33 @@ static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev) } /* ih end */ +/* traverse all IPs except NBIO to query error counters */ +static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_manager *obj; + + if (!con) + return; + + list_for_each_entry(obj, &con->head, node) { + struct ras_query_if info = { + .head = obj->head, + }; + + /* + * The PCIE_BIF IP has a dedicated isr for the ras controller + * interrupt, and the specific ras counter query will be + * done in that isr. So skip such blocks in the common + * sync flood interrupt isr. + */ + if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF) + continue; + + amdgpu_ras_error_query(adev, &info); + } +} + /* recovery begin */ /* return 0 on success. @@ -1372,6 +1424,22 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) { struct amdgpu_ras *ras = container_of(work, struct amdgpu_ras, recovery_work); + struct amdgpu_device *remote_adev = NULL; + struct amdgpu_device *adev = ras->adev; + struct list_head device_list, *device_list_handle = NULL; + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false); + + /* Build list of devices to query RAS related errors */ + if (hive && adev->gmc.xgmi.num_physical_nodes > 1) { + device_list_handle = &hive->device_list; + } else { + list_add_tail(&adev->gmc.xgmi.head, &device_list); + device_list_handle = &device_list; + } + + list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) { + amdgpu_ras_log_on_err_counter(remote_adev); + } if (amdgpu_device_should_recover_gpu(ras->adev)) amdgpu_device_gpu_recover(ras->adev, 0); @@ -1713,18 +1781,30 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev, *hw_supported = 0; *supported = 0; - if (amdgpu_sriov_vf(adev) || + if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw || (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS)) return; - if (adev->is_atom_fw && - (amdgpu_atomfirmware_mem_ecc_supported(adev) || - amdgpu_atomfirmware_sram_ecc_supported(adev))) - *hw_supported = AMDGPU_RAS_BLOCK_MASK; + if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { + DRM_INFO("HBM ECC is active.\n"); + *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC | + 1 << AMDGPU_RAS_BLOCK__DF); + } else + DRM_INFO("HBM ECC is not present.\n"); + + if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { + DRM_INFO("SRAM ECC is active.\n"); + *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC | + 1 << AMDGPU_RAS_BLOCK__DF); + } else + DRM_INFO("SRAM ECC is not present.\n"); + + /* hw_supported needs to be aligned with RAS block mask. */ + *hw_supported &= AMDGPU_RAS_BLOCK_MASK; *supported = amdgpu_ras_enable == 0 ?
- 0 : *hw_supported & amdgpu_ras_mask; + 0 : *hw_supported & amdgpu_ras_mask; } int amdgpu_ras_init(struct amdgpu_device *adev) @@ -1825,8 +1905,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, goto interrupt; } - amdgpu_ras_debugfs_create(adev, fs_info); - r = amdgpu_ras_sysfs_create(adev, fs_info); if (r) goto sysfs; @@ -1835,7 +1913,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, cleanup: amdgpu_ras_sysfs_remove(adev, ras_block); sysfs: - amdgpu_ras_debugfs_remove(adev, ras_block); if (ih_info->cb) amdgpu_ras_interrupt_remove_handler(adev, ih_info); interrupt: @@ -1852,7 +1929,6 @@ void amdgpu_ras_late_fini(struct amdgpu_device *adev, return; amdgpu_ras_sysfs_remove(adev, ras_block); - amdgpu_ras_debugfs_remove(adev, ras_block); if (ih_info->cb) amdgpu_ras_interrupt_remove_handler(adev, ih_info); amdgpu_ras_feature_enable(adev, ras_block, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index a5fe29a9373e..55c3eceb390d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -592,6 +592,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, struct ras_fs_if *head); +void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev); + void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, struct ras_common_if *head); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 2a8e04895595..c0096097bbcf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -25,10 +25,11 @@ #include "amdgpu.h" #include "amdgpu_ras.h" #include <linux/bits.h> -#include "smu_v11_0_i2c.h" +#include "atom.h" -#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8 -#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0 +#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0 +#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8 +#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342 0xA0 /* * The 2 macros below represent the actual size in bytes that * @@ -55,6 +56,45 @@ #define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev +static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev, + uint16_t *i2c_addr) +{ + struct atom_context *atom_ctx = adev->mode_info.atom_context; + + if (!i2c_addr || !atom_ctx) + return false; + + if (strnstr(atom_ctx->vbios_version, + "D342", + sizeof(atom_ctx->vbios_version))) + *i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342; + else + *i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS; + + return true; +} + +static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev, + uint16_t *i2c_addr) +{ + if (!i2c_addr) + return false; + + switch (adev->asic_type) { + case CHIP_VEGA20: + *i2c_addr = EEPROM_I2C_TARGET_ADDR_VEGA20; + break; + + case CHIP_ARCTURUS: + return __get_eeprom_i2c_addr_arct(adev, i2c_addr); + + default: + return false; + } + + return true; +} + static void __encode_table_header_to_buff(struct amdgpu_ras_eeprom_table_header *hdr, unsigned char *buff) { @@ -83,6 +123,7 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control, unsigned char *buff) { int ret = 0; + struct amdgpu_device *adev = to_amdgpu_device(control); struct i2c_msg msg = { .addr = 0, .flags = 0, @@ -96,15 +137,13 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control, msg.addr = control->i2c_address; - ret = i2c_transfer(&control->eeprom_accessor, &msg, 1); + ret =
i2c_transfer(&adev->pm.smu_i2c, &msg, 1); if (ret < 1) DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret); return ret; } - - static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control) { int i; @@ -212,32 +251,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) .buf = buff, }; - mutex_init(&control->tbl_mutex); - - switch (adev->asic_type) { - case CHIP_VEGA20: - control->i2c_address = EEPROM_I2C_TARGET_ADDR_VEGA20; - ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor); - break; - - case CHIP_ARCTURUS: - control->i2c_address = EEPROM_I2C_TARGET_ADDR_ARCTURUS; - ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor); - break; + /* Verify i2c adapter is initialized */ + if (!adev->pm.smu_i2c.algo) + return -ENOENT; - default: - return 0; - } + if (!__get_eeprom_i2c_addr(adev, &control->i2c_address)) + return -EINVAL; - if (ret) { - DRM_ERROR("Failed to init I2C controller, ret:%d", ret); - return ret; - } + mutex_init(&control->tbl_mutex); msg.addr = control->i2c_address; - /* Read/Create table header from EEPROM address 0 */ - ret = i2c_transfer(&control->eeprom_accessor, &msg, 1); + ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1); if (ret < 1) { DRM_ERROR("Failed to read EEPROM table header, ret:%d", ret); return ret; @@ -263,23 +288,6 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) return ret == 1 ? 0 : -EIO; } -void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control) -{ - struct amdgpu_device *adev = to_amdgpu_device(control); - - switch (adev->asic_type) { - case CHIP_VEGA20: - smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor); - break; - case CHIP_ARCTURUS: - smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor); - break; - - default: - return; - } -} - static void __encode_table_record_to_buff(struct amdgpu_ras_eeprom_control *control, struct eeprom_table_record *record, unsigned char *buff) @@ -436,7 +444,7 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, control->next_addr += EEPROM_TABLE_RECORD_SIZE; } - ret = i2c_transfer(&control->eeprom_accessor, msgs, num); + ret = i2c_transfer(&adev->pm.smu_i2c, msgs, num); if (ret < 1) { DRM_ERROR("Failed to process EEPROM table records, ret:%d", ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index ca78f812d436..7e8647a05df7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -44,7 +44,6 @@ struct amdgpu_ras_eeprom_table_header { struct amdgpu_ras_eeprom_control { struct amdgpu_ras_eeprom_table_header tbl_hdr; - struct i2c_adapter eeprom_accessor; uint32_t next_addr; unsigned int num_recs; struct mutex tbl_mutex; @@ -79,7 +78,6 @@ struct eeprom_table_record { }__attribute__((__packed__)); int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control); -void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control); int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control); int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index e5c83e164d82..a7e1d0425ed0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -48,9 +48,6 @@ * wptr. The GPU then starts fetching commands and executes * them until the pointers are equal again. 
*/ -static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, - struct amdgpu_ring *ring); -static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring); /** * amdgpu_ring_alloc - allocate space on the ring buffer @@ -154,76 +151,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) } /** - * amdgpu_ring_priority_put - restore a ring's priority - * - * @ring: amdgpu_ring structure holding the information - * @priority: target priority - * - * Release a request for executing at @priority - */ -void amdgpu_ring_priority_put(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - int i; - - if (!ring->funcs->set_priority) - return; - - if (atomic_dec_return(&ring->num_jobs[priority]) > 0) - return; - - /* no need to restore if the job is already at the lowest priority */ - if (priority == DRM_SCHED_PRIORITY_NORMAL) - return; - - mutex_lock(&ring->priority_mutex); - /* something higher prio is executing, no need to decay */ - if (ring->priority > priority) - goto out_unlock; - - /* decay priority to the next level with a job available */ - for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) { - if (i == DRM_SCHED_PRIORITY_NORMAL - || atomic_read(&ring->num_jobs[i])) { - ring->priority = i; - ring->funcs->set_priority(ring, i); - break; - } - } - -out_unlock: - mutex_unlock(&ring->priority_mutex); -} - -/** - * amdgpu_ring_priority_get - change the ring's priority - * - * @ring: amdgpu_ring structure holding the information - * @priority: target priority - * - * Request a ring's priority to be raised to @priority (refcounted). - */ -void amdgpu_ring_priority_get(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - if (!ring->funcs->set_priority) - return; - - if (atomic_inc_return(&ring->num_jobs[priority]) <= 0) - return; - - mutex_lock(&ring->priority_mutex); - if (priority <= ring->priority) - goto out_unlock; - - ring->priority = priority; - ring->funcs->set_priority(ring, priority); - -out_unlock: - mutex_unlock(&ring->priority_mutex); -} - -/** * amdgpu_ring_init - init driver ring struct. 
* * @adev: amdgpu_device pointer @@ -334,10 +261,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i) atomic_set(&ring->num_jobs[i], 0); - if (amdgpu_debugfs_ring_init(adev, ring)) { - DRM_ERROR("Failed to register debugfs file for rings !\n"); - } - return 0; } @@ -351,12 +274,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, */ void amdgpu_ring_fini(struct amdgpu_ring *ring) { - ring->sched.ready = false; /* Not to finish a ring which is not initialized */ if (!(ring->adev) || !(ring->adev->rings[ring->idx])) return; + ring->sched.ready = false; + amdgpu_device_wb_free(ring->adev, ring->rptr_offs); amdgpu_device_wb_free(ring->adev, ring->wptr_offs); @@ -367,8 +291,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) &ring->gpu_addr, (void **)&ring->ring); - amdgpu_debugfs_ring_fini(ring); - dma_fence_put(ring->vmid_wait); ring->vmid_wait = NULL; ring->me = 0; @@ -485,8 +407,8 @@ static const struct file_operations amdgpu_debugfs_ring_fops = { #endif -static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, - struct amdgpu_ring *ring) +int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, + struct amdgpu_ring *ring) { #if defined(CONFIG_DEBUG_FS) struct drm_minor *minor = adev->ddev->primary; @@ -507,13 +429,6 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, return 0; } -static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring) -{ -#if defined(CONFIG_DEBUG_FS) - debugfs_remove(ring->ent); -#endif -} - /** * amdgpu_ring_test_helper - tests ring and set sched readiness status * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 930316e60155..9a443013d70d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -167,9 +167,6 @@ struct amdgpu_ring_funcs { uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask); void (*emit_tmz)(struct amdgpu_ring *ring, bool start); - /* priority functions */ - void (*set_priority) (struct amdgpu_ring *ring, - enum drm_sched_priority priority); /* Try to soft recover the ring to make the fence signal */ void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid); int (*preempt_ib)(struct amdgpu_ring *ring); @@ -222,6 +219,7 @@ struct amdgpu_ring { struct mutex priority_mutex; /* protected by priority_mutex */ int priority; + bool has_high_prio; #if defined(CONFIG_DEBUG_FS) struct dentry *ent; @@ -258,10 +256,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_ring_commit(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring); -void amdgpu_ring_priority_get(struct amdgpu_ring *ring, - enum drm_sched_priority priority); -void amdgpu_ring_priority_put(struct amdgpu_ring *ring, - enum drm_sched_priority priority); int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned ring_size, struct amdgpu_irq_src *irq_src, unsigned irq_type); @@ -328,4 +322,8 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, int amdgpu_ring_test_helper(struct amdgpu_ring *ring); +int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, + struct amdgpu_ring *ring); +void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index d3d4707f2168..60bb3e8b3118 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -126,6 +126,9 @@ struct amdgpu_rlc_funcs { void (*stop)(struct amdgpu_device *adev); void (*reset)(struct amdgpu_device *adev); void (*start)(struct amdgpu_device *adev); + void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid); + void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v); + bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg); }; struct amdgpu_rlc { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index a2ee30b16212..250a309e4dee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -70,7 +70,8 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, uint32_t index = 0; int r; - if (vmid == 0 || !amdgpu_mcbp) + /* don't enable OS preemption on SDMA under SRIOV */ + if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp) return 0; r = amdgpu_sdma_get_index_from_ring(ring, &index); @@ -92,7 +93,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info; struct ras_fs_if fs_info = { .sysfs_name = "sdma_err_count", - .debugfs_name = "sdma_err_inject", }; if (!ih_info) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 485335267d78..4b352206354b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -56,6 +56,7 @@ struct amdgpu_sdma_ras_funcs { void (*ras_fini)(struct amdgpu_device *adev); int (*query_ras_error_count)(struct amdgpu_device *adev, uint32_t instance, void *ras_error_status); + void (*reset_ras_error_count)(struct amdgpu_device *adev); }; struct amdgpu_sdma { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index a09b6b9c27d1..b86392253696 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -202,18 +202,17 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence) * * @sync: sync object to add fences from reservation object to * @resv: reservation object with embedded fence - * @explicit_sync: true if we should only sync to the exclusive fence + * @mode: how owner affects which fences we sync to + * @owner: owner of the planned job submission * * Sync to the fence */ -int amdgpu_sync_resv(struct amdgpu_device *adev, - struct amdgpu_sync *sync, - struct dma_resv *resv, - void *owner, bool explicit_sync) +int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, + struct dma_resv *resv, enum amdgpu_sync_mode mode, + void *owner) { struct dma_resv_list *flist; struct dma_fence *f; - void *fence_owner; unsigned i; int r = 0; @@ -229,30 +228,46 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, return r; for (i = 0; i < flist->shared_count; ++i) { + void *fence_owner; + f = rcu_dereference_protected(flist->shared[i], dma_resv_held(resv)); + + fence_owner = amdgpu_sync_get_owner(f); + + /* Always sync to moves, no matter what */ + if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) { + r = amdgpu_sync_fence(sync, f, false); + if (r) + break; + } + /* We only want to trigger KFD eviction fences on * evict or move jobs. Skip KFD fences otherwise. 
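The new amdgpu_rlc_funcs hooks above (rlcg_wreg, is_rlcg_access_range) pair with the amdgpu_mm_wreg_mmio_rlc() declaration added to amdgpu.h. A sketch of the intended dispatch, assuming a made-up register range and an invented ops table; this is an illustration of the pattern, not the driver's actual wiring.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rlc_funcs {
	void (*rlcg_wreg)(uint32_t offset, uint32_t v);
	bool (*is_rlcg_access_range)(uint32_t offset);
};

static void model_rlcg_wreg(uint32_t offset, uint32_t v)
{
	printf("RLC-assisted write: reg 0x%05X = 0x%08X\n", offset, v);
}

static bool model_in_range(uint32_t offset)
{
	return offset >= 0x4000 && offset < 0x5000;	/* made-up range */
}

static const struct rlc_funcs funcs = {
	.rlcg_wreg = model_rlcg_wreg,
	.is_rlcg_access_range = model_in_range,
};

/* under SR-IOV full access, RLCG-protected registers go through the RLC */
static void wreg_mmio_rlc(uint32_t offset, uint32_t v, bool sriov_fullaccess)
{
	if (sriov_fullaccess && funcs.is_rlcg_access_range &&
	    funcs.is_rlcg_access_range(offset))
		funcs.rlcg_wreg(offset, v);
	else
		printf("direct MMIO write: reg 0x%05X = 0x%08X\n", offset, v);
}

int main(void)
{
	wreg_mmio_rlc(0x4100, 0x1, true);	/* routed through the RLC */
	wreg_mmio_rlc(0x0100, 0x1, true);	/* plain MMIO */
	return 0;
}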
*/ - fence_owner = amdgpu_sync_get_owner(f); if (fence_owner == AMDGPU_FENCE_OWNER_KFD && owner != AMDGPU_FENCE_OWNER_UNDEFINED) continue; - if (amdgpu_sync_same_dev(adev, f)) { - /* VM updates only sync with moves but not with user - * command submissions or KFD evictions fences - */ - if (owner == AMDGPU_FENCE_OWNER_VM && - fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) + /* Ignore fences depending on the sync mode */ + switch (mode) { + case AMDGPU_SYNC_ALWAYS: + break; + + case AMDGPU_SYNC_NE_OWNER: + if (amdgpu_sync_same_dev(adev, f) && + fence_owner == owner) continue; + break; - /* Ignore fence from the same owner and explicit one as - * long as it isn't undefined. - */ - if (owner != AMDGPU_FENCE_OWNER_UNDEFINED && - (fence_owner == owner || explicit_sync)) + case AMDGPU_SYNC_EQ_OWNER: + if (amdgpu_sync_same_dev(adev, f) && + fence_owner != owner) continue; + break; + + case AMDGPU_SYNC_EXPLICIT: + continue; } r = amdgpu_sync_fence(sync, f, false); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h index d62c2b81d92b..cfbe5788b8b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h @@ -31,6 +31,13 @@ struct dma_resv; struct amdgpu_device; struct amdgpu_ring; +enum amdgpu_sync_mode { + AMDGPU_SYNC_ALWAYS, + AMDGPU_SYNC_NE_OWNER, + AMDGPU_SYNC_EQ_OWNER, + AMDGPU_SYNC_EXPLICIT +}; + /* * Container for fences used to sync command submissions. */ @@ -43,11 +50,9 @@ void amdgpu_sync_create(struct amdgpu_sync *sync); int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f, bool explicit); int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence); -int amdgpu_sync_resv(struct amdgpu_device *adev, - struct amdgpu_sync *sync, - struct dma_resv *resv, - void *owner, - bool explicit_sync); +int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, + struct dma_resv *resv, enum amdgpu_sync_mode mode, + void *owner); struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct amdgpu_ring *ring); struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c6e9885c071f..6309ff72bd78 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -60,20 +60,14 @@ #include "amdgpu_ras.h" #include "bif/bif_4_1_d.h" +#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128 + static int amdgpu_map_buffer(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem, unsigned num_pages, uint64_t offset, unsigned window, struct amdgpu_ring *ring, uint64_t *addr); -static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); -static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); - -static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) -{ - return 0; -} - /** * amdgpu_init_mem_type - Initialize a memory manager for a specific type of * memory request. 
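The switch introduced in amdgpu_sync_resv() above reduces to a small decision function. A standalone model of the four sync modes, with a bool standing in for the amdgpu_sync_fence() call; the always-synced move fences and the KFD eviction-fence special case are handled before this switch and are left out here.

#include <stdbool.h>
#include <stdio.h>

enum sync_mode { SYNC_ALWAYS, SYNC_NE_OWNER, SYNC_EQ_OWNER, SYNC_EXPLICIT };

static bool must_sync(enum sync_mode mode, bool same_dev,
		      const void *fence_owner, const void *owner)
{
	switch (mode) {
	case SYNC_ALWAYS:
		return true;
	case SYNC_NE_OWNER:	/* skip fences from the same owner */
		return !(same_dev && fence_owner == owner);
	case SYNC_EQ_OWNER:	/* only sync to fences from the same owner */
		return !(same_dev && fence_owner != owner);
	case SYNC_EXPLICIT:	/* rely on explicitly added fences only */
		return false;
	}
	return true;
}

int main(void)
{
	int a, b;

	printf("NE_OWNER, own fence:   %d\n",
	       must_sync(SYNC_NE_OWNER, true, &a, &a));	/* 0: skipped */
	printf("EQ_OWNER, own fence:   %d\n",
	       must_sync(SYNC_EQ_OWNER, true, &a, &a));	/* 1: synced */
	printf("EQ_OWNER, other owner: %d\n",
	       must_sync(SYNC_EQ_OWNER, true, &a, &b));	/* 0: skipped */
	return 0;
}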
@@ -776,7 +770,6 @@ struct amdgpu_ttm_tt { static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = { (1 << 0), /* HMM_PFN_VALID */ (1 << 1), /* HMM_PFN_WRITE */ - 0 /* HMM_PFN_DEVICE_PRIVATE */ }; static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = { @@ -857,7 +850,7 @@ retry: range->notifier_seq = mmu_interval_read_begin(&bo->notifier); down_read(&mm->mmap_sem); - r = hmm_range_fault(range, 0); + r = hmm_range_fault(range); up_read(&mm->mmap_sem); if (unlikely(r <= 0)) { /* @@ -1034,7 +1027,7 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, struct amdgpu_ttm_tt *gtt = (void *)ttm; int r; - if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) { + if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) { uint64_t page_idx = 1; r = amdgpu_gart_bind(adev, gtt->offset, page_idx, @@ -1042,7 +1035,10 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, if (r) goto gart_bind_fail; - /* Patch mtype of the second part BO */ + /* The memory type of the first page defaults to UC. Now + * modify the memory type to NC from the second page of + * the BO onward. + */ flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC); @@ -1596,7 +1592,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, while (len && pos < adev->gmc.mc_vram_size) { uint64_t aligned_pos = pos & ~(uint64_t)3; - uint32_t bytes = 4 - (pos & 3); + uint64_t bytes = 4 - (pos & 3); uint32_t shift = (pos & 3) * 8; uint32_t mask = 0xffffffff << shift; @@ -1605,20 +1601,28 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, bytes = len; } - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); - WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31); - if (!write || mask != 0xffffffff) - value = RREG32_NO_KIQ(mmMM_DATA); - if (write) { - value &= ~mask; - value |= (*(uint32_t *)buf << shift) & mask; - WREG32_NO_KIQ(mmMM_DATA, value); - } - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - if (!write) { - value = (value & mask) >> shift; - memcpy(buf, &value, bytes); + if (mask != 0xffffffff) { + spin_lock_irqsave(&adev->mmio_idx_lock, flags); + WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); + WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31); + if (!write || mask != 0xffffffff) + value = RREG32_NO_KIQ(mmMM_DATA); + if (write) { + value &= ~mask; + value |= (*(uint32_t *)buf << shift) & mask; + WREG32_NO_KIQ(mmMM_DATA, value); + } + spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + if (!write) { + value = (value & mask) >> shift; + memcpy(buf, &value, bytes); + } + } else { + bytes = (nodes->start + nodes->size) << PAGE_SHIFT; + bytes = min(bytes - pos, (uint64_t)len & ~0x3ull); + + amdgpu_device_vram_access(adev, pos, (uint32_t *)buf, + bytes, write); } ret += bytes; @@ -1638,7 +1642,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = { .ttm_tt_create = &amdgpu_ttm_tt_create, .ttm_tt_populate = &amdgpu_ttm_tt_populate, .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, - .invalidate_caches = &amdgpu_invalidate_caches, .init_mem_type = &amdgpu_init_mem_type, .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, .evict_flags = &amdgpu_evict_flags, @@ -1836,9 +1839,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) *The reserved vram for memory training must be pinned to the specified *place on the VRAM, so reserve it early. 
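The amdgpu_ttm_access_memory() rework above keeps the MM_INDEX/MM_DATA window only for unaligned edges and hands aligned spans to amdgpu_device_vram_access(). A toy model of the indirect window itself, with an array standing in for VRAM and plain variables for the index/data registers; bit 31 of the index plays the aperture-select flag, as in the hunk.

#include <stdint.h>
#include <stdio.h>

static uint32_t vram[256];		/* stand-in for VRAM */
static uint64_t mm_index;		/* MM_INDEX + MM_INDEX_HI pair */

static void wreg_index(uint64_t v)  { mm_index = v & ~0x80000000ull; }
static uint32_t rreg_data(void)     { return vram[mm_index / 4]; }
static void wreg_data(uint32_t v)   { vram[mm_index / 4] = v; }

/* read one dword at an arbitrary byte offset, the pre-patch way */
static uint32_t mmio_read_dword(uint64_t pos)
{
	wreg_index((pos & ~3ull) | 0x80000000ull);
	return rreg_data();
}

int main(void)
{
	wreg_index(0x14 | 0x80000000ull);
	wreg_data(0xDEADBEEF);
	printf("dword at 0x14: 0x%08X\n", mmio_read_dword(0x14));
	return 0;
}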
*/ - r = amdgpu_ttm_training_reserve_vram_init(adev); - if (r) - return r; + if (!amdgpu_sriov_vf(adev)) { + r = amdgpu_ttm_training_reserve_vram_init(adev); + if (r) + return r; + } /* allocate memory as required for VGA * This is used for VGA emulation and pre-OS scanout buffers to @@ -1911,12 +1916,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } - /* Register debugfs entries for amdgpu_ttm */ - r = amdgpu_ttm_debugfs_init(adev); - if (r) { - DRM_ERROR("Failed to init debugfs\n"); - return r; - } return 0; } @@ -1938,7 +1937,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) if (!adev->mman.initialized) return; - amdgpu_ttm_debugfs_fini(adev); amdgpu_ttm_training_reserve_vram_fini(adev); /* return the IP Discovery TMR memory back to VRAM */ amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); @@ -2113,8 +2111,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, } if (resv) { r = amdgpu_sync_resv(adev, &job->sync, resv, - AMDGPU_FENCE_OWNER_UNDEFINED, - false); + AMDGPU_SYNC_ALWAYS, + AMDGPU_FENCE_OWNER_UNDEFINED); if (r) { DRM_ERROR("sync failed (%d).\n", r); goto error_free; @@ -2198,7 +2196,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, if (resv) { r = amdgpu_sync_resv(adev, &job->sync, resv, - AMDGPU_FENCE_OWNER_UNDEFINED, false); + AMDGPU_SYNC_ALWAYS, + AMDGPU_FENCE_OWNER_UNDEFINED); if (r) { DRM_ERROR("sync failed (%d).\n", r); goto error_free; @@ -2279,7 +2278,6 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, { struct amdgpu_device *adev = file_inode(f)->i_private; ssize_t result = 0; - int r; if (size & 0x3 || *pos & 0x3) return -EINVAL; @@ -2287,27 +2285,19 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, if (*pos >= adev->gmc.mc_vram_size) return -ENXIO; + size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos)); while (size) { - unsigned long flags; - uint32_t value; - - if (*pos >= adev->gmc.mc_vram_size) - return result; - - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); - WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); - value = RREG32_NO_KIQ(mmMM_DATA); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4); + uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ]; - r = put_user(value, (uint32_t *)buf); - if (r) - return r; + amdgpu_device_vram_access(adev, *pos, value, bytes, false); + if (copy_to_user(buf, value, bytes)) + return -EFAULT; - result += 4; - buf += 4; - *pos += 4; - size -= 4; + result += bytes; + buf += bytes; + *pos += bytes; + size -= bytes; } return result; @@ -2544,7 +2534,7 @@ static const struct { #endif -static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) +int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) unsigned count; @@ -2579,13 +2569,3 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) return 0; #endif } - -static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) -{ -#if defined(CONFIG_DEBUG_FS) - unsigned i; - - for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++) - debugfs_remove(adev->mman.debugfs_entries[i]); -#endif -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 0dddedc06ae3..bd05bbb4878d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -133,4 +133,6 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem); uint64_t 
amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, struct ttm_mem_reg *mem); +int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index f4d40855147b..9dd51f0d2c11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -28,7 +28,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev) int r; struct ras_fs_if fs_info = { .sysfs_name = "umc_err_count", - .debugfs_name = "umc_err_inject", }; struct ras_ih_if ih_info = { .cb = amdgpu_umc_process_ras_data_cb, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index a92f3b18e657..5fd32ad1c575 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1099,7 +1099,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, goto err_free; } else { r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv, - AMDGPU_FENCE_OWNER_UNDEFINED, false); + AMDGPU_SYNC_ALWAYS, + AMDGPU_FENCE_OWNER_UNDEFINED); if (r) goto err_free; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index f96464e2c157..a41272fbcba2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -493,14 +493,9 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) { - struct amdgpu_device *adev = ring->adev; struct dma_fence *fence; long r; - /* temporarily disable ib test for sriov */ - if (amdgpu_sriov_vf(adev)) - return 0; - r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL); if (r) goto error; @@ -527,6 +522,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; + if (amdgpu_sriov_vf(adev)) + return 0; + r = amdgpu_ring_alloc(ring, 16); if (r) return r; @@ -656,15 +654,10 @@ err: int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { - struct amdgpu_device *adev = ring->adev; struct dma_fence *fence = NULL; struct amdgpu_bo *bo = NULL; long r; - /* temporarily disable ib test for sriov */ - if (amdgpu_sriov_vf(adev)) - return 0; - r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &bo, NULL, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index daaf909d009a..f0128f745bd2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -270,6 +270,9 @@ struct amdgpu_virt { #define amdgpu_sriov_runtime(adev) \ ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME) +#define amdgpu_sriov_fullaccess(adev) \ +(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev))) + #define amdgpu_passthrough(adev) \ ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d16231d6a790..6d9252a27916 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -120,23 +120,17 @@ static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm) static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev, unsigned level) { - unsigned shift = 0xff; - switch (level) { case AMDGPU_VM_PDB2: case AMDGPU_VM_PDB1: case AMDGPU_VM_PDB0: - shift = 9 * (AMDGPU_VM_PDB0 - level) + + return 9 * (AMDGPU_VM_PDB0 - level) + 
adev->vm_manager.block_size; - break; case AMDGPU_VM_PTB: - shift = 0; - break; + return 0; default: - dev_err(adev->dev, "the level%d isn't supported.\n", level); + return ~0; } - - return shift; } /** @@ -235,19 +229,6 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo) else list_move_tail(&vm_bo->vm_status, &vm->evicted); } - -/** - * amdgpu_vm_bo_relocated - vm_bo is reloacted - * - * @vm_bo: vm_bo which is relocated - * - * State for PDs/PTs which needs to update their parent PD. - */ -static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo) -{ - list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); -} - /** * amdgpu_vm_bo_moved - vm_bo is moved * @@ -291,6 +272,22 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo) } /** + * amdgpu_vm_bo_relocated - vm_bo is relocated + * + * @vm_bo: vm_bo which is relocated + * + * State for PDs/PTs which needs to update their parent PD. + * For the root PD, just move to idle state. + */ +static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo) +{ + if (vm_bo->bo->parent) + list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); + else + amdgpu_vm_bo_idle(vm_bo); +} + +/** * amdgpu_vm_bo_done - vm_bo is done * * @vm_bo: vm_bo which is now done @@ -588,8 +585,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, { entry->priority = 0; entry->tv.bo = &vm->root.base.bo->tbo; - /* One for TTM and one for the CS job */ - entry->tv.num_shared = 2; + /* Two for VM updates, one for TTM and one for the CS job */ + entry->tv.num_shared = 4; entry->user_pages = NULL; list_add(&entry->tv.head, validated); } @@ -697,10 +694,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, amdgpu_vm_bo_moved(bo_base); } else { vm->update_funcs->map_table(bo); - if (bo->parent) - amdgpu_vm_bo_relocated(bo_base); - else - amdgpu_vm_bo_idle(bo_base); + amdgpu_vm_bo_relocated(bo_base); } } @@ -803,7 +797,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, params.vm = vm; params.direct = direct; - r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL); + r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); if (r) return r; @@ -1086,8 +1080,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, struct dma_fence *fence = NULL; bool pasid_mapping_needed = false; unsigned patch_offset = 0; + bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL)); int r; + if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid); + if (amdgpu_vmid_had_gpu_reset(adev, id)) { gds_switch_needed = true; vm_flush_needed = true; @@ -1299,7 +1297,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, params.vm = vm; params.direct = direct; - r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL); + r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); if (r) return r; @@ -1448,21 +1446,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, uint64_t incr, entry_end, pe_start; struct amdgpu_bo *pt; - /* make sure that the page tables covering the address range are - * actually allocated - */ - r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor, - params->direct); - if (r) - return r; - - pt = cursor.entry->base.bo; - - /* The root level can't be a huge page */ - if (cursor.level == adev->vm_manager.root_level) { - if (!amdgpu_vm_pt_descendant(adev, &cursor)) - return -ENOENT; - continue; + if (flags & (AMDGPU_PTE_VALID |
AMDGPU_PTE_PRT)) { + /* make sure that the page tables covering the + * address range are actually allocated + */ + r = amdgpu_vm_alloc_pts(params->adev, params->vm, + &cursor, params->direct); + if (r) + return r; } shift = amdgpu_vm_level_shift(adev, cursor.level); @@ -1480,25 +1471,38 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, * smaller than the address shift. Go to the next * child entry and try again. */ - if (!amdgpu_vm_pt_descendant(adev, &cursor)) - return -ENOENT; - continue; - } else if (frag >= parent_shift && - cursor.level - 1 != adev->vm_manager.root_level) { + if (amdgpu_vm_pt_descendant(adev, &cursor)) + continue; + } else if (frag >= parent_shift) { /* If the fragment size is even larger than the parent - * shift we should go up one level and check it again - * unless one level up is the root level. + * shift we should go up one level and check it again. */ if (!amdgpu_vm_pt_ancestor(&cursor)) - return -ENOENT; + return -EINVAL; continue; } + pt = cursor.entry->base.bo; + if (!pt) { + /* We need all PDs and PTs for mapping something, */ + if (flags & AMDGPU_PTE_VALID) + return -ENOENT; + + /* but unmapping something can happen at a higher + * level. + */ + if (!amdgpu_vm_pt_ancestor(&cursor)) + return -EINVAL; + + pt = cursor.entry->base.bo; + shift = parent_shift; + } + /* Looks good so far, calculate parameters for the update */ incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift; mask = amdgpu_vm_entries_mask(adev, cursor.level); pe_start = ((cursor.pfn >> shift) & mask) * 8; - entry_end = (uint64_t)(mask + 1) << shift; + entry_end = ((uint64_t)mask + 1) << shift; entry_end += cursor.pfn & ~(entry_end - 1); entry_end = min(entry_end, end); @@ -1506,6 +1510,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, uint64_t upd_end = min(entry_end, frag_end); unsigned nptes = (upd_end - frag_start) >> shift; + /* This can happen when we set higher level PDs to + * silent to stop fault floods. + */ + nptes = max(nptes, 1u); amdgpu_vm_update_flags(params, pt, cursor.level, pe_start, dst, nptes, incr, flags | AMDGPU_PTE_FRAG(frag)); @@ -1550,7 +1558,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, * @adev: amdgpu_device pointer * @vm: requested vm * @direct: direct submission in a page fault - * @exclusive: fence we need to sync to + * @resv: fences we need to sync to * @start: start of mapped range * @last: last mapped entry * @flags: flags for the entries @@ -1565,14 +1573,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, */ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, struct amdgpu_vm *vm, bool direct, - struct dma_fence *exclusive, + struct dma_resv *resv, uint64_t start, uint64_t last, uint64_t flags, uint64_t addr, dma_addr_t *pages_addr, struct dma_fence **fence) { struct amdgpu_vm_update_params params; - void *owner = AMDGPU_FENCE_OWNER_VM; + enum amdgpu_sync_mode sync_mode; int r; memset(¶ms, 0, sizeof(params)); @@ -1581,9 +1589,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, params.direct = direct; params.pages_addr = pages_addr; - /* sync to everything except eviction fences on unmapping */ + /* Implicitly sync to command submissions in the same VM before + * unmapping. Sync to moving fences before mapping. 
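The walker change above allocates page tables only when something is actually being mapped; an unmap that hits a missing table climbs to the parent and clears the covering entry there instead. A toy two-level model of that asymmetry; the sizes and names are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 8

struct pd { uint64_t *pt[ENTRIES]; };

static int update_range(struct pd *pd, unsigned idx, bool map)
{
	if (!pd->pt[idx]) {
		if (map) {
			/* mapping needs all PDs and PTs allocated */
			pd->pt[idx] = calloc(ENTRIES, sizeof(uint64_t));
			if (!pd->pt[idx])
				return -1;	/* -ENOMEM */
		} else {
			/* no PT: clear the covering parent entry instead */
			printf("unmap idx %u handled in parent PD\n", idx);
			return 0;
		}
	}
	pd->pt[idx][0] = map ? 1 : 0;	/* stand-in for real PTE writes */
	return 0;
}

int main(void)
{
	struct pd pd = {0};

	update_range(&pd, 3, false);	/* unmap with no PT allocated */
	update_range(&pd, 3, true);	/* map allocates the PT */
	update_range(&pd, 3, false);	/* unmap now writes real PTEs */
	return 0;
}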
+ */ if (!(flags & AMDGPU_PTE_VALID)) - owner = AMDGPU_FENCE_OWNER_KFD; + sync_mode = AMDGPU_SYNC_EQ_OWNER; + else + sync_mode = AMDGPU_SYNC_EXPLICIT; amdgpu_vm_eviction_lock(vm); if (vm->evicting) { @@ -1591,7 +1603,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, goto error_unlock; } - r = vm->update_funcs->prepare(&params, owner, exclusive); + if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) { + struct amdgpu_bo *root = vm->root.base.bo; + + if (!dma_fence_is_signaled(vm->last_direct)) + amdgpu_bo_fence(root, vm->last_direct, true); + } + + r = vm->update_funcs->prepare(&params, resv, sync_mode); if (r) goto error_unlock; @@ -1610,7 +1629,7 @@ error_unlock: * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks * * @adev: amdgpu_device pointer - * @exclusive: fence we need to sync to + * @resv: fences we need to sync to * @pages_addr: DMA addresses to use for mapping * @vm: requested vm * @mapping: mapped range and flags to use for the update @@ -1626,7 +1645,7 @@ error_unlock: * 0 for success, -EINVAL for failure. */ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, - struct dma_fence *exclusive, + struct dma_resv *resv, dma_addr_t *pages_addr, struct amdgpu_vm *vm, struct amdgpu_bo_va_mapping *mapping, @@ -1696,13 +1715,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, AMDGPU_GPU_PAGES_IN_CPU_PAGE; } - } else if (flags & AMDGPU_PTE_VALID) { + } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) { addr += bo_adev->vm_manager.vram_base_offset; addr += pfn << PAGE_SHIFT; } last = min((uint64_t)mapping->last, start + max_entries - 1); - r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive, + r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv, start, last, flags, addr, dma_addr, fence); if (r) @@ -1741,7 +1760,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, dma_addr_t *pages_addr = NULL; struct ttm_mem_reg *mem; struct drm_mm_node *nodes; - struct dma_fence *exclusive, **last_update; + struct dma_fence **last_update; + struct dma_resv *resv; uint64_t flags; struct amdgpu_device *bo_adev = adev; int r; @@ -1749,7 +1769,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, if (clear || !bo) { mem = NULL; nodes = NULL; - exclusive = NULL; + resv = vm->root.base.bo->tbo.base.resv; } else { struct ttm_dma_tt *ttm; @@ -1759,7 +1779,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); pages_addr = ttm->dma_address; } - exclusive = bo->tbo.moving; + resv = bo->tbo.base.resv; } if (bo) { @@ -1769,7 +1789,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, flags = 0x0; } - if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)) + if (clear || (bo && bo->tbo.base.resv == + vm->root.base.bo->tbo.base.resv)) last_update = &vm->last_update; else last_update = &bo_va->last_pt_update; @@ -1783,7 +1804,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, } list_for_each_entry(mapping, &bo_va->invalids, list) { - r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, + r = amdgpu_vm_bo_split_mapping(adev, resv, pages_addr, vm, mapping, flags, bo_adev, nodes, last_update); if (r) @@ -1978,6 +1999,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence) { + struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; struct
amdgpu_bo_va_mapping *mapping; uint64_t init_pte_value = 0; struct dma_fence *f = NULL; @@ -1992,7 +2014,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, mapping->start < AMDGPU_GMC_HOLE_START) init_pte_value = AMDGPU_PTE_DEFAULT_ATC; - r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL, + r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv, mapping->start, mapping->last, init_pte_value, 0, NULL, &f); amdgpu_vm_free_mapping(adev, vm, mapping, f); @@ -2563,8 +2585,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo) return false; /* Don't evict VM page tables while they are updated */ - if (!dma_fence_is_signaled(bo_base->vm->last_direct) || - !dma_fence_is_signaled(bo_base->vm->last_delayed)) { + if (!dma_fence_is_signaled(bo_base->vm->last_direct)) { amdgpu_vm_eviction_unlock(bo_base->vm); return false; } @@ -2741,11 +2762,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) if (timeout <= 0) return timeout; - timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout); - if (timeout <= 0) - return timeout; - - return dma_fence_wait_timeout(vm->last_delayed, true, timeout); + return dma_fence_wait_timeout(vm->last_direct, true, timeout); } /** @@ -2818,7 +2835,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->update_funcs = &amdgpu_vm_sdma_funcs; vm->last_update = NULL; vm->last_direct = dma_fence_get_stub(); - vm->last_delayed = dma_fence_get_stub(); mutex_init(&vm->eviction_lock); vm->evicting = false; @@ -2873,7 +2889,6 @@ error_free_root: error_free_delayed: dma_fence_put(vm->last_direct); - dma_fence_put(vm->last_delayed); drm_sched_entity_destroy(&vm->delayed); error_free_direct: @@ -3076,8 +3091,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) dma_fence_wait(vm->last_direct, false); dma_fence_put(vm->last_direct); - dma_fence_wait(vm->last_delayed, false); - dma_fence_put(vm->last_delayed); list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { @@ -3188,6 +3201,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) union drm_amdgpu_vm *args = data; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_fpriv *fpriv = filp->driver_priv; + long timeout = msecs_to_jiffies(2000); int r; switch (args->in.op) { @@ -3199,6 +3213,21 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return r; break; case AMDGPU_VM_OP_UNRESERVE_VMID: + if (amdgpu_sriov_runtime(adev)) + timeout = 8 * timeout; + + /* Wait vm idle to make sure the vmid set in SPM_VMID is + * not referenced anymore. 
+ */ + r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true); + if (r) + return r; + + r = amdgpu_vm_wait_idle(&fpriv->vm, timeout); + if (r < 0) + return r; + + amdgpu_bo_unreserve(fpriv->vm.root.base.bo); amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index b4640ab38c95..06fe30e1492d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -227,8 +227,8 @@ struct amdgpu_vm_update_params { struct amdgpu_vm_update_funcs { int (*map_table)(struct amdgpu_bo *bo); - int (*prepare)(struct amdgpu_vm_update_params *p, void * owner, - struct dma_fence *exclusive); + int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv, + enum amdgpu_sync_mode sync_mode); int (*update)(struct amdgpu_vm_update_params *p, struct amdgpu_bo *bo, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags); @@ -276,7 +276,6 @@ struct amdgpu_vm { /* Last submission to the scheduler entities */ struct dma_fence *last_direct; - struct dma_fence *last_delayed; unsigned int pasid; /* dedicated to vm */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c index 73fec7a0ced5..e38516304070 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c @@ -44,26 +44,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table) * Returns: * Negativ errno, 0 for success. */ -static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner, - struct dma_fence *exclusive) +static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, + struct dma_resv *resv, + enum amdgpu_sync_mode sync_mode) { - int r; - - /* Wait for any BO move to be completed */ - if (exclusive) { - r = dma_fence_wait(exclusive, true); - if (unlikely(r)) - return r; - } - - /* Don't wait for submissions during page fault */ - if (p->direct) + if (!resv) return 0; - /* Wait for PT BOs to be idle. PTs share the same resv. object - * as the root PD BO - */ - return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true); + return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true); } /** @@ -86,6 +74,13 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p, { unsigned int i; uint64_t value; + int r; + + if (bo->tbo.moving) { + r = dma_fence_wait(bo->tbo.moving, true); + if (r) + return r; + } pe += (unsigned long)amdgpu_bo_kptr(bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 19b7f80758f1..cf96c335b258 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -58,9 +58,9 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table) * Negativ errno, 0 for success. 
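Both VM update backends now take a reservation object plus a sync mode in prepare(). A userspace model of the split visible in these hunks: the CPU path waits on the fences inline, while the SDMA path defers them to the job for the scheduler to resolve. Function pointers stand in for amdgpu_vm_update_funcs and the fence handling is reduced to a counter.

#include <stdio.h>

enum sync_mode { SYNC_EQ_OWNER, SYNC_EXPLICIT };

struct resv { int busy_fences; };

struct update_funcs {
	int (*prepare)(struct resv *resv, enum sync_mode mode);
};

static int cpu_prepare(struct resv *resv, enum sync_mode mode)
{
	if (!resv)
		return 0;	/* page-fault path: nothing to wait for */
	while (resv->busy_fences)	/* dma_fence_wait() stand-in */
		resv->busy_fences--;
	return 0;
}

static int sdma_prepare(struct resv *resv, enum sync_mode mode)
{
	if (!resv)
		return 0;
	/* amdgpu_sync_resv() stand-in: defer the wait to the scheduler */
	printf("queued %d fences on the SDMA job\n", resv->busy_fences);
	return 0;
}

int main(void)
{
	struct resv resv = { .busy_fences = 2 };
	struct update_funcs cpu = { cpu_prepare }, sdma = { sdma_prepare };

	cpu.prepare(&resv, SYNC_EQ_OWNER);
	resv.busy_fences = 2;
	sdma.prepare(&resv, SYNC_EXPLICIT);
	return 0;
}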
*/ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, - void *owner, struct dma_fence *exclusive) + struct dma_resv *resv, + enum amdgpu_sync_mode sync_mode) { - struct amdgpu_bo *root = p->vm->root.base.bo; unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW; int r; @@ -70,17 +70,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, p->num_dw_left = ndw; - /* Wait for moves to be completed */ - r = amdgpu_sync_fence(&p->job->sync, exclusive, false); - if (r) - return r; - - /* Don't wait for any submissions during page fault handling */ - if (p->direct) + if (!resv) return 0; - return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv, - owner, false); + return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm); } /** @@ -111,12 +104,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, if (r) goto error; - tmp = dma_fence_get(f); - if (p->direct) + if (p->direct) { + tmp = dma_fence_get(f); swap(p->vm->last_direct, tmp); - else - swap(p->vm->last_delayed, tmp); - dma_fence_put(tmp); + dma_fence_put(tmp); + } else { + dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f); + } if (fence && !p->direct) swap(*fence, f); @@ -147,7 +141,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p, src += p->num_dw_left * 4; - pe += amdgpu_bo_gpu_offset(bo); + pe += amdgpu_gmc_sign_extend(bo->tbo.offset); trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct); amdgpu_vm_copy_pte(p->adev, ib, pe, src, count); @@ -174,7 +168,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p, { struct amdgpu_ib *ib = p->job->ibs; - pe += amdgpu_bo_gpu_offset(bo); + pe += amdgpu_gmc_sign_extend(bo->tbo.offset); trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct); if (count < 3) { amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags, @@ -208,6 +202,11 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, uint64_t *pte; int r; + /* Wait for PD/PT moves to be completed */ + r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving, false); + if (r) + return r; + do { ndw = p->num_dw_left; ndw -= p->job->ibs->length_dw; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index a97af422575a..95b3327168ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -26,7 +26,12 @@ #include "amdgpu_xgmi.h" #include "amdgpu_smu.h" #include "amdgpu_ras.h" +#include "soc15.h" #include "df/df_3_6_offset.h" +#include "xgmi/xgmi_4_0_0_smn.h" +#include "xgmi/xgmi_4_0_0_sh_mask.h" +#include "wafl/wafl2_4_0_0_smn.h" +#include "wafl/wafl2_4_0_0_sh_mask.h" static DEFINE_MUTEX(xgmi_mutex); @@ -36,6 +41,109 @@ static DEFINE_MUTEX(xgmi_mutex); static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE]; static unsigned hive_count = 0; +static const int xgmi_pcs_err_status_reg_vg20[] = { + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000, +}; + +static const int wafl_pcs_err_status_reg_vg20[] = { + smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, + smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000, +}; + +static const int xgmi_pcs_err_status_reg_arct[] = { + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000, +}; + +/* same as 
vg20*/ +static const int wafl_pcs_err_status_reg_arct[] = { + smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, + smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000, +}; + +static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = { + {"XGMI PCS DataLossErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)}, + {"XGMI PCS TrainingErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)}, + {"XGMI PCS CRCErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)}, + {"XGMI PCS BERExceededErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)}, + {"XGMI PCS TxMetaDataErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)}, + {"XGMI PCS ReplayBufParityErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)}, + {"XGMI PCS DataParityErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)}, + {"XGMI PCS ReplayFifoOverflowErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)}, + {"XGMI PCS ReplayFifoUnderflowErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)}, + {"XGMI PCS ElasticFifoOverflowErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)}, + {"XGMI PCS DeskewErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)}, + {"XGMI PCS DataStartupLimitErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)}, + {"XGMI PCS FCInitTimeoutErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)}, + {"XGMI PCS RecoveryTimeoutErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)}, + {"XGMI PCS ReadySerialTimeoutErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)}, + {"XGMI PCS ReadySerialAttemptErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)}, + {"XGMI PCS RecoveryAttemptErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)}, + {"XGMI PCS RecoveryRelockAttemptErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)}, +}; + +static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = { + {"WAFL PCS DataLossErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)}, + {"WAFL PCS TrainingErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)}, + {"WAFL PCS CRCErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)}, + {"WAFL PCS BERExceededErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)}, + {"WAFL PCS TxMetaDataErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)}, + {"WAFL PCS ReplayBufParityErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)}, + {"WAFL PCS DataParityErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)}, + {"WAFL PCS ReplayFifoOverflowErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)}, + {"WAFL PCS ReplayFifoUnderflowErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)}, + {"WAFL PCS ElasticFifoOverflowErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)}, + {"WAFL PCS DeskewErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)}, + {"WAFL PCS DataStartupLimitErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)}, + {"WAFL PCS FCInitTimeoutErr", + 
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)}, + {"WAFL PCS RecoveryTimeoutErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)}, + {"WAFL PCS ReadySerialTimeoutErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)}, + {"WAFL PCS ReadySerialAttemptErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)}, + {"WAFL PCS RecoveryAttemptErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)}, + {"WAFL PCS RecoveryRelockAttemptErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)}, +}; + void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive) { return &hive->device_list; @@ -365,6 +473,13 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) return 0; if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { + ret = psp_xgmi_initialize(&adev->psp); + if (ret) { + dev_err(adev->dev, + "XGMI: Failed to initialize xgmi session\n"); + return ret; + } + ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id); if (ret) { dev_err(adev->dev, @@ -451,16 +566,16 @@ exit: return ret; } -void amdgpu_xgmi_remove_device(struct amdgpu_device *adev) +int amdgpu_xgmi_remove_device(struct amdgpu_device *adev) { struct amdgpu_hive_info *hive; if (!adev->gmc.xgmi.supported) - return; + return -EINVAL; hive = amdgpu_get_xgmi_hive(adev, 1); if (!hive) - return; + return -EINVAL; if (!(hive->number_devices--)) { amdgpu_xgmi_sysfs_destroy(adev, hive); @@ -471,6 +586,8 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev) amdgpu_xgmi_sysfs_rem_dev_info(adev, hive); mutex_unlock(&hive->hive_lock); } + + return psp_xgmi_terminate(&adev->psp); } int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) @@ -481,7 +598,6 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) }; struct ras_fs_if fs_info = { .sysfs_name = "xgmi_wafl_err_count", - .debugfs_name = "xgmi_wafl_err_inject", }; if (!adev->gmc.xgmi.supported || @@ -521,3 +637,129 @@ void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev) kfree(ras_if); } } + +uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev, + uint64_t addr) +{ + uint32_t df_inst_id; + uint64_t dram_base_addr = 0; + const struct amdgpu_df_funcs *df_funcs = adev->df.funcs; + + if ((!df_funcs) || + (!df_funcs->get_df_inst_id) || + (!df_funcs->get_dram_base_addr)) { + dev_warn(adev->dev, + "XGMI: relative phy_addr algorithm is not supported\n"); + return addr; + } + + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) { + dev_warn(adev->dev, + "failed to disable DF-Cstate, DF register may not be accessible\n"); + return addr; + } + + df_inst_id = df_funcs->get_df_inst_id(adev); + dram_base_addr = df_funcs->get_dram_base_addr(adev, df_inst_id); + + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) + dev_warn(adev->dev, "failed to enable DF-Cstate\n"); + + return addr + dram_base_addr; +} + +static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev, + uint32_t value, + uint32_t *ue_count, + uint32_t *ce_count, + bool is_xgmi_pcs) +{ + int i; + int ue_cnt; + + if (is_xgmi_pcs) { + /* query xgmi pcs error status, + * only ue is supported */ + for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i ++) { + ue_cnt = (value & + xgmi_pcs_ras_fields[i].pcs_err_mask) >> + xgmi_pcs_ras_fields[i].pcs_err_shift; + if (ue_cnt) { + dev_info(adev->dev, "%s detected\n", + xgmi_pcs_ras_fields[i].err_name); + *ue_count += ue_cnt; + } + } + } else { + /* query 
wafl pcs error status, + * only ue is supported */ + for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) { + ue_cnt = (value & + wafl_pcs_ras_fields[i].pcs_err_mask) >> + wafl_pcs_ras_fields[i].pcs_err_shift; + if (ue_cnt) { + dev_info(adev->dev, "%s detected\n", + wafl_pcs_ras_fields[i].err_name); + *ue_count += ue_cnt; + } + } + } + + return 0; +} + +int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + int i; + uint32_t data; + uint32_t ue_cnt = 0, ce_cnt = 0; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL)) + return -EINVAL; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + switch (adev->asic_type) { + case CHIP_ARCTURUS: + /* check xgmi pcs error */ + for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) { + data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, true); + } + /* check wafl pcs error */ + for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) { + data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, false); + } + break; + case CHIP_VEGA20: + default: + /* check xgmi pcs error */ + for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) { + data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, true); + } + /* check wafl pcs error */ + for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) { + data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, false); + } + break; + } + + err_data->ue_count += ue_cnt; + err_data->ce_count += ce_cnt; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index 74011fbc2251..4a92067fe595 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -37,15 +37,25 @@ struct amdgpu_hive_info { struct task_barrier tb; }; +struct amdgpu_pcs_ras_field { + const char *err_name; + uint32_t pcs_err_mask; + uint32_t pcs_err_shift; +}; + struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock); int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev); int amdgpu_xgmi_add_device(struct amdgpu_device *adev); -void amdgpu_xgmi_remove_device(struct amdgpu_device *adev); +int amdgpu_xgmi_remove_device(struct amdgpu_device *adev); int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate); int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev); void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev); +uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev, + uint64_t addr); +int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status); static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev, struct amdgpu_device *bo_adev) diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index dd30f4e61a8c..cae426c7c086 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -744,8 +744,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) cjiffies = jiffies; if 
(time_after(cjiffies, ctx->last_jump_jiffies)) { cjiffies -= ctx->last_jump_jiffies; - if ((jiffies_to_msecs(cjiffies) > 5000)) { - DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n"); + if ((jiffies_to_msecs(cjiffies) > 10000)) { + DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n"); ctx->abort = true; } } else { diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index ea702a64f807..9b74cfdba7b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -186,16 +186,10 @@ amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *m void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector) { - int ret; - amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd; - amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev; amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer; - ret = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux); - if (!ret) - amdgpu_connector->ddc_bus->has_aux = true; - - WARN(ret, "drm_dp_aux_register_i2c_bus() failed with error %d\n", ret); + drm_dp_aux_init(&amdgpu_connector->ddc_bus->aux); + amdgpu_connector->ddc_bus->has_aux = true; } /***** general DP utility functions *****/ diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 006f21ef7ddf..62635e58e45e 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1358,8 +1358,6 @@ static int cik_asic_reset(struct amdgpu_device *adev) int r; if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { - if (!adev->in_suspend) - amdgpu_inc_vram_lost(adev); r = amdgpu_dpm_baco_reset(adev); } else { r = cik_asic_pci_config_reset(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 40d2ac723dd6..2512e7ebfedf 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2494,6 +2494,10 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = { .set_config = amdgpu_display_crtc_set_config, .destroy = dce_v10_0_crtc_destroy, .page_flip_target = amdgpu_display_crtc_page_flip_target, + .get_vblank_counter = amdgpu_get_vblank_counter_kms, + .enable_vblank = amdgpu_enable_vblank_kms, + .disable_vblank = amdgpu_disable_vblank_kms, + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, }; static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -2685,6 +2689,7 @@ static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = { .prepare = dce_v10_0_crtc_prepare, .commit = dce_v10_0_crtc_commit, .disable = dce_v10_0_crtc_disable, + .get_scanout_position = amdgpu_crtc_get_scanout_position, }; static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 898ef72d423c..0dde22db9848 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2573,6 +2573,10 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = { .set_config = amdgpu_display_crtc_set_config, .destroy = dce_v11_0_crtc_destroy, .page_flip_target = amdgpu_display_crtc_page_flip_target, + .get_vblank_counter = amdgpu_get_vblank_counter_kms, + .enable_vblank = amdgpu_enable_vblank_kms, + .disable_vblank = amdgpu_disable_vblank_kms, + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, }; static void 
dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -2793,6 +2797,7 @@ static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = { .prepare = dce_v11_0_crtc_prepare, .commit = dce_v11_0_crtc_commit, .disable = dce_v11_0_crtc_disable, + .get_scanout_position = amdgpu_crtc_get_scanout_position, }; static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index db15a112becc..84219534bd38 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2388,6 +2388,10 @@ static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = { .set_config = amdgpu_display_crtc_set_config, .destroy = dce_v6_0_crtc_destroy, .page_flip_target = amdgpu_display_crtc_page_flip_target, + .get_vblank_counter = amdgpu_get_vblank_counter_kms, + .enable_vblank = amdgpu_enable_vblank_kms, + .disable_vblank = amdgpu_disable_vblank_kms, + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, }; static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -2575,6 +2579,7 @@ static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = { .prepare = dce_v6_0_crtc_prepare, .commit = dce_v6_0_crtc_commit, .disable = dce_v6_0_crtc_disable, + .get_scanout_position = amdgpu_crtc_get_scanout_position, }; static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index f06c9022c1fd..3a640702d7d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2395,6 +2395,10 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = { .set_config = amdgpu_display_crtc_set_config, .destroy = dce_v8_0_crtc_destroy, .page_flip_target = amdgpu_display_crtc_page_flip_target, + .get_vblank_counter = amdgpu_get_vblank_counter_kms, + .enable_vblank = amdgpu_enable_vblank_kms, + .disable_vblank = amdgpu_disable_vblank_kms, + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, }; static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -2593,6 +2597,7 @@ static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = { .prepare = dce_v8_0_crtc_prepare, .commit = dce_v8_0_crtc_commit, .disable = dce_v8_0_crtc_disable, + .get_scanout_position = amdgpu_crtc_get_scanout_position, }; static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index e4f94863332c..13e12be667fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -123,6 +123,10 @@ static const struct drm_crtc_funcs dce_virtual_crtc_funcs = { .set_config = amdgpu_display_crtc_set_config, .destroy = dce_virtual_crtc_destroy, .page_flip_target = amdgpu_display_crtc_page_flip_target, + .get_vblank_counter = amdgpu_get_vblank_counter_kms, + .enable_vblank = amdgpu_enable_vblank_kms, + .disable_vblank = amdgpu_disable_vblank_kms, + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, }; static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -218,6 +222,7 @@ static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = { .prepare = dce_virtual_crtc_prepare, .commit = dce_virtual_crtc_commit, .disable = dce_virtual_crtc_disable, + .get_scanout_position = amdgpu_crtc_get_scanout_position, }; static int dce_virtual_crtc_init(struct amdgpu_device 
*adev, int index) @@ -609,7 +614,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev, connector->display_info.subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_connector_register(connector); /* link them */ drm_connector_attach_encoder(connector, encoder); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 02702597ddeb..0e0daf0021b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -35,6 +35,8 @@ #include "gc/gc_10_1_0_offset.h" #include "gc/gc_10_1_0_sh_mask.h" +#include "smuio/smuio_11_0_0_offset.h" +#include "smuio/smuio_11_0_0_sh_mask.h" #include "navi10_enum.h" #include "hdp/hdp_5_0_0_offset.h" #include "ivsrcid/gfx/irqsrcs_gfx_10_1.h" @@ -222,6 +224,49 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000) }; +static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v) +{ + static void *scratch_reg0; + static void *scratch_reg1; + static void *scratch_reg2; + static void *scratch_reg3; + static void *spare_int; + static uint32_t grbm_cntl; + static uint32_t grbm_idx; + uint32_t i = 0; + uint32_t retries = 50000; + + scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4; + scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4; + scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4; + scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4; + spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4; + + grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; + grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX; + + if (amdgpu_sriov_runtime(adev)) { + pr_err("shouldn't call rlcg write register during runtime\n"); + return; + } + + writel(v, scratch_reg0); + writel(offset | 0x80000000, scratch_reg1); + writel(1, spare_int); + for (i = 0; i < retries; i++) { + u32 tmp; + + tmp = readl(scratch_reg1); + if (!(tmp & 0x80000000)) + break; + + udelay(10); + } + + if (i >= retries) + pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset); +} + static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] = { /* Pending on emulation bring up */ @@ -500,29 +545,28 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct dma_fence *f = NULL; - uint32_t scratch; - uint32_t tmp = 0; + unsigned index; + uint64_t gpu_addr; + uint32_t tmp; long r; - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); + r = amdgpu_device_wb_get(adev, &index); + if (r) return r; - } - - WREG32(scratch, 0xCAFEDEAD); + gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + r = amdgpu_ib_get(adev, NULL, 16, &ib); + if (r) goto err1; - } - ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); - ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); - ib.ptr[2] = 0xDEADBEEF; - ib.length_dw = 3; + 
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); + ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; + ib.ptr[2] = lower_32_bits(gpu_addr); + ib.ptr[3] = upper_32_bits(gpu_addr); + ib.ptr[4] = 0xDEADBEEF; + ib.length_dw = 5; r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); if (r) @@ -530,15 +574,13 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); r = -ETIMEDOUT; goto err2; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err2; } - tmp = RREG32(scratch); + tmp = adev->wb.wb[index]; if (tmp == 0xDEADBEEF) r = 0; else @@ -547,8 +589,7 @@ err2: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); err1: - amdgpu_gfx_scratch_free(adev, scratch); - + amdgpu_device_wb_free(adev, index); return r; } @@ -1016,6 +1057,10 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev) return r; } + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -1068,7 +1113,7 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev) return r; } - memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size); + memset(hpd, 0, mec_hpd_size); amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); @@ -1783,11 +1828,11 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev) adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ - WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI, + WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI, adev->gfx.rlc.clear_state_gpu_addr >> 32); - WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO, + WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO, adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); - WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); + WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); return 0; } @@ -1895,6 +1940,11 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ gfx_v10_0_rlc_enable_srm(adev); } else { + if (amdgpu_sriov_vf(adev)) { + gfx_v10_0_init_csb(adev); + return 0; + } + adev->gfx.rlc.funcs->stop(adev); /* disable CG */ @@ -2395,7 +2445,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) for (i = 0; i < adev->gfx.num_gfx_rings; i++) adev->gfx.gfx_ring[i].sched.ready = false; } - WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); + WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp); for (i = 0; i < adev->usec_timeout; i++) { if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0) @@ -3168,12 +3218,7 @@ static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev) for (i = 0; i < adev->gfx.num_gfx_rings; i++) kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]); - r = amdgpu_ring_test_ring(kiq_ring); - if (r) { - DRM_ERROR("kfq enable failed\n"); - kiq_ring->sched.ready = false; - } - return r; + return amdgpu_ring_test_helper(kiq_ring); } #endif @@ -3216,6 +3261,22 @@ done: return r; } +static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct v10_compute_mqd *mqd) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + ring->has_high_prio = true; + mqd->cp_hqd_queue_priority = + AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; + } else { + ring->has_high_prio = false; + } + } +} + static int 
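The reworked gfx_v10_0_ring_test_ib() above drops the old scratch-register round trip: the IB now carries a single WRITE_DATA packet that stores 0xDEADBEEF into a write-back (WB) slot in system memory, and the CPU simply checks adev->wb.wb[index]. A sketch of the five-dword packet; the PM4 encodings below are assumed to match the amdgpu packet headers, so verify them before reuse:

#include <stdint.h>
#include <stdio.h>

/* Assumed PM4 encodings for the fields used in the hunk above. */
#define PACKET3(op, n)        ((3u << 30) | (((op) & 0xffu) << 8) | (((n) & 0x3fffu) << 16))
#define PACKET3_WRITE_DATA    0x37
#define WRITE_DATA_DST_SEL(x) ((x) << 8)  /* 5 = system memory */
#define WR_CONFIRM            (1u << 20)  /* don't retire until the write lands */

int main(void)
{
	uint64_t gpu_addr = 0x100000;  /* hypothetical GPU address of the WB slot */
	uint32_t ib[5];

	ib[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib[2] = (uint32_t)gpu_addr;          /* lower_32_bits(gpu_addr) */
	ib[3] = (uint32_t)(gpu_addr >> 32);  /* upper_32_bits(gpu_addr) */
	ib[4] = 0xDEADBEEF;                  /* pattern the CPU polls for */

	printf("WRITE_DATA header: 0x%08x\n", ib[0]);
	return 0;
}

One practical benefit is that the test no longer consumes a scratch register at all; the WB slot is plain system memory.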
gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -3341,6 +3402,9 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring) tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); mqd->cp_hqd_ib_control = tmp; + /* set static priority for a compute queue/ring */ + gfx_v10_0_compute_mqd_set_priority(ring, mqd); + /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. */ @@ -3790,7 +3854,7 @@ static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev) kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i], PREEMPT_QUEUES, 0, 0); - return amdgpu_ring_test_ring(kiq_ring); + return amdgpu_ring_test_helper(kiq_ring); } #endif @@ -3930,9 +3994,8 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); - WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); - clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | - ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) | + ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL); mutex_unlock(&adev->gfx.gpu_clock_mutex); amdgpu_gfx_off_ctrl(adev, true); return clock; @@ -4041,6 +4104,12 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade /* It is disabled by HW by default */ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { + /* 0 - Disable some blocks' MGCG */ + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); + WREG32_SOC15(GC, 0, mmCGTT_WD_CLK_CTRL, 0xff000000); + WREG32_SOC15(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xff000000); + WREG32_SOC15(GC, 0, mmCGTT_IA_CLK_CTRL, 0xff000000); + /* 1 - RLC_CGTT_MGCG_OVERRIDE */ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | @@ -4080,19 +4149,20 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade if (def != data) WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); - /* 2 - disable MGLS in RLC */ + /* 2 - disable MGLS in CP */ + data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); + if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { + data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; + WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); + } + + /* 3 - disable MGLS in RLC */ data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data); } - /* 3 - disable MGLS in CP */ - data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); - if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { - data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; - WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); - } } } @@ -4220,6 +4290,45 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, return 0; } +static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 data; + + data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL); + + data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; + + WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); +} + +static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev, + uint32_t offset, + struct soc15_reg_rlcg *entries, int arr_size) +{ + int i; + uint32_t reg; + + if (!entries) + return false; + + for (i = 0; i < arr_size; i++) { + 
const struct soc15_reg_rlcg *entry; + + entry = &entries[i]; + reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg; + if (offset == reg) + return true; + } + + return false; +} + +static bool gfx_v10_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset) +{ + return gfx_v10_0_check_rlcg_range(adev, offset, NULL, 0); +} + static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = { .is_rlc_enabled = gfx_v10_0_is_rlc_enabled, .set_safe_mode = gfx_v10_0_set_safe_mode, @@ -4230,7 +4339,10 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = { .resume = gfx_v10_0_rlc_resume, .stop = gfx_v10_0_rlc_stop, .reset = gfx_v10_0_rlc_reset, - .start = gfx_v10_0_rlc_start + .start = gfx_v10_0_rlc_start, + .update_spm_vmid = gfx_v10_0_update_spm_vmid, + .rlcg_wreg = gfx_v10_rlcg_wreg, + .is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range, }; static int gfx_v10_0_set_powergating_state(void *handle, @@ -4241,11 +4353,7 @@ static int gfx_v10_0_set_powergating_state(void *handle, switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: - if (!enable) { - amdgpu_gfx_off_ctrl(adev, false); - cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); - } else - amdgpu_gfx_off_ctrl(adev, true); + amdgpu_gfx_off_ctrl(adev, enable); break; default: break; @@ -4419,15 +4527,15 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, control |= ib->length_dw | (vmid << 24); - if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { + if ((amdgpu_sriov_vf(ring->adev) || amdgpu_mcbp) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= INDIRECT_BUFFER_PRE_ENB(1); if (flags & AMDGPU_IB_PREEMPTED) control |= INDIRECT_BUFFER_PRE_RESUME(1); - if (!(ib->flags & AMDGPU_IB_FLAG_CE)) + if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid) gfx_v10_0_ring_emit_de_meta(ring, - flags & AMDGPU_IB_PREEMPTED ? true : false); + (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false); } amdgpu_ring_write(ring, header); @@ -4574,9 +4682,9 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flag { uint32_t dw2 = 0; - if (amdgpu_mcbp) + if (amdgpu_mcbp || amdgpu_sriov_vf(ring->adev)) gfx_v10_0_ring_emit_ce_meta(ring, - flags & AMDGPU_IB_PREEMPTED ? true : false); + (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? 
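gfx_v10_0_is_rlcg_access_range() above passes a NULL table for now, so no gfx10 register is forced through the RLCG path yet; the gfx9 code later in this patch supplies a real rlcg_access_gc_9_0 table. The check itself is a linear scan that resolves each entry to an absolute offset through reg_offset[hwip][instance][segment]. A standalone model, with a toy offset table standing in for the soc15 bookkeeping:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct reg_rlcg { int hwip, instance, segment; uint32_t reg; };

/* Toy stand-in for adev->reg_offset[][][]: one IP, one instance, two segments. */
static const uint32_t reg_offset[1][1][2] = { { { 0x1000, 0x2000 } } };

static bool check_rlcg_range(uint32_t offset, const struct reg_rlcg *entries, int n)
{
	if (!entries)
		return false;          /* NULL table: nothing is RLCG-protected */

	for (int i = 0; i < n; i++) {
		uint32_t reg = reg_offset[entries[i].hwip][entries[i].instance]
					 [entries[i].segment] + entries[i].reg;
		if (offset == reg)
			return true;   /* must be written via rlcg_wreg() */
	}
	return false;                  /* plain MMIO write is fine */
}

int main(void)
{
	const struct reg_rlcg allow[] = { { 0, 0, 0, 0x22 } }; /* hypothetical entry */
	printf("%d\n", check_rlcg_range(0x1022, allow, 1));    /* prints 1 */
	return 0;
}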
true : false); gfx_v10_0_ring_emit_tmz(ring, true); @@ -4806,6 +4914,19 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, ref, mask); } +static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring, + unsigned vmid) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t value = 0; + + value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); + value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); + value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); + value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); + WREG32_SOC15(GC, 0, mmSQ_CMD, value); +} + static void gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, uint32_t me, uint32_t pipe, @@ -5197,6 +5318,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, + .soft_recovery = gfx_v10_0_ring_soft_recovery, }; static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 8f20a5dd44fe..733d398c61cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3346,6 +3346,10 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) return r; } + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -3570,6 +3574,18 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) return 0; } +static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 data; + + data = RREG32(mmRLC_SPM_VMID); + + data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT; + + WREG32(mmRLC_SPM_VMID, data); +} + static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) { u32 data, orig, tmp, tmp2; @@ -4221,7 +4237,8 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { .resume = gfx_v7_0_rlc_resume, .stop = gfx_v7_0_rlc_stop, .reset = gfx_v7_0_rlc_reset, - .start = gfx_v7_0_rlc_start + .start = gfx_v7_0_rlc_start, + .update_spm_vmid = gfx_v7_0_update_spm_vmid }; static int gfx_v7_0_early_init(void *handle) @@ -4338,6 +4355,11 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; + adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFBANK); + adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFRANKS); + adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; adev->gfx.config.mem_max_burst_length_bytes = 256; if (adev->flags & AMD_IS_APU) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index fa245973de12..fc32586ef80b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1318,6 +1318,10 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) return r; } + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -1820,6 +1824,11 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; + adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg, + 
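gfx_v10_0_ring_soft_recovery() above gives the scheduler a lighter hammer than a full GPU reset: it writes SQ_CMD asking the shader array to kill only the waves that belong to the guilty VMID (CMD 0x03 = kill, MODE 0x01 = broadcast, CHECK_VMID = 1). A sketch of the field packing; the real shifts and masks come from the generated gc sh_mask headers, so the bit positions below are placeholders:

#include <stdint.h>
#include <stdio.h>

/* Placeholder field positions; the kernel's REG_SET_FIELD() uses the
 * generated SQ_CMD__*__SHIFT/MASK definitions instead. */
#define SQ_CMD_CMD_SHIFT        0   /* 0x03 = kill waves */
#define SQ_CMD_MODE_SHIFT       4   /* 0x01 = broadcast */
#define SQ_CMD_VM_ID_SHIFT      24
#define SQ_CMD_CHECK_VMID_SHIFT 28  /* restrict the kill to VM_ID */

static uint32_t sq_cmd_kill_vmid(uint32_t vmid)
{
	uint32_t value = 0;

	value |= 0x03u << SQ_CMD_CMD_SHIFT;
	value |= 0x01u << SQ_CMD_MODE_SHIFT;
	value |= vmid  << SQ_CMD_VM_ID_SHIFT;
	value |= 1u    << SQ_CMD_CHECK_VMID_SHIFT;
	return value;  /* what the hook would write to mmSQ_CMD */
}

int main(void)
{
	printf("SQ_CMD = 0x%08x\n", sq_cmd_kill_vmid(3));
	return 0;
}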
MC_ARB_RAMCFG, NOOFBANK); + adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFRANKS); + adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; adev->gfx.config.mem_max_burst_length_bytes = 256; if (adev->flags & AMD_IS_APU) { @@ -4421,6 +4430,22 @@ static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req) return r; } +static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + ring->has_high_prio = true; + mqd->cp_hqd_queue_priority = + AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; + } else { + ring->has_high_prio = false; + } + } +} + static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -4544,9 +4569,6 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) /* defaults */ mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR); mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR); - mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY); - mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY); - mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO); mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI); mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET); @@ -4558,6 +4580,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM); mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES); + /* set static priority for a queue/ring */ + gfx_v8_0_mqd_set_priority(ring, mqd); + mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); + /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. */ @@ -5589,6 +5615,18 @@ static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev) } } +static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 data; + + data = RREG32(mmRLC_SPM_VMID); + + data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT; + + WREG32(mmRLC_SPM_VMID, data); +} + static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { .is_rlc_enabled = gfx_v8_0_is_rlc_enabled, .set_safe_mode = gfx_v8_0_set_safe_mode, @@ -5600,7 +5638,8 @@ static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { .resume = gfx_v8_0_rlc_resume, .stop = gfx_v8_0_rlc_stop, .reset = gfx_v8_0_rlc_reset, - .start = gfx_v8_0_rlc_start + .start = gfx_v8_0_rlc_start, + .update_spm_vmid = gfx_v8_0_update_spm_vmid }; static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, @@ -6094,7 +6133,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= INDIRECT_BUFFER_PRE_ENB(1); - if (!(ib->flags & AMDGPU_IB_FLAG_CE)) + if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid) gfx_v8_0_ring_emit_de_meta(ring); } @@ -6236,104 +6275,6 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring) WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } -static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring, - bool acquire) -{ - struct amdgpu_device *adev = ring->adev; - int pipe_num, tmp, reg; - int pipe_percent = acquire ? 
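gfx_v8_0_mqd_set_priority() above (with gfx9 and gfx10 twins elsewhere in this patch) moves queue priority out of the runtime set_priority path, which the patch deletes below, and bakes it into the MQD once at queue init. A reduced model, assuming the 0x2 pipe / 0xf queue priority values that the removed gfx_v8_0_hqd_set_priority() used:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PIPE_PRIO_HIGH 0x2  /* matches AMDGPU_GFX_PIPE_PRIO_HIGH above */
#define QUEUE_PRIO_MAX 0xf  /* matches AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM */

struct mqd { uint32_t cp_hqd_pipe_priority, cp_hqd_queue_priority; };

static void mqd_set_priority(struct mqd *mqd, bool high_prio, bool *has_high_prio)
{
	if (high_prio) {
		mqd->cp_hqd_pipe_priority = PIPE_PRIO_HIGH;
		mqd->cp_hqd_queue_priority = QUEUE_PRIO_MAX;
	}
	/* Otherwise keep the firmware defaults: priority is now fixed
	 * per queue at MQD-init time, never flipped at runtime. */
	*has_high_prio = high_prio;
}

int main(void)
{
	struct mqd m = { 0, 0 };
	bool high;

	mqd_set_priority(&m, true, &high);
	printf("pipe %u, queue %u, high=%d\n",
	       m.cp_hqd_pipe_priority, m.cp_hqd_queue_priority, high);
	return 0;
}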
SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1; - - pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe; - - /* first me only has 2 entries, GFX and HP3D */ - if (ring->me > 0) - pipe_num -= 2; - - reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num; - tmp = RREG32(reg); - tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent); - WREG32(reg, tmp); -} - -static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - int i, pipe; - bool reserve; - struct amdgpu_ring *iring; - - mutex_lock(&adev->gfx.pipe_reserve_mutex); - pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0); - if (acquire) - set_bit(pipe, adev->gfx.pipe_reserve_bitmap); - else - clear_bit(pipe, adev->gfx.pipe_reserve_bitmap); - - if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) { - /* Clear all reservations - everyone reacquires all resources */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) - gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i], - true); - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) - gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i], - true); - } else { - /* Lower all pipes without a current reservation */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { - iring = &adev->gfx.gfx_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v8_0_ring_set_pipe_percent(iring, reserve); - } - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) { - iring = &adev->gfx.compute_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v8_0_ring_set_pipe_percent(iring, reserve); - } - } - - mutex_unlock(&adev->gfx.pipe_reserve_mutex); -} - -static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - uint32_t pipe_priority = acquire ? 0x2 : 0x0; - uint32_t queue_priority = acquire ? 
0xf : 0x0; - - mutex_lock(&adev->srbm_mutex); - vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); - - WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority); - WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority); - - vi_srbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); -} -static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - struct amdgpu_device *adev = ring->adev; - bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW; - - if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) - return; - - gfx_v8_0_hqd_set_priority(adev, ring, acquire); - gfx_v8_0_pipe_reserve_resources(adev, ring, acquire); -} - static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) @@ -6966,7 +6907,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { .test_ib = gfx_v8_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, - .set_priority = gfx_v8_0_ring_set_priority_compute, .emit_wreg = gfx_v8_0_ring_emit_wreg, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 889154a78c4a..d2d9dce68c2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -697,6 +697,11 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00), }; +static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = { + {SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)}, + {SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)}, +}; + static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] = { mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0, @@ -721,6 +726,59 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] = mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0, }; +void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v) +{ + static void *scratch_reg0; + static void *scratch_reg1; + static void *scratch_reg2; + static void *scratch_reg3; + static void *spare_int; + static uint32_t grbm_cntl; + static uint32_t grbm_idx; + + scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4; + scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4; + scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4; + scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4; + spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4; + + grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; + grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX; + + if (amdgpu_sriov_runtime(adev)) { + pr_err("shouldn't call rlcg write register during runtime\n"); + return; + } + + if (offset == grbm_cntl || offset == grbm_idx) { + if (offset == grbm_cntl) + writel(v, scratch_reg2); + else if (offset == grbm_idx) + writel(v, scratch_reg3); + + writel(v, ((void __iomem *)adev->rmmio) + (offset * 4)); + } else { + uint32_t i = 0; + uint32_t retries = 50000; + + writel(v, scratch_reg0); + writel(offset | 0x80000000, scratch_reg1); + writel(1, spare_int); + for (i = 0; i < retries; i++) { + u32 tmp; + + tmp = readl(scratch_reg1); + if (!(tmp & 0x80000000)) + break; + + udelay(10); + } + if (i >= retries) + pr_err("timeout: rlcg program 
reg:0x%05x failed !\n", offset); + } + +} + #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042 @@ -738,9 +796,9 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring); static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status); -static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev); static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, void *inject_if); +static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev); static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) @@ -1106,10 +1164,11 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) adev->gfx.me_fw_write_wait = false; adev->gfx.mec_fw_write_wait = false; - if ((adev->gfx.mec_fw_version < 0x000001a5) || + if ((adev->asic_type != CHIP_ARCTURUS) && + ((adev->gfx.mec_fw_version < 0x000001a5) || (adev->gfx.mec_feature_version < 46) || (adev->gfx.pfp_fw_version < 0x000000b7) || - (adev->gfx.pfp_feature_version < 46)) + (adev->gfx.pfp_feature_version < 46))) DRM_WARN_ONCE("CP firmware version too old, please update!"); switch (adev->asic_type) { @@ -1158,6 +1217,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) adev->gfx.mec_fw_write_wait = true; break; default: + adev->gfx.me_fw_write_wait = true; + adev->gfx.mec_fw_write_wait = true; break; } } @@ -1173,6 +1234,10 @@ struct amdgpu_gfxoff_quirk { static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = { /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 }, + /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */ + { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 }, + /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */ + { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 }, { 0, 0, 0, 0, 0 }, }; @@ -1846,6 +1911,10 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) break; } + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -1883,7 +1952,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) return r; } - memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size); + memset(hpd, 0, mec_hpd_size); amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); @@ -1916,7 +1985,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) { - WREG32_SOC15(GC, 0, mmSQ_IND_INDEX, + WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX, (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | (address << SQ_IND_INDEX__INDEX__SHIFT) | @@ -1928,7 +1997,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t regno, uint32_t num, uint32_t *out) { - WREG32_SOC15(GC, 0, mmSQ_IND_INDEX, + WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX, (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | (regno << SQ_IND_INDEX__INDEX__SHIFT) | @@ -1992,7 +2061,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs, .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q, .ras_error_inject = &gfx_v9_0_ras_error_inject, - .query_ras_error_count = &gfx_v9_0_query_ras_error_count + 
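The two entries added to amdgpu_gfxoff_quirk_list above extend a sentinel-terminated table keyed on PCI chip ID, subsystem ID and silicon revision; boards that match get GFXOFF disabled. A standalone model of the lookup, assuming the policy is an exact match on all five fields:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct gfxoff_quirk {
	uint16_t chip_vendor, chip_device;
	uint16_t subsys_vendor, subsys_device;
	uint8_t revision;
};

static const struct gfxoff_quirk quirks[] = {
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 }, /* bko#204689 */
	{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 }, /* bko#207171 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 }, /* Raven C6, VBIOS 113-RAVEN-114 */
	{ 0, 0, 0, 0, 0 },                        /* sentinel */
};

static bool gfxoff_quirked(uint16_t cv, uint16_t cd, uint16_t sv,
			   uint16_t sd, uint8_t rev)
{
	for (const struct gfxoff_quirk *q = quirks; q->chip_device; q++)
		if (q->chip_vendor == cv && q->chip_device == cd &&
		    q->subsys_vendor == sv && q->subsys_device == sd &&
		    q->revision == rev)
			return true;  /* GFXOFF is disabled for this board */
	return false;
}

int main(void)
{
	printf("%d\n", gfxoff_quirked(0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6));
	return 0;
}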
.query_ras_error_count = &gfx_v9_0_query_ras_error_count, + .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count, }; static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = { @@ -2003,7 +2073,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = { .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs, .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q, .ras_error_inject = &gfx_v9_4_ras_error_inject, - .query_ras_error_count = &gfx_v9_4_query_ras_error_count + .query_ras_error_count = &gfx_v9_4_query_ras_error_count, + .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count, }; static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) @@ -3309,6 +3380,22 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring) WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp); } +static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + ring->has_high_prio = true; + mqd->cp_hqd_queue_priority = + AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; + } else { + ring->has_high_prio = false; + } + } +} + static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -3445,6 +3532,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); mqd->cp_hqd_ib_control = tmp; + /* set static priority for a queue/ring */ + gfx_v9_0_mqd_set_priority(ring, mqd); + mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); + /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. */ @@ -3963,6 +4054,63 @@ static int gfx_v9_0_soft_reset(void *handle) return 0; } +static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev) +{ + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &kiq->ring; + + BUG_ON(!ring->funcs->emit_rreg); + + spin_lock_irqsave(&kiq->ring_lock, flags); + amdgpu_ring_alloc(ring, 32); + amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); + amdgpu_ring_write(ring, 9 | /* src: register*/ + (5 << 8) | /* dst: memory */ + (1 << 16) | /* count sel */ + (1 << 20)); /* write confirm */ + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + + kiq->reg_val_offs * 4)); + amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + + kiq->reg_val_offs * 4)); + amdgpu_fence_emit_polling(ring, &seq); + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in virt_kiq_rreg, which cause + * gpu_recover() hang there. 
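gfx_v9_0_kiq_read_clock() above is the standard KIQ-mediated register read: a COPY_DATA packet snapshots the 64-bit clock into a write-back slot, then the CPU polls the fence. The comment explains the two early bailouts; the retry skeleton looks like this (a user-space model, with fence_wait_polling() standing in for amdgpu_fence_wait_polling() and the retry count for MAX_KIQ_REG_TRY):

#include <stdbool.h>
#include <stdio.h>

#define MAX_TRY 100  /* stands in for MAX_KIQ_REG_TRY */

/* Hypothetical poll primitive: returns >= 1 once the fence signals. */
static long fence_wait_polling(int attempt)
{
	return attempt >= 3;  /* pretend the KIQ finishes on the third try */
}

static int kiq_wait(bool in_gpu_reset, bool in_irq)
{
	int cnt = 0;
	long r = fence_wait_polling(cnt);

	/* Never keep waiting during reset or in IRQ context: blocking here
	 * could deadlock gpu_recover(), exactly as the comment above says. */
	if (r < 1 && (in_gpu_reset || in_irq))
		return -1;

	while (r < 1 && cnt++ < MAX_TRY)
		r = fence_wait_polling(cnt);  /* the kernel msleep()s between tries */

	return cnt > MAX_TRY ? -1 : 0;
}

int main(void)
{
	printf("%d\n", kiq_wait(false, false));  /* prints 0: fence signalled */
	return 0;
}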
+ * + * also don't wait anymore for IRQ context + * */ + if (r < 1 && (adev->in_gpu_reset || in_interrupt())) + goto failed_kiq_read; + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) + goto failed_kiq_read; + + return (uint64_t)adev->wb.wb[kiq->reg_val_offs] | + (uint64_t)adev->wb.wb[kiq->reg_val_offs + 1 ] << 32ULL; + +failed_kiq_read: + pr_err("failed to read gpu clock\n"); + return ~0; +} + static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; @@ -3970,16 +4118,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) { - uint32_t tmp, lsb, msb, i = 0; - do { - if (i != 0) - udelay(1); - tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB); - lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB); - msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB); - i++; - } while (unlikely(tmp != msb) && (i < adev->usec_timeout)); - clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL); + clock = gfx_v9_0_kiq_read_clock(adev); } else { WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | @@ -4053,6 +4192,101 @@ static const u32 sgpr_init_compute_shader[] = 0xbe800080, 0xbf810000, }; +static const u32 vgpr_init_compute_shader_arcturus[] = { + 0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080, + 0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080, + 0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080, + 0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080, + 0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080, + 0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080, + 0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080, + 0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080, + 0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080, + 0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080, + 0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080, + 0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080, + 0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080, + 0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080, + 0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080, + 0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080, + 0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080, + 0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080, + 0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080, + 0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080, + 0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080, + 0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080, + 0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080, + 0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080, + 0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080, + 0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080, + 0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 
0xd3d94050, 0x18000080, + 0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080, + 0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080, + 0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080, + 0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080, + 0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080, + 0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080, + 0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080, + 0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080, + 0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080, + 0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080, + 0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080, + 0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080, + 0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080, + 0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080, + 0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080, + 0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080, + 0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080, + 0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080, + 0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080, + 0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080, + 0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080, + 0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080, + 0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080, + 0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080, + 0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080, + 0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080, + 0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080, + 0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080, + 0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080, + 0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080, + 0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080, + 0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080, + 0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080, + 0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080, + 0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080, + 0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080, + 0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080, + 0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080, + 0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080, + 0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080, + 0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080, + 0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080, + 0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080, + 0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080, + 0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080, + 0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080, + 0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080, + 0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 
0xd3d940e0, 0x18000080, + 0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080, + 0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080, + 0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080, + 0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080, + 0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080, + 0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080, + 0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080, + 0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080, + 0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080, + 0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080, + 0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a, + 0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280, + 0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000, + 0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904, + 0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000, + 0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a, + 0xbf84fff8, 0xbf810000, +}; + /* When below register arrays changed, please update gpr_reg_size, and sec_ded_counter_reg_size in function gfx_v9_0_do_edc_gpr_workarounds, to cover all gfx9 ASICs */ @@ -4073,6 +4307,23 @@ static const struct soc15_reg_entry vgpr_init_regs[] = { { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff }, }; +static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = { + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */ + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff }, +}; + static const struct soc15_reg_entry sgpr1_init_regs[] = { { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, @@ -4141,7 +4392,6 @@ static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = { { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16}, { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2}, { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6}, - { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1}, }; static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev) @@ -4204,7 +4454,10 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) adev->gfx.config.max_cu_per_sh * adev->gfx.config.max_sh_per_se; int sgpr_work_group_size = 5; - int gpr_reg_size = compute_dim_x / 16 + 6; + int gpr_reg_size = adev->gfx.config.max_shader_engines + 6; + int vgpr_init_shader_size; + const u32 *vgpr_init_shader_ptr; + const struct 
soc15_reg_entry *vgpr_init_regs_ptr; /* only support when RAS is enabled */ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) @@ -4214,6 +4467,16 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) if (!ring->sched.ready) return 0; + if (adev->asic_type == CHIP_ARCTURUS) { + vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus; + vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus); + vgpr_init_regs_ptr = vgpr_init_regs_arcturus; + } else { + vgpr_init_shader_ptr = vgpr_init_compute_shader; + vgpr_init_shader_size = sizeof(vgpr_init_compute_shader); + vgpr_init_regs_ptr = vgpr_init_regs; + } + total_size = (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */ total_size += @@ -4222,7 +4485,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */ total_size = ALIGN(total_size, 256); vgpr_offset = total_size; - total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256); + total_size += ALIGN(vgpr_init_shader_size, 256); sgpr_offset = total_size; total_size += sizeof(sgpr_init_compute_shader); @@ -4235,8 +4498,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) } /* load the compute shaders */ - for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++) - ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i]; + for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++) + ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i]; for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++) ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i]; @@ -4248,9 +4511,9 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* write the register state for the compute dispatch */ for (i = 0; i < gpr_reg_size; i++) { ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); - ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i]) + ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i]) - PACKET3_SET_SH_REG_START; - ib.ptr[ib.length_dw++] = vgpr_init_regs[i].reg_value; + ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value; } /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8; @@ -4262,7 +4525,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* write dispatch packet */ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); - ib.ptr[ib.length_dw++] = compute_dim_x; /* x */ + ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */ ib.ptr[ib.length_dw++] = 1; /* y */ ib.ptr[ib.length_dw++] = 1; /* z */ ib.ptr[ib.length_dw++] = @@ -4342,18 +4605,6 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) goto fail; } - switch (adev->asic_type) - { - case CHIP_VEGA20: - gfx_v9_0_clear_ras_edc_counter(adev); - break; - case CHIP_ARCTURUS: - gfx_v9_4_clear_ras_edc_counter(adev); - break; - default: - break; - } - fail: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); @@ -4401,6 +4652,10 @@ static int gfx_v9_0_ecc_late_init(void *handle) if (r) return r; + if (adev->gfx.funcs && + adev->gfx.funcs->reset_ras_error_count) + adev->gfx.funcs->reset_ras_error_count(adev); + r = amdgpu_gfx_ras_late_init(adev); if (r) return r; @@ -4705,6 +4960,47 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, return 0; } +static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 data; + + data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL); + + data &= 
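The EDC GPR workaround above now selects the VGPR-init shader and its register table per ASIC (Arcturus gets its own pair), then emits one SET_SH_REG packet per table entry before the dispatch. A sketch of that packet stream; the opcode and register-window base are assumed from the soc15 PM4 headers, and the entry used in main() is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Assumed PM4 encodings for the SET_SH_REG stream emitted above. */
#define PACKET3(op, n)           ((3u << 30) | (((op) & 0xffu) << 8) | (((n) & 0x3fffu) << 16))
#define PACKET3_SET_SH_REG       0x76
#define PACKET3_SET_SH_REG_START 0x2c00

struct reg_entry { uint32_t offset, value; };

static uint32_t emit_sh_regs(uint32_t *ib, const struct reg_entry *regs, int n)
{
	uint32_t dw = 0;

	for (int i = 0; i < n; i++) {
		ib[dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib[dw++] = regs[i].offset - PACKET3_SET_SH_REG_START;
		ib[dw++] = regs[i].value;
	}
	return dw;  /* caller adds this to ib.length_dw */
}

int main(void)
{
	uint32_t ib[16];
	/* Hypothetical absolute offset/value pair for one COMPUTE_* register. */
	const struct reg_entry regs[] = { { 0x2e12, 0x40 } };

	printf("%u dwords emitted\n", emit_sh_regs(ib, regs, 1));
	return 0;
}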
~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; + + WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); +} + +static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev, + uint32_t offset, + struct soc15_reg_rlcg *entries, int arr_size) +{ + int i; + uint32_t reg; + + if (!entries) + return false; + + for (i = 0; i < arr_size; i++) { + const struct soc15_reg_rlcg *entry; + + entry = &entries[i]; + reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg; + if (offset == reg) + return true; + } + + return false; +} + +static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset) +{ + return gfx_v9_0_check_rlcg_range(adev, offset, + (void *)rlcg_access_gc_9_0, + ARRAY_SIZE(rlcg_access_gc_9_0)); +} + static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { .is_rlc_enabled = gfx_v9_0_is_rlc_enabled, .set_safe_mode = gfx_v9_0_set_safe_mode, @@ -4716,7 +5012,10 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { .resume = gfx_v9_0_rlc_resume, .stop = gfx_v9_0_rlc_stop, .reset = gfx_v9_0_rlc_reset, - .start = gfx_v9_0_rlc_start + .start = gfx_v9_0_rlc_start, + .update_spm_vmid = gfx_v9_0_update_spm_vmid, + .rlcg_wreg = gfx_v9_0_rlcg_wreg, + .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range, }; static int gfx_v9_0_set_powergating_state(void *handle, @@ -4728,10 +5027,9 @@ static int gfx_v9_0_set_powergating_state(void *handle, switch (adev->asic_type) { case CHIP_RAVEN: case CHIP_RENOIR: - if (!enable) { + if (!enable) amdgpu_gfx_off_ctrl(adev, false); - cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); - } + if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true); gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true); @@ -4755,12 +5053,7 @@ static int gfx_v9_0_set_powergating_state(void *handle, amdgpu_gfx_off_ctrl(adev, true); break; case CHIP_VEGA12: - if (!enable) { - amdgpu_gfx_off_ctrl(adev, false); - cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); - } else { - amdgpu_gfx_off_ctrl(adev, true); - } + amdgpu_gfx_off_ctrl(adev, enable); break; default: break; @@ -4919,7 +5212,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= INDIRECT_BUFFER_PRE_ENB(1); - if (!(ib->flags & AMDGPU_IB_FLAG_CE)) + if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid) gfx_v9_0_ring_emit_de_meta(ring); } @@ -5044,105 +5337,6 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring) return wptr; } -static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring, - bool acquire) -{ - struct amdgpu_device *adev = ring->adev; - int pipe_num, tmp, reg; - int pipe_percent = acquire ? 
SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1; - - pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe; - - /* first me only has 2 entries, GFX and HP3D */ - if (ring->me > 0) - pipe_num -= 2; - - reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num; - tmp = RREG32(reg); - tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent); - WREG32(reg, tmp); -} - -static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - int i, pipe; - bool reserve; - struct amdgpu_ring *iring; - - mutex_lock(&adev->gfx.pipe_reserve_mutex); - pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0); - if (acquire) - set_bit(pipe, adev->gfx.pipe_reserve_bitmap); - else - clear_bit(pipe, adev->gfx.pipe_reserve_bitmap); - - if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) { - /* Clear all reservations - everyone reacquires all resources */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) - gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i], - true); - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) - gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i], - true); - } else { - /* Lower all pipes without a current reservation */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { - iring = &adev->gfx.gfx_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v9_0_ring_set_pipe_percent(iring, reserve); - } - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) { - iring = &adev->gfx.compute_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v9_0_ring_set_pipe_percent(iring, reserve); - } - } - - mutex_unlock(&adev->gfx.pipe_reserve_mutex); -} - -static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - uint32_t pipe_priority = acquire ? 0x2 : 0x0; - uint32_t queue_priority = acquire ? 
0xf : 0x0; - - mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); - - WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority); - WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority); - - soc15_grbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); -} - -static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - struct amdgpu_device *adev = ring->adev; - bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW; - - if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) - return; - - gfx_v9_0_hqd_set_priority(adev, ring, acquire); - gfx_v9_0_pipe_reserve_resources(adev, ring, acquire); -} - static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -6322,10 +6516,13 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg, return 0; } -static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev) +static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev) { int i, j, k; + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return; + /* read back registers to clear the counters */ mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { @@ -6513,7 +6710,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { .test_ib = gfx_v9_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, - .set_priority = gfx_v9_0_ring_set_priority_compute, .emit_wreg = gfx_v9_0_ring_emit_wreg, .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c index f099f13d7f1e..dce945ef21a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c @@ -710,14 +710,16 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev, sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT); if (sec_count) { - DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, SEC %d\n", i, vml2_mems[i], sec_count); err_data->ce_count += sec_count; } ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT); if (ded_count) { - DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, DED %d\n", i, vml2_mems[i], ded_count); err_data->ue_count += ded_count; } @@ -893,10 +895,13 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, return 0; } -void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev) +void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev) { int i, j, k; + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return; + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) { for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h index 2e3f6f755ad4..1ffecc5c0f0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h @@ -32,4 +32,6 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if); +void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev); + #endif /* __GFX_V9_4_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index b70c7b483c24..cc866c367939 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -81,24 +81,31 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev) { uint64_t value; - /* Disable AGP. */ - WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0); - WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0); - WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF); - - /* Program the system aperture low logical page number. */ - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); - - /* Set default page address. */ - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start - + adev->vm_manager.vram_base_offset; - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, - (u32)(value >> 12)); - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, - (u32)(value >> 44)); + if (!amdgpu_sriov_vf(adev)) { + /* + * the new L1 policy will block SRIOV guest from writing + * these regs, and they will be programmed at host. + * so skip programming these regs. + */ + /* Disable AGP. */ + WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0); + WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0); + WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF); + + /* Program the system aperture low logical page number. */ + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, + adev->gmc.vram_start >> 18); + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + adev->gmc.vram_end >> 18); + + /* Set default page address. */ + value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + + adev->vm_manager.vram_base_offset; + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + (u32)(value >> 12)); + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + (u32)(value >> 44)); + } /* Program "protection fault". */ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, @@ -135,6 +142,10 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev) { uint32_t tmp; + /* These regs are not accessible for VF, PF will program these in SRIOV */ + if (amdgpu_sriov_vf(adev)) + return; + /* Setup L2 cache */ tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1); @@ -256,18 +267,6 @@ static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev) int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev)) { - /* - * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are - * VF copy registers so vbios post doesn't program them, for - * SRIOV driver need to program them - */ - WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE, - adev->gmc.vram_start >> 24); - WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP, - adev->gmc.vram_end >> 24); - } - /* GART Enable.
*/ gfxhub_v2_0_init_gart_aperture_regs(adev); gfxhub_v2_0_init_system_aperture_regs(adev); @@ -298,9 +297,11 @@ void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 0); WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp); - /* Setup L2 cache */ - WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0); - WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0); + if (!amdgpu_sriov_vf(adev)) { + /* Setup L2 cache */ + WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0); + WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0); + } } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index cc0c273a86f9..8606f877478f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -476,13 +476,26 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, { bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); const unsigned eng = 17; - u32 j, inv_req, tmp; + u32 j, inv_req, inv_req2, tmp; struct amdgpu_vmhub *hub; BUG_ON(vmhub >= adev->num_vmhubs); hub = &adev->vmhub[vmhub]; - inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type); + if (adev->gmc.xgmi.num_physical_nodes && + adev->asic_type == CHIP_VEGA20) { + /* Vega20+XGMI caches PTEs in TC and TLB. Add a + * heavy-weight TLB flush (type 2), which flushes + * both. Due to a race condition with concurrent + * memory accesses using the same TLB cache line, we + * still need a second TLB flush after this. + */ + inv_req = gmc_v9_0_get_invalidate_req(vmid, 2); + inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type); + } else { + inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type); + inv_req2 = 0; + } /* This is necessary for a HW workaround under SRIOV as well * as GFXOFF under bare metal @@ -521,21 +534,27 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req); + do { + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req); - /* - * Issue a dummy read to wait for the ACK register to be cleared - * to avoid a false ACK due to the new fast GRBM interface. - */ - if (vmhub == AMDGPU_GFXHUB_0) - RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng); + /* + * Issue a dummy read to wait for the ACK register to + * be cleared to avoid a false ACK due to the new fast + * GRBM interface. + */ + if (vmhub == AMDGPU_GFXHUB_0) + RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng); - for (j = 0; j < adev->usec_timeout; j++) { - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng); - if (tmp & (1 << vmid)) - break; - udelay(1); - } + for (j = 0; j < adev->usec_timeout; j++) { + tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng); + if (tmp & (1 << vmid)) + break; + udelay(1); + } + + inv_req = inv_req2; + inv_req2 = 0; + } while (inv_req); /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ if (use_semaphore) @@ -577,9 +596,26 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, return -EIO; if (ring->sched.ready) { + /* Vega20+XGMI caches PTEs in TC and TLB. Add a + * heavy-weight TLB flush (type 2), which flushes + * both. Due to a race condition with concurrent + * memory accesses using the same TLB cache line, we + * still need a second TLB flush after this. 
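The do/while rewrite of gmc_v9_0_flush_gpu_tlb() above can issue two invalidation requests back to back: on Vega20 with XGMI, a heavy-weight type-2 flush first (it flushes both TC and TLB), then the caller's original flush type to close the race with concurrent accesses on the same TLB cache line. A toy model of the control flow, with make_req() standing in for gmc_v9_0_get_invalidate_req():

#include <stdint.h>
#include <stdio.h>

static uint32_t make_req(int flush_type)
{
	return 0x10000u | (uint32_t)flush_type;  /* opaque request word */
}

static void flush_gpu_tlb(int flush_type, int vega20_xgmi)
{
	uint32_t inv_req, inv_req2;

	if (vega20_xgmi) {
		inv_req  = make_req(2);          /* heavy-weight: TC + TLB */
		inv_req2 = make_req(flush_type); /* second flush closes the race */
	} else {
		inv_req  = make_req(flush_type);
		inv_req2 = 0;                    /* loop body runs exactly once */
	}

	do {
		printf("write VM_INV_ENG0_REQ = 0x%08x, then poll the ack\n", inv_req);
		inv_req  = inv_req2;
		inv_req2 = 0;
	} while (inv_req);
}

int main(void)
{
	flush_gpu_tlb(0, 1);  /* Vega20+XGMI path: two writes */
	return 0;
}

The same pairing shows up in the PASID path below, where the KIQ ring allocation grows by one extra invalidate_tlbs packet when the workaround applies.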
+ */ + bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes && + adev->asic_type == CHIP_VEGA20); + /* 2 dwords flush + 8 dwords fence */ + unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8; + + if (vega20_xgmi_wa) + ndw += kiq->pmf->invalidate_tlbs_size; + spin_lock(&adev->gfx.kiq.ring_lock); /* 2 dwords flush + 8 dwords fence */ - amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); + amdgpu_ring_alloc(ring, ndw); + if (vega20_xgmi_wa) + kiq->pmf->kiq_invalidate_tlbs(ring, + pasid, 2, all_hub); kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub); amdgpu_fence_emit_polling(ring, &seq); @@ -886,32 +922,25 @@ static int gmc_v9_0_late_init(void *handle) if (r) return r; /* Check if ecc is available */ - if (!amdgpu_sriov_vf(adev)) { - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA20: - case CHIP_ARCTURUS: - r = amdgpu_atomfirmware_mem_ecc_supported(adev); - if (!r) { - DRM_INFO("ECC is not present.\n"); - if (adev->df.funcs->enable_ecc_force_par_wr_rmw) - adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); - } else { - DRM_INFO("ECC is active.\n"); - } - - r = amdgpu_atomfirmware_sram_ecc_supported(adev); - if (!r) { - DRM_INFO("SRAM ECC is not present.\n"); - } else { - DRM_INFO("SRAM ECC is active.\n"); - } - break; - default: - break; - } + if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) { + r = amdgpu_atomfirmware_mem_ecc_supported(adev); + if (!r) { + DRM_INFO("ECC is not present.\n"); + if (adev->df.funcs->enable_ecc_force_par_wr_rmw) + adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); + } else + DRM_INFO("ECC is active.\n"); + + r = amdgpu_atomfirmware_sram_ecc_supported(adev); + if (!r) + DRM_INFO("SRAM ECC is not present.\n"); + else + DRM_INFO("SRAM ECC is active.\n"); } + if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count) + adev->mmhub.funcs->reset_ras_error_count(adev); + r = amdgpu_gmc_ras_late_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 49a3a56ec017..396c2a624de0 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -747,7 +747,19 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev, err_data->ue_count += ded_count; } +static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev) +{ + uint32_t i; + + /* read back edc counter registers to reset the counters to 0 */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { + for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i])); + } +} + const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = { .ras_late_init = amdgpu_mmhub_ras_late_init, .query_ras_error_count = mmhub_v1_0_query_ras_error_count, + .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index bde189680521..fb3f228458e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -72,11 +72,18 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev) WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, 0); WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF); - /* Program the system aperture low logical page number. 
*/ - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); + if (!amdgpu_sriov_vf(adev)) { + /* + * The new L1 policy will block the SRIOV guest from writing + * these regs; they will be programmed by the host instead, + * so skip programming them here. + */ + /* Program the system aperture low logical page number. */ + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, + adev->gmc.vram_start >> 18); + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + adev->gmc.vram_end >> 18); + } /* Set default page address. */ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + @@ -247,18 +254,6 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev) int mmhub_v2_0_gart_enable(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev)) { - /* - * MMMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are - * VF copy registers so vbios post doesn't program them, for - * SRIOV driver need to program them - */ - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE, - adev->gmc.vram_start >> 24); - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP, - adev->gmc.vram_end >> 24); - } - /* GART Enable. */ mmhub_v2_0_init_gart_aperture_regs(adev); mmhub_v2_0_init_system_aperture_regs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index a5281df8d84f..c0e3efcb09bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -1539,8 +1539,11 @@ static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = { { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 }, }; -static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg, - uint32_t value, uint32_t *sec_count, uint32_t *ded_count) +static int mmhub_v9_4_get_ras_error_count(struct amdgpu_device *adev, + const struct soc15_reg_entry *reg, + uint32_t value, + uint32_t *sec_count, + uint32_t *ded_count) { uint32_t i; uint32_t sec_cnt, ded_cnt; @@ -1553,7 +1556,7 @@ static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg, mmhub_v9_4_ras_fields[i].sec_count_mask) >> mmhub_v9_4_ras_fields[i].sec_count_shift; if (sec_cnt) { - DRM_INFO("MMHUB SubBlock %s, SEC %d\n", + dev_info(adev->dev, "MMHUB SubBlock %s, SEC %d\n", mmhub_v9_4_ras_fields[i].name, sec_cnt); *sec_count += sec_cnt; @@ -1563,7 +1566,7 @@ static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg, mmhub_v9_4_ras_fields[i].ded_count_mask) >> mmhub_v9_4_ras_fields[i].ded_count_shift; if (ded_cnt) { - DRM_INFO("MMHUB SubBlock %s, DED %d\n", + dev_info(adev->dev, "MMHUB SubBlock %s, DED %d\n", mmhub_v9_4_ras_fields[i].name, ded_cnt); *ded_count += ded_cnt; @@ -1588,7 +1591,7 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev, reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i])); if (reg_value) - mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i], + mmhub_v9_4_get_ras_error_count(adev, &mmhub_v9_4_edc_cnt_regs[i], reg_value, &sec_count, &ded_count); } @@ -1596,7 +1599,19 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev, err_data->ue_count += ded_count; } +static void mmhub_v9_4_reset_ras_error_count(struct amdgpu_device *adev) +{ + uint32_t i; + + /* read back edc counter registers to reset the counters to 0 */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { + for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); 
i++) + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i])); + } +} + const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = { .ras_late_init = amdgpu_mmhub_ras_late_init, .query_ras_error_count = mmhub_v9_4_query_ras_error_count, + .reset_ras_error_count = mmhub_v9_4_reset_ras_error_count, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h new file mode 100644 index 000000000000..1b5086c7d4e6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h @@ -0,0 +1,338 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __MMSCH_V2_0_H__ +#define __MMSCH_V2_0_H__ + +// addressBlock: uvd0_mmsch_dec +// base address: 0x1e000 +#define mmMMSCH_UCODE_ADDR 0x0000 +#define mmMMSCH_UCODE_ADDR_BASE_IDX 0 +#define mmMMSCH_UCODE_DATA 0x0001 +#define mmMMSCH_UCODE_DATA_BASE_IDX 0 +#define mmMMSCH_SRAM_ADDR 0x0002 +#define mmMMSCH_SRAM_ADDR_BASE_IDX 0 +#define mmMMSCH_SRAM_DATA 0x0003 +#define mmMMSCH_SRAM_DATA_BASE_IDX 0 +#define mmMMSCH_VF_SRAM_OFFSET 0x0004 +#define mmMMSCH_VF_SRAM_OFFSET_BASE_IDX 0 +#define mmMMSCH_DB_SRAM_OFFSET 0x0005 +#define mmMMSCH_DB_SRAM_OFFSET_BASE_IDX 0 +#define mmMMSCH_CTX_SRAM_OFFSET 0x0006 +#define mmMMSCH_CTX_SRAM_OFFSET_BASE_IDX 0 +#define mmMMSCH_CTL 0x0007 +#define mmMMSCH_CTL_BASE_IDX 0 +#define mmMMSCH_INTR 0x0008 +#define mmMMSCH_INTR_BASE_IDX 0 +#define mmMMSCH_INTR_ACK 0x0009 +#define mmMMSCH_INTR_ACK_BASE_IDX 0 +#define mmMMSCH_INTR_STATUS 0x000a +#define mmMMSCH_INTR_STATUS_BASE_IDX 0 +#define mmMMSCH_VF_VMID 0x000b +#define mmMMSCH_VF_VMID_BASE_IDX 0 +#define mmMMSCH_VF_CTX_ADDR_LO 0x000c +#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0 +#define mmMMSCH_VF_CTX_ADDR_HI 0x000d +#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0 +#define mmMMSCH_VF_CTX_SIZE 0x000e +#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0 +#define mmMMSCH_VF_GPCOM_ADDR_LO 0x000f +#define mmMMSCH_VF_GPCOM_ADDR_LO_BASE_IDX 0 +#define mmMMSCH_VF_GPCOM_ADDR_HI 0x0010 +#define mmMMSCH_VF_GPCOM_ADDR_HI_BASE_IDX 0 +#define mmMMSCH_VF_GPCOM_SIZE 0x0011 +#define mmMMSCH_VF_GPCOM_SIZE_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_HOST 0x0012 +#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_RESP 0x0013 +#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_0 0x0014 +#define mmMMSCH_VF_MAILBOX_0_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_0_RESP 0x0015 +#define mmMMSCH_VF_MAILBOX_0_RESP_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_1 0x0016 +#define 
mmMMSCH_VF_MAILBOX_1_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_1_RESP 0x0017 +#define mmMMSCH_VF_MAILBOX_1_RESP_BASE_IDX 0 +#define mmMMSCH_CNTL 0x001c +#define mmMMSCH_CNTL_BASE_IDX 0 +#define mmMMSCH_NONCACHE_OFFSET0 0x001d +#define mmMMSCH_NONCACHE_OFFSET0_BASE_IDX 0 +#define mmMMSCH_NONCACHE_SIZE0 0x001e +#define mmMMSCH_NONCACHE_SIZE0_BASE_IDX 0 +#define mmMMSCH_NONCACHE_OFFSET1 0x001f +#define mmMMSCH_NONCACHE_OFFSET1_BASE_IDX 0 +#define mmMMSCH_NONCACHE_SIZE1 0x0020 +#define mmMMSCH_NONCACHE_SIZE1_BASE_IDX 0 +#define mmMMSCH_PDEBUG_STATUS 0x0021 +#define mmMMSCH_PDEBUG_STATUS_BASE_IDX 0 +#define mmMMSCH_PDEBUG_DATA_32UPPERBITS 0x0022 +#define mmMMSCH_PDEBUG_DATA_32UPPERBITS_BASE_IDX 0 +#define mmMMSCH_PDEBUG_DATA_32LOWERBITS 0x0023 +#define mmMMSCH_PDEBUG_DATA_32LOWERBITS_BASE_IDX 0 +#define mmMMSCH_PDEBUG_EPC 0x0024 +#define mmMMSCH_PDEBUG_EPC_BASE_IDX 0 +#define mmMMSCH_PDEBUG_EXCCAUSE 0x0025 +#define mmMMSCH_PDEBUG_EXCCAUSE_BASE_IDX 0 +#define mmMMSCH_PROC_STATE1 0x0026 +#define mmMMSCH_PROC_STATE1_BASE_IDX 0 +#define mmMMSCH_LAST_MC_ADDR 0x0027 +#define mmMMSCH_LAST_MC_ADDR_BASE_IDX 0 +#define mmMMSCH_LAST_MEM_ACCESS_HI 0x0028 +#define mmMMSCH_LAST_MEM_ACCESS_HI_BASE_IDX 0 +#define mmMMSCH_LAST_MEM_ACCESS_LO 0x0029 +#define mmMMSCH_LAST_MEM_ACCESS_LO_BASE_IDX 0 +#define mmMMSCH_IOV_ACTIVE_FCN_ID 0x002a +#define mmMMSCH_IOV_ACTIVE_FCN_ID_BASE_IDX 0 +#define mmMMSCH_SCRATCH_0 0x002b +#define mmMMSCH_SCRATCH_0_BASE_IDX 0 +#define mmMMSCH_SCRATCH_1 0x002c +#define mmMMSCH_SCRATCH_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_0 0x002d +#define mmMMSCH_GPUIOV_SCH_BLOCK_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_CONTROL_0 0x002e +#define mmMMSCH_GPUIOV_CMD_CONTROL_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_0 0x002f +#define mmMMSCH_GPUIOV_CMD_STATUS_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0 0x0030 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0 0x0031 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0 0x0032 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW6_0 0x0033 +#define mmMMSCH_GPUIOV_DW6_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW7_0 0x0034 +#define mmMMSCH_GPUIOV_DW7_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW8_0 0x0035 +#define mmMMSCH_GPUIOV_DW8_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_1 0x0036 +#define mmMMSCH_GPUIOV_SCH_BLOCK_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_CONTROL_1 0x0037 +#define mmMMSCH_GPUIOV_CMD_CONTROL_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_1 0x0038 +#define mmMMSCH_GPUIOV_CMD_STATUS_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1 0x0039 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1 0x003a +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1 0x003b +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW6_1 0x003c +#define mmMMSCH_GPUIOV_DW6_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW7_1 0x003d +#define mmMMSCH_GPUIOV_DW7_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW8_1 0x003e +#define mmMMSCH_GPUIOV_DW8_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CNTXT 0x003f +#define mmMMSCH_GPUIOV_CNTXT_BASE_IDX 0 +#define mmMMSCH_SCRATCH_2 0x0040 +#define mmMMSCH_SCRATCH_2_BASE_IDX 0 +#define mmMMSCH_SCRATCH_3 0x0041 +#define mmMMSCH_SCRATCH_3_BASE_IDX 0 +#define mmMMSCH_SCRATCH_4 0x0042 +#define mmMMSCH_SCRATCH_4_BASE_IDX 0 +#define mmMMSCH_SCRATCH_5 0x0043 +#define mmMMSCH_SCRATCH_5_BASE_IDX 0 +#define mmMMSCH_SCRATCH_6 0x0044 +#define 
mmMMSCH_SCRATCH_6_BASE_IDX 0 +#define mmMMSCH_SCRATCH_7 0x0045 +#define mmMMSCH_SCRATCH_7_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_HEAD_0 0x0046 +#define mmMMSCH_VFID_FIFO_HEAD_0_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_TAIL_0 0x0047 +#define mmMMSCH_VFID_FIFO_TAIL_0_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_HEAD_1 0x0048 +#define mmMMSCH_VFID_FIFO_HEAD_1_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_TAIL_1 0x0049 +#define mmMMSCH_VFID_FIFO_TAIL_1_BASE_IDX 0 +#define mmMMSCH_NACK_STATUS 0x004a +#define mmMMSCH_NACK_STATUS_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX0_DATA 0x004b +#define mmMMSCH_VF_MAILBOX0_DATA_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX1_DATA 0x004c +#define mmMMSCH_VF_MAILBOX1_DATA_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0 0x004d +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0 0x004e +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0 0x004f +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1 0x0050 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1 0x0051 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1 0x0052 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CNTXT_IP 0x0053 +#define mmMMSCH_GPUIOV_CNTXT_IP_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_2 0x0054 +#define mmMMSCH_GPUIOV_SCH_BLOCK_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_CONTROL_2 0x0055 +#define mmMMSCH_GPUIOV_CMD_CONTROL_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_2 0x0056 +#define mmMMSCH_GPUIOV_CMD_STATUS_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2 0x0057 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2 0x0058 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2 0x0059 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW6_2 0x005a +#define mmMMSCH_GPUIOV_DW6_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW7_2 0x005b +#define mmMMSCH_GPUIOV_DW7_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW8_2 0x005c +#define mmMMSCH_GPUIOV_DW8_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2 0x005d +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2 0x005e +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2 0x005f +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_HEAD_2 0x0060 +#define mmMMSCH_VFID_FIFO_HEAD_2_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_TAIL_2 0x0061 +#define mmMMSCH_VFID_FIFO_TAIL_2_BASE_IDX 0 +#define mmMMSCH_VM_BUSY_STATUS_0 0x0062 +#define mmMMSCH_VM_BUSY_STATUS_0_BASE_IDX 0 +#define mmMMSCH_VM_BUSY_STATUS_1 0x0063 +#define mmMMSCH_VM_BUSY_STATUS_1_BASE_IDX 0 +#define mmMMSCH_VM_BUSY_STATUS_2 0x0064 +#define mmMMSCH_VM_BUSY_STATUS_2_BASE_IDX 0 + +#define MMSCH_VERSION_MAJOR 2 +#define MMSCH_VERSION_MINOR 0 +#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR) + +enum mmsch_v2_0_command_type { + MMSCH_COMMAND__DIRECT_REG_WRITE = 0, + MMSCH_COMMAND__DIRECT_REG_POLLING = 2, + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3, + MMSCH_COMMAND__INDIRECT_REG_WRITE = 8, + MMSCH_COMMAND__END = 0xf +}; + +struct mmsch_v2_0_init_header { + uint32_t version; + uint32_t header_size; + uint32_t vcn_init_status; + uint32_t vcn_table_offset; + uint32_t vcn_table_size; +}; + +struct mmsch_v2_0_cmd_direct_reg_header { + uint32_t reg_offset : 28; + uint32_t command_type : 4; +}; + 
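A note on the command encoding above: the direct-command header packs a 28-bit register dword-offset and a 4-bit command type into a single dword, which is why the insert helpers defined further below can simply memcpy each command struct into the init table and advance the cursor by sizeof()/4 dwords. A minimal standalone sketch of that layout, assuming (as the little-endian ABIs this driver targets lay out bitfields) that the first-declared field occupies the low bits:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    struct hdr {
        uint32_t reg_offset   : 28;  /* register offset, in dwords */
        uint32_t command_type : 4;   /* e.g. MMSCH_COMMAND__DIRECT_REG_WRITE */
    };

    int main(void)
    {
        /* 2 == MMSCH_COMMAND__DIRECT_REG_POLLING in the enum above */
        struct hdr h = { .reg_offset = 0x123456, .command_type = 2 };
        uint32_t dw;

        memcpy(&dw, &h, sizeof(dw));           /* same trick the helpers use */
        assert(dw == ((2u << 28) | 0x123456)); /* type lands in bits [31:28] */
        return 0;
    }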
+struct mmsch_v2_0_cmd_indirect_reg_header { + uint32_t reg_offset : 20; + uint32_t reg_idx_space : 8; + uint32_t command_type : 4; +}; + +struct mmsch_v2_0_cmd_direct_write { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; + uint32_t reg_value; +}; + +struct mmsch_v2_0_cmd_direct_read_modify_write { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; + uint32_t write_data; + uint32_t mask_value; +}; + +struct mmsch_v2_0_cmd_direct_polling { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; + uint32_t mask_value; + uint32_t wait_value; +}; + +struct mmsch_v2_0_cmd_end { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; +}; + +struct mmsch_v2_0_cmd_indirect_write { + struct mmsch_v2_0_cmd_indirect_reg_header cmd_header; + uint32_t reg_value; +}; + +static inline void mmsch_v2_0_insert_direct_wt(struct mmsch_v2_0_cmd_direct_write *direct_wt, + uint32_t *init_table, + uint32_t reg_offset, + uint32_t value) +{ + direct_wt->cmd_header.reg_offset = reg_offset; + direct_wt->reg_value = value; + memcpy((void *)init_table, direct_wt, sizeof(struct mmsch_v2_0_cmd_direct_write)); +} + +static inline void mmsch_v2_0_insert_direct_rd_mod_wt(struct mmsch_v2_0_cmd_direct_read_modify_write *direct_rd_mod_wt, + uint32_t *init_table, + uint32_t reg_offset, + uint32_t mask, uint32_t data) +{ + direct_rd_mod_wt->cmd_header.reg_offset = reg_offset; + direct_rd_mod_wt->mask_value = mask; + direct_rd_mod_wt->write_data = data; + memcpy((void *)init_table, direct_rd_mod_wt, + sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)); +} + +static inline void mmsch_v2_0_insert_direct_poll(struct mmsch_v2_0_cmd_direct_polling *direct_poll, + uint32_t *init_table, + uint32_t reg_offset, + uint32_t mask, uint32_t wait) +{ + direct_poll->cmd_header.reg_offset = reg_offset; + direct_poll->mask_value = mask; + direct_poll->wait_value = wait; + memcpy((void *)init_table, direct_poll, sizeof(struct mmsch_v2_0_cmd_direct_polling)); +} + +#define MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \ + mmsch_v2_0_insert_direct_rd_mod_wt(&direct_rd_mod_wt, \ + init_table, (reg), \ + (mask), (data)); \ + init_table += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \ + table_size += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \ +} + +#define MMSCH_V2_0_INSERT_DIRECT_WT(reg, value) { \ + mmsch_v2_0_insert_direct_wt(&direct_wt, \ + init_table, (reg), \ + (value)); \ + init_table += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \ + table_size += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \ +} + +#define MMSCH_V2_0_INSERT_DIRECT_POLL(reg, mask, wait) { \ + mmsch_v2_0_insert_direct_poll(&direct_poll, \ + init_table, (reg), \ + (mask), (wait)); \ + init_table += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \ + table_size += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \ +} + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index cf557a428298..e08245a446fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -32,6 +32,7 @@ #include "soc15_common.h" #include "navi10_ih.h" +#define MAX_REARM_RETRY 10 static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev); @@ -284,6 +285,38 @@ static void navi10_ih_decode_iv(struct amdgpu_device *adev, } /** + * navi10_ih_irq_rearm - rearm IRQ if lost + * + * @adev: amdgpu_device pointer + * + */ +static void navi10_ih_irq_rearm(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + uint32_t reg_rptr = 0; + uint32_t v = 0; + uint32_t i 
= 0; + + if (ih == &adev->irq.ih) + reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR); + else if (ih == &adev->irq.ih1) + reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1); + else if (ih == &adev->irq.ih2) + reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2); + else + return; + + /* Rearm IRQ / re-write doorbell if doorbell write is lost */ + for (i = 0; i < MAX_REARM_RETRY; i++) { + v = RREG32_NO_KIQ(reg_rptr); + if ((v < ih->ring_size) && (v != ih->rptr)) + WDOORBELL32(ih->doorbell_index, ih->rptr); + else + break; + } +} + +/** * navi10_ih_set_rptr - set the IH ring buffer rptr * * @adev: amdgpu_device pointer @@ -297,6 +330,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev, /* XXX check if swapping is necessary on BE */ *ih->rptr_cpu = ih->rptr; WDOORBELL32(ih->doorbell_index, ih->rptr); + + if (amdgpu_sriov_vf(adev)) + navi10_ih_irq_rearm(adev, ih); } else WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr); } diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h index 074a9a09c0a7..a5b60c9a2418 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h +++ b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h @@ -73,6 +73,22 @@ #define SDMA_OP_AQL_COPY 0 #define SDMA_OP_AQL_BARRIER_OR 0 +#define SDMA_GCR_RANGE_IS_PA (1 << 18) +#define SDMA_GCR_SEQ(x) (((x) & 0x3) << 16) +#define SDMA_GCR_GL2_WB (1 << 15) +#define SDMA_GCR_GL2_INV (1 << 14) +#define SDMA_GCR_GL2_DISCARD (1 << 13) +#define SDMA_GCR_GL2_RANGE(x) (((x) & 0x3) << 11) +#define SDMA_GCR_GL2_US (1 << 10) +#define SDMA_GCR_GL1_INV (1 << 9) +#define SDMA_GCR_GLV_INV (1 << 8) +#define SDMA_GCR_GLK_INV (1 << 7) +#define SDMA_GCR_GLK_WB (1 << 6) +#define SDMA_GCR_GLM_INV (1 << 5) +#define SDMA_GCR_GLM_WB (1 << 4) +#define SDMA_GCR_GL1_RANGE(x) (((x) & 0x3) << 2) +#define SDMA_GCR_GLI_INV(x) (((x) & 0x3) << 0) + /*define for op field*/ #define SDMA_PKT_HEADER_op_offset 0 #define SDMA_PKT_HEADER_op_mask 0x000000FF diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 65eb378fa035..149d386590df 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -318,6 +318,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device { uint32_t bif_doorbell_intr_cntl; struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if); + struct ras_err_data err_data = {0, 0, 0, NULL}; bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); if (REG_GET_FIELD(bif_doorbell_intr_cntl, @@ -332,7 +333,19 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device * clear error status after ras_controller_intr according to * hw team and count ue number for query */ - nbio_v7_4_query_ras_error_count(adev, &obj->err_data); + nbio_v7_4_query_ras_error_count(adev, &err_data); + + /* accumulate the error counts and print them for awareness */ + obj->err_data.ue_count += err_data.ue_count; + obj->err_data.ce_count += err_data.ce_count; + + if (err_data.ce_count) + DRM_INFO("%ld correctable errors detected in %s block\n", + obj->err_data.ce_count, adev->nbio.ras_if->name); + + if (err_data.ue_count) + DRM_INFO("%ld uncorrectable errors detected in %s block\n", + obj->err_data.ue_count, adev->nbio.ras_if->name); DRM_WARN("RAS controller interrupt triggered by NBIF error\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 2d1bebdf1603..52318b03c424 --- 
a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -351,8 +351,6 @@ static int nv_asic_reset(struct amdgpu_device *adev) struct smu_context *smu = &adev->smu; if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { - if (!adev->in_suspend) - amdgpu_inc_vram_lost(adev); ret = smu_baco_enter(smu); if (ret) return ret; @@ -360,8 +358,6 @@ static int nv_asic_reset(struct amdgpu_device *adev) if (ret) return ret; } else { - if (!adev->in_suspend) - amdgpu_inc_vram_lost(adev); ret = nv_asic_mode1_reset(adev); } @@ -516,7 +512,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 36b65797434e..a44fd6060d5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -31,6 +31,9 @@ #define GFX_CMD_RESERVED_MASK 0x7FF00000 #define GFX_CMD_RESPONSE_MASK 0x80000000 +/* USBC PD FW version retrieval command */ +#define C2PMSG_CMD_GFX_USB_PD_FW_VER 0x2000000 + /* TEE Gfx Command IDs for the register interface. * Command ID must be between 0x00010000 and 0x000F0000. */ diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 0829188c1a5c..0afd610a1263 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -26,6 +26,7 @@ #include "amdgpu.h" #include "amdgpu_psp.h" +#include "amdgpu_ras.h" #include "amdgpu_ucode.h" #include "soc15_common.h" #include "psp_v11_0.h" @@ -65,6 +66,9 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin"); /* memory training timeout define */ #define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000 +/* For large FW files the time to complete can be very long */ +#define USBC_PD_POLLING_LIMIT_S 240 + static int psp_v11_0_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -420,7 +424,8 @@ static int psp_v11_0_ring_init(struct psp_context *psp, struct psp_ring *ring; struct amdgpu_device *adev = psp->adev; - psp_v11_0_reroute_ih(psp); + if (!amdgpu_sriov_vf(adev)) + psp_v11_0_reroute_ih(psp); ring = &psp->km_ring; @@ -864,6 +869,11 @@ static int psp_v11_0_ras_trigger_error(struct psp_context *psp, if (ret) return -EINVAL; + /* If err_event_athub occurs, the error injection was successful; however, + the return status from the TA is no longer reliable */ + if (amdgpu_ras_intr_triggered()) + return 0; + return ras_cmd->ras_status; } @@ -1108,6 +1118,82 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value) WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); } +static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, dma_addr_t dma_addr) +{ + struct amdgpu_device *adev = psp->adev; + uint32_t reg_status; + int ret, i = 0; + + /* Write lower 32-bit address of the PD Controller FW */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, lower_32_bits(dma_addr)); + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret) + return ret; + + /* Fire up the interrupt so the PSP can pick up the lower address */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x800000); + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if 
(ret) + return ret; + + reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35); + + if ((reg_status & 0xFFFF) != 0) { + DRM_ERROR("Lower address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = %02x...\n", + reg_status & 0xFFFF); + return -EIO; + } + + /* Write upper 32-bit address of the PD Controller FW */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, upper_32_bits(dma_addr)); + + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret) + return ret; + + /* Fire up the interrupt so the PSP can pick up the upper address */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x4000000); + + /* FW load can take a very long time */ + do { + msleep(1000); + reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35); + + if (reg_status & 0x80000000) + goto done; + + } while (++i < USBC_PD_POLLING_LIMIT_S); + + return -ETIME; +done: + + if ((reg_status & 0xFFFF) != 0) { + DRM_ERROR("Upper address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = x%04x\n", + reg_status & 0xFFFF); + return -EIO; + } + + return 0; +} + +static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver) +{ + struct amdgpu_device *adev = psp->adev; + int ret; + + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER); + + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (!ret) + *fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36); + + return ret; +} + static const struct psp_funcs psp_v11_0_funcs = { .init_microcode = psp_v11_0_init_microcode, .bootloader_load_kdb = psp_v11_0_bootloader_load_kdb, @@ -1132,6 +1218,8 @@ static const struct psp_funcs psp_v11_0_funcs = { .mem_training = psp_v11_0_memory_training, .ring_get_wptr = psp_v11_0_ring_get_wptr, .ring_set_wptr = psp_v11_0_ring_set_wptr, + .load_usbc_pd_fw = psp_v11_0_load_usbc_pd_fw, + .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw }; void psp_v11_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e55884d204bd..5f3a5ee2a3f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -677,7 +677,7 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring) } /** - * sdma_v4_0_ring_set_wptr - commit the write pointer + * sdma_v4_0_page_ring_set_wptr - commit the write pointer * * @ring: amdgpu ring pointer * @@ -977,7 +977,7 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev) } /** - * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch + * sdma_v4_0_ctx_switch_enable - enable/disable the async dma engines context switch * * @adev: amdgpu_device pointer * @enable: enable/disable the DMA MEs context switch. 
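Before the remaining sdma_v4_0 hunks, a note on the USB-C PD firmware interface added in psp_v11_0.c above: the load path hands the PSP the image's DMA address one 32-bit half at a time through C2PMSG_36, rings a doorbell via C2PMSG_35 after each half, then polls up to USBC_PD_POLLING_LIMIT_S seconds for bit 31 to signal completion; the version query writes C2PMSG_CMD_GFX_USB_PD_FW_VER and reads the result back from C2PMSG_36. A hedged caller-side sketch follows; the example_update_pd_fw() wrapper is illustrative only, and just the two psp_funcs callbacks come from this patch:

    /* Sketch: push a PD controller image (already resident in DMA-able
     * memory at dma_addr) to the PSP, then read back the firmware version.
     */
    static int example_update_pd_fw(struct psp_context *psp, dma_addr_t dma_addr)
    {
        uint32_t fw_ver;
        int ret;

        ret = psp->funcs->load_usbc_pd_fw(psp, dma_addr); /* may poll up to 240 s */
        if (ret)
            return ret;

        ret = psp->funcs->read_usbc_pd_fw(psp, &fw_ver);
        if (!ret)
            DRM_INFO("USB PD FW version: 0x%08x\n", fw_ver);

        return ret;
    }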
@@ -1801,13 +1801,9 @@ static int sdma_v4_0_late_init(void *handle) struct ras_ih_if ih_info = { .cb = sdma_v4_0_process_ras_data_cb, }; - int i; - /* read back edc counter registers to clear the counters */ - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { - for (i = 0; i < adev->sdma.num_instances; i++) - RREG32_SDMA(i, mmSDMA0_EDC_COUNTER); - } + if (adev->sdma.funcs && adev->sdma.funcs->reset_ras_error_count) + adev->sdma.funcs->reset_ras_error_count(adev); if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init) return adev->sdma.funcs->ras_late_init(adev, &ih_info); @@ -2572,10 +2568,22 @@ static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev, return 0; }; +static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev) +{ + int i; + + /* read back edc counter registers to clear the counters */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { + for (i = 0; i < adev->sdma.num_instances; i++) + RREG32_SDMA(i, mmSDMA0_EDC_COUNTER); + } +} + static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = { .ras_late_init = amdgpu_sdma_ras_late_init, .ras_fini = amdgpu_sdma_ras_fini, .query_ras_error_count = sdma_v4_0_query_ras_error_count, + .reset_ras_error_count = sdma_v4_0_reset_ras_error_count, }; static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 67b9830b7c7e..d2840c2f6286 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -382,6 +382,18 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid); + /* Invalidate L2, because if we don't do it, we might get stale cache + * lines from previous IBs. + */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV | + SDMA_GCR_GL2_WB | + SDMA_GCR_GLM_INV | + SDMA_GCR_GLM_WB) << 16); + amdgpu_ring_write(ring, 0xffffff80); + amdgpu_ring_write(ring, 0xffff); + /* An IB packet must end on a 8 DW boundary--the next dword * must be on a 8-dword boundary. 
Our IB packet below is 6 * dwords long, thus add x number of NOPs, such that, in @@ -746,11 +758,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) sdma_v5_0_enable(adev, true); } - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) return r; - } if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_buffer_funcs_status(adev, true); @@ -1597,7 +1607,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 + 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */ - .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */ + .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */ .emit_ib = sdma_v5_0_ring_emit_ib, .emit_fence = sdma_v5_0_ring_emit_fence, .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync, diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 4cb4c891120b..0860e85a2d35 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -3439,7 +3439,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, if (adev->asic_type == CHIP_HAINAN) { if ((adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || (adev->pdev->revision == 0xC3) || (adev->pdev->device == 0x6664) || (adev->pdev->device == 0x6665) || diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c index c902f26cf50d..9bffbab35041 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c @@ -46,8 +46,7 @@ #define I2C_NO_STOP 1 #define I2C_RESTART 2 -#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev -#define to_eeprom_control(x) container_of(x, struct amdgpu_ras_eeprom_control, eeprom_accessor) +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en) { @@ -592,7 +591,8 @@ static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control, static void lock_bus(struct i2c_adapter *i2c, unsigned int flags) { - struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c); + struct amdgpu_device *adev = to_amdgpu_device(i2c); + struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control; if (!smu_v11_0_i2c_bus_lock(i2c)) { DRM_ERROR("Failed to lock the bus from SMU"); @@ -610,7 +610,8 @@ static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags) static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags) { - struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c); + struct amdgpu_device *adev = to_amdgpu_device(i2c); + struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control; if (!smu_v11_0_i2c_bus_unlock(i2c)) { DRM_ERROR("Failed to unlock the bus from SMU"); @@ -630,7 +631,8 @@ static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { int i, ret; - struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c_adap); + struct amdgpu_device *adev = to_amdgpu_device(i2c_adap); + struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control; if (!control->bus_locked) { DRM_ERROR("I2C bus unlocked, stopping transaction!"); @@ -679,7 +681,7 @@ int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control) control->class = I2C_CLASS_SPD; 
control->dev.parent = &adev->pdev->dev; control->algo = &smu_v11_0_i2c_eeprom_i2c_algo; - snprintf(control->name, sizeof(control->name), "RAS EEPROM"); + snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM"); control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops; res = i2c_add_adapter(control); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index d8945c31b622..d42a8d8a0dea 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -569,14 +569,10 @@ static int soc15_asic_reset(struct amdgpu_device *adev) switch (soc15_asic_reset_method(adev)) { case AMD_RESET_METHOD_BACO: - if (!adev->in_suspend) - amdgpu_inc_vram_lost(adev); return soc15_asic_baco_reset(adev); case AMD_RESET_METHOD_MODE2: return amdgpu_dpm_mode2_reset(adev); default: - if (!adev->in_suspend) - amdgpu_inc_vram_lost(adev); return soc15_asic_mode1_reset(adev); } } @@ -852,6 +848,15 @@ static bool soc15_need_full_reset(struct amdgpu_device *adev) /* change this when we implement soft reset */ return true; } + +static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev) +{ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP)) + return; + /*read back hdp ras counter to reset it to 0 */ + RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT); +} + static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, uint64_t *count1) { @@ -1019,6 +1024,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .get_config_memsize = &soc15_get_config_memsize, .flush_hdp = &soc15_flush_hdp, .invalidate_hdp = &soc15_invalidate_hdp, + .reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count, .need_full_reset = &soc15_need_full_reset, .init_doorbell_index = &vega20_doorbell_index_init, .get_pcie_usage = &vega20_get_pcie_usage, @@ -1264,6 +1270,10 @@ static int soc15_common_late_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_get_irq(adev); + if (adev->asic_funcs && + adev->asic_funcs->reset_hdp_ras_error_count) + adev->asic_funcs->reset_hdp_ras_error_count(adev); + if (adev->nbio.funcs->ras_late_init) r = adev->nbio.funcs->ras_late_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index d0fb7a67c1a3..b03f950c486c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -42,6 +42,13 @@ struct soc15_reg_golden { u32 or_mask; }; +struct soc15_reg_rlcg { + u32 hwip; + u32 instance; + u32 segment; + u32 reg; +}; + struct soc15_reg_entry { uint32_t hwip; uint32_t inst; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 19e870c79896..c893c645a4b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -70,10 +70,9 @@ } \ } while (0) -#define AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(a) (amdgpu_sriov_vf((a)) && !amdgpu_sriov_runtime((a))) #define WREG32_RLC(reg, value) \ do { \ - if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \ + if (amdgpu_sriov_fullaccess(adev)) { \ uint32_t i = 0; \ uint32_t retries = 50000; \ uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \ @@ -98,7 +97,7 @@ #define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \ do { \ uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\ - if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \ + if (amdgpu_sriov_fullaccess(adev)) { \ uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \ uint32_t r3 = 
adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \ uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \ diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 793bf70e64b1..14d346321a5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -186,6 +186,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev, if (rsmu_umc_index_state) umc_v6_1_disable_umc_index_mode(adev); + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + DRM_WARN("Failed to disable DF-Cstate\n"); + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { umc_reg_offset = get_umc_6_reg_offset(adev, umc_inst, @@ -199,6 +203,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev, &(err_data->ue_count)); } + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) + DRM_WARN("Failed to enable DF-Cstate\n"); + if (rsmu_umc_index_state) umc_v6_1_enable_umc_index_mode(adev); } @@ -228,7 +236,11 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0); } - /* skip error address process if -ENOMEM */ + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); + + if (mc_umc_status == 0) + return; + if (!err_data->err_addr) { /* clear umc status */ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); @@ -236,7 +248,6 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, } err_rec = &err_data->err_addr[err_data->err_addr_cnt]; - mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); /* calculate error address if ue/ce error is detected */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && @@ -288,6 +299,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, if (rsmu_umc_index_state) umc_v6_1_disable_umc_index_mode(adev); + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + DRM_WARN("Failed to disable DF-Cstate\n"); + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { umc_reg_offset = get_umc_6_reg_offset(adev, umc_inst, @@ -300,6 +315,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, umc_inst); } + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) + DRM_WARN("Failed to enable DF-Cstate\n"); + if (rsmu_umc_index_state) umc_v6_1_enable_umc_index_mode(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index b7f17342bbf0..ec8091a661df 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -29,6 +29,7 @@ #include "soc15d.h" #include "amdgpu_pm.h" #include "amdgpu_psp.h" +#include "mmsch_v2_0.h" #include "vcn/vcn_2_0_0_offset.h" #include "vcn/vcn_2_0_0_sh_mask.h" @@ -54,7 +55,7 @@ static int vcn_v2_0_set_powergating_state(void *handle, enum amd_powergating_state state); static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx, struct dpg_pause_state *new_state); - +static int vcn_v2_0_start_sriov(struct amdgpu_device *adev); /** * vcn_v2_0_early_init - set function pointers * @@ -67,7 +68,10 @@ static int vcn_v2_0_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->vcn.num_vcn_inst = 1; - adev->vcn.num_enc_rings = 2; + if (amdgpu_sriov_vf(adev)) + 
adev->vcn.num_enc_rings = 1; + else + adev->vcn.num_enc_rings = 2; vcn_v2_0_set_dec_ring_funcs(adev); vcn_v2_0_set_enc_ring_funcs(adev); @@ -154,7 +158,10 @@ static int vcn_v2_0_sw_init(void *handle) for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst->ring_enc[i]; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; + if (!amdgpu_sriov_vf(adev)) + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; + else + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i; sprintf(ring->name, "vcn_enc%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); if (r) @@ -163,6 +170,10 @@ static int vcn_v2_0_sw_init(void *handle) adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode; + r = amdgpu_virt_alloc_mm_table(adev); + if (r) + return r; + return 0; } @@ -178,6 +189,8 @@ static int vcn_v2_0_sw_fini(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_virt_free_mm_table(adev); + r = amdgpu_vcn_suspend(adev); if (r) return r; @@ -203,6 +216,9 @@ static int vcn_v2_0_hw_init(void *handle) adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, ring->doorbell_index, 0); + if (amdgpu_sriov_vf(adev)) + vcn_v2_0_start_sriov(adev); + r = amdgpu_ring_test_helper(ring); if (r) goto done; @@ -304,6 +320,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); uint32_t offset; + if (amdgpu_sriov_vf(adev)) + return; + /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, @@ -448,6 +467,9 @@ static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev) { uint32_t data; + if (amdgpu_sriov_vf(adev)) + return; + /* UVD disable CGC */ data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) @@ -606,6 +628,9 @@ static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev) { uint32_t data = 0; + if (amdgpu_sriov_vf(adev)) + return; + /* enable UVD CGC */ data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) @@ -658,6 +683,9 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev) uint32_t data = 0; int ret; + if (amdgpu_sriov_vf(adev)) + return; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT @@ -705,6 +733,9 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev) uint32_t data = 0; int ret; + if (amdgpu_sriov_vf(adev)) + return; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { /* Before power off, this indicator has to be turned on */ data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS); @@ -1215,6 +1246,9 @@ static int vcn_v2_0_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE); + if (amdgpu_sriov_vf(adev)) + return 0; + if (enable) { /* wait for STATUS to clear */ if (!vcn_v2_0_is_idle(handle)) @@ -1631,6 +1665,9 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; + if (amdgpu_sriov_vf(adev)) + return 0; + WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 4); if (r) @@ -1667,6 +1704,11 @@ static int vcn_v2_0_set_powergating_state(void *handle, int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if 
(amdgpu_sriov_vf(adev)) { + adev->vcn.cur_state = AMD_PG_STATE_UNGATE; + return 0; + } + if (state == adev->vcn.cur_state) return 0; @@ -1680,6 +1722,215 @@ static int vcn_v2_0_set_powergating_state(void *handle, return ret; } +static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev, + struct amdgpu_mm_table *table) +{ + uint32_t data = 0, loop; + uint64_t addr = table->gpu_addr; + struct mmsch_v2_0_init_header *header; + uint32_t size; + int i; + + header = (struct mmsch_v2_0_init_header *)table->cpu_addr; + size = header->header_size + header->vcn_table_size; + + /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr + * of memory descriptor location + */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr)); + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr)); + + /* 2, update vmid of descriptor */ + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID); + data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + /* use domain0 for MM scheduler */ + data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data); + + /* 3, notify mmsch about the size of this descriptor */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size); + + /* 4, set resp to zero */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0); + + adev->vcn.inst->ring_dec.wptr = 0; + adev->vcn.inst->ring_dec.wptr_old = 0; + vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec); + + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + adev->vcn.inst->ring_enc[i].wptr = 0; + adev->vcn.inst->ring_enc[i].wptr_old = 0; + vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]); + } + + /* 5, kick off the initialization and wait until + * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero + */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001); + + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP); + loop = 1000; + while ((data & 0x10000002) != 0x10000002) { + udelay(10); + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP); + loop--; + if (!loop) + break; + } + + if (!loop) { + DRM_ERROR("failed to init MMSCH, " \ + "mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n", data); + return -EBUSY; + } + + return 0; +} + +static int vcn_v2_0_start_sriov(struct amdgpu_device *adev) +{ + int r; + uint32_t tmp; + struct amdgpu_ring *ring; + uint32_t offset, size; + uint32_t table_size = 0; + struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} }; + struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} }; + struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} }; + struct mmsch_v2_0_cmd_end end = { {0} }; + struct mmsch_v2_0_init_header *header; + uint32_t *init_table = adev->virt.mm_table.cpu_addr; + uint8_t i = 0; + + header = (struct mmsch_v2_0_init_header *)init_table; + direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; + direct_rd_mod_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; + direct_poll.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_POLLING; + end.cmd_header.command_type = MMSCH_COMMAND__END; + + if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) { + header->version = MMSCH_VERSION; + header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2; + + header->vcn_table_offset = header->header_size; + + init_table += header->vcn_table_offset; + + size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + + MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), + 0xFFFFFFFF, 0x00000004); + + /* mc resume*/ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + tmp = 
AMDGPU_UCODE_ID_VCN; + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + adev->firmware.ucode[tmp].tmr_mc_addr_lo); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + adev->firmware.ucode[tmp].tmr_mc_addr_hi); + offset = 0; + } else { + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->gpu_addr)); + offset = size; + } + + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), + 0); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), + size); + + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->gpu_addr + offset)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->gpu_addr + offset)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), + 0); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), + AMDGPU_VCN_STACK_SIZE); + + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), + 0); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2), + AMDGPU_VCN_CONTEXT_SIZE); + + for (r = 0; r < adev->vcn.num_enc_rings; ++r) { + ring = &adev->vcn.inst->ring_enc[r]; + ring->wptr = 0; + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), + lower_32_bits(ring->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), + upper_32_bits(ring->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), + ring->ring_size / 4); + } + + ring = &adev->vcn.inst->ring_dec; + ring->wptr = 0; + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_RBC_RB_64BIT_BAR_LOW), + lower_32_bits(ring->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH), + upper_32_bits(ring->gpu_addr)); + /* force RBC into idle state */ + tmp = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp); + + /* add end packet */ + tmp = sizeof(struct mmsch_v2_0_cmd_end); + memcpy((void *)init_table, &end, tmp); + table_size += (tmp / 4); + header->vcn_table_size = table_size; + + } + return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table); +} + static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { .name = "vcn_v2_0", .early_init = vcn_v2_0_early_init, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 
678253d81154..c6363f5ad564 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -74,29 +74,30 @@ static int amdgpu_ih_clientid_vcns[] = { static int vcn_v2_5_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_ARCTURUS) { - u32 harvest; - int i; - - adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS; - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING); - if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) - adev->vcn.harvest_config |= 1 << i; - } - - if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 | - AMDGPU_VCN_HARVEST_VCN1)) - /* both instances are harvested, disable the block */ - return -ENOENT; - } else - adev->vcn.num_vcn_inst = 1; if (amdgpu_sriov_vf(adev)) { adev->vcn.num_vcn_inst = 2; adev->vcn.harvest_config = 0; adev->vcn.num_enc_rings = 1; } else { + if (adev->asic_type == CHIP_ARCTURUS) { + u32 harvest; + int i; + + adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS; + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING); + if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) + adev->vcn.harvest_config |= 1 << i; + } + + if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 | + AMDGPU_VCN_HARVEST_VCN1)) + /* both instances are harvested, disable the block */ + return -ENOENT; + } else + adev->vcn.num_vcn_inst = 1; + adev->vcn.num_enc_rings = 2; } diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 78b35901643b..3ce10e05d0d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -765,8 +765,6 @@ static int vi_asic_reset(struct amdgpu_device *adev) int r; if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { - if (!adev->in_suspend) - amdgpu_inc_vram_lost(adev); r = amdgpu_dpm_baco_reset(adev); } else { r = vi_asic_pci_config_reset(adev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 3f0300e53727..0ec5f25adf56 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -127,6 +127,8 @@ static int kfd_open(struct inode *inode, struct file *filep) return PTR_ERR(process); if (kfd_is_locked()) { + dev_dbg(kfd_device, "kfd is locked!\n" + "process %d unreferenced", process->pasid); kfd_unref_process(process); return -EAGAIN; } @@ -1167,7 +1169,7 @@ static int kfd_ioctl_get_tile_config(struct file *filep, if (!dev) return -EINVAL; - dev->kfd2kgd->get_tile_config(dev->kgd, &config); + amdgpu_amdkfd_get_tile_config(dev->kgd, &config); args->gb_addr_config = config.gb_addr_config; args->num_banks = config.num_banks; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 2a9e40131735..05bc6d96ec52 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -648,6 +648,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (kfd->kfd2kgd->get_hive_id) kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd); + if (kfd->kfd2kgd->get_unique_id) + kfd->unique_id = kfd->kfd2kgd->get_unique_id(kfd->kgd); + if (kfd_interrupt_init(kfd)) { dev_err(kfd_device, "Error initializing interrupts\n"); goto kfd_interrupt_error; @@ -710,7 +713,7 @@ out: void kgd2kfd_device_exit(struct kfd_dev *kfd) { if (kfd->init_complete) { - kgd2kfd_suspend(kfd); + kgd2kfd_suspend(kfd, false); device_queue_manager_uninit(kfd->dqm); 
kfd_interrupt_exit(kfd); kfd_topology_remove_device(kfd); @@ -731,7 +734,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) kfd->dqm->ops.pre_reset(kfd->dqm); - kgd2kfd_suspend(kfd); + kgd2kfd_suspend(kfd, false); kfd_signal_reset_event(kfd); return 0; @@ -765,21 +768,23 @@ bool kfd_is_locked(void) return (atomic_read(&kfd_locked) > 0); } -void kgd2kfd_suspend(struct kfd_dev *kfd) +void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) { if (!kfd->init_complete) return; - /* For first KFD device suspend all the KFD processes */ - if (atomic_inc_return(&kfd_locked) == 1) - kfd_suspend_all_processes(); + /* for runtime suspend, skip locking kfd */ + if (!run_pm) { + /* For first KFD device suspend all the KFD processes */ + if (atomic_inc_return(&kfd_locked) == 1) + kfd_suspend_all_processes(); + } kfd->dqm->ops.stop(kfd->dqm); - kfd_iommu_suspend(kfd); } -int kgd2kfd_resume(struct kfd_dev *kfd) +int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) { int ret, count; @@ -790,10 +795,13 @@ int kgd2kfd_resume(struct kfd_dev *kfd) if (ret) return ret; - count = atomic_dec_return(&kfd_locked); - WARN_ONCE(count < 0, "KFD suspend / resume ref. error"); - if (count == 0) - ret = kfd_resume_all_processes(); + /* for runtime resume, skip unlocking kfd */ + if (!run_pm) { + count = atomic_dec_return(&kfd_locked); + WARN_ONCE(count < 0, "KFD suspend / resume ref. error"); + if (count == 0) + ret = kfd_resume_all_processes(); + } return ret; } @@ -1104,9 +1112,9 @@ kfd_gtt_out: return 0; kfd_gtt_no_free_chunk: - pr_debug("Allocation failed with mem_obj = %p\n", mem_obj); + pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj); mutex_unlock(&kfd->gtt_sa_lock); - kfree(mem_obj); + kfree(*mem_obj); return -ENOMEM; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 80d22bf702e8..77ea0f0cb163 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -78,14 +78,14 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe) /* queue is available for KFD usage if bit is 1 */ for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i) if (test_bit(pipe_offset + i, - dqm->dev->shared_resources.queue_bitmap)) + dqm->dev->shared_resources.cp_queue_bitmap)) return true; return false; } -unsigned int get_queues_num(struct device_queue_manager *dqm) +unsigned int get_cp_queues_num(struct device_queue_manager *dqm) { - return bitmap_weight(dqm->dev->shared_resources.queue_bitmap, + return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap, KGD_MAX_QUEUES); } @@ -109,6 +109,11 @@ static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm) return dqm->dev->device_info->num_xgmi_sdma_engines; } +static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm) +{ + return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm); +} + unsigned int get_num_sdma_queues(struct device_queue_manager *dqm) { return dqm->dev->device_info->num_sdma_engines @@ -132,6 +137,22 @@ void program_sh_mem_settings(struct device_queue_manager *dqm, qpd->sh_mem_bases); } +void increment_queue_count(struct device_queue_manager *dqm, + enum kfd_queue_type type) +{ + dqm->active_queue_count++; + if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ) + dqm->active_cp_queue_count++; +} + +void decrement_queue_count(struct device_queue_manager *dqm, + enum kfd_queue_type type) +{ + dqm->active_queue_count--; + if 
(type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ) + dqm->active_cp_queue_count--; +} + static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q) { struct kfd_dev *dev = qpd->dqm->dev; @@ -281,8 +302,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, struct mqd_manager *mqd_mgr; int retval; - print_queue(q); - dqm_lock(dqm); if (dqm->total_queue_count >= max_num_of_queues_per_device) { @@ -359,12 +378,7 @@ add_queue_to_list: list_add(&q->list, &qpd->queues_list); qpd->queue_count++; if (q->properties.is_active) - dqm->queue_count++; - - if (q->properties.type == KFD_QUEUE_TYPE_SDMA) - dqm->sdma_queue_count++; - else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) - dqm->xgmi_sdma_queue_count++; + increment_queue_count(dqm, q->properties.type); /* * Unconditionally increment this counter, regardless of the queue's @@ -446,15 +460,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( q->properties.type)]; - if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) { + if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) deallocate_hqd(dqm, q); - } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { - dqm->sdma_queue_count--; + else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) deallocate_sdma_queue(dqm, q); - } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { - dqm->xgmi_sdma_queue_count--; + else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) deallocate_sdma_queue(dqm, q); - } else { + else { pr_debug("q->properties.type %d is invalid\n", q->properties.type); return -EINVAL; @@ -494,7 +506,7 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm, } qpd->queue_count--; if (q->properties.is_active) - dqm->queue_count--; + decrement_queue_count(dqm, q->properties.type); return retval; } @@ -563,13 +575,13 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) /* * check active state vs. the previous state and modify * counter accordingly. map_queues_cpsch uses the - * dqm->queue_count to determine whether a new runlist must be + * dqm->active_queue_count to determine whether a new runlist must be * uploaded. */ if (q->properties.is_active && !prev_active) - dqm->queue_count++; + increment_queue_count(dqm, q->properties.type); else if (!q->properties.is_active && prev_active) - dqm->queue_count--; + decrement_queue_count(dqm, q->properties.type); if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) retval = map_queues_cpsch(dqm); @@ -618,7 +630,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( q->properties.type)]; q->properties.is_active = false; - dqm->queue_count--; + decrement_queue_count(dqm, q->properties.type); if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n")) continue; @@ -662,7 +674,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, continue; q->properties.is_active = false; - dqm->queue_count--; + decrement_queue_count(dqm, q->properties.type); } retval = execute_queues_cpsch(dqm, qpd->is_debug ? 
@@ -731,7 +743,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( q->properties.type)]; q->properties.is_active = true; - dqm->queue_count++; + increment_queue_count(dqm, q->properties.type); if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n")) continue; @@ -786,7 +798,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, continue; q->properties.is_active = true; - dqm->queue_count++; + increment_queue_count(dqm, q->properties.type); } retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); @@ -899,16 +911,15 @@ static int initialize_nocpsch(struct device_queue_manager *dqm) mutex_init(&dqm->lock_hidden); INIT_LIST_HEAD(&dqm->queues); - dqm->queue_count = dqm->next_pipe_to_allocate = 0; - dqm->sdma_queue_count = 0; - dqm->xgmi_sdma_queue_count = 0; + dqm->active_queue_count = dqm->next_pipe_to_allocate = 0; + dqm->active_cp_queue_count = 0; for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) { int pipe_offset = pipe * get_queues_per_pipe(dqm); for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) if (test_bit(pipe_offset + queue, - dqm->dev->shared_resources.queue_bitmap)) + dqm->dev->shared_resources.cp_queue_bitmap)) dqm->allocated_queues[pipe] |= 1 << queue; } @@ -924,7 +935,7 @@ static void uninitialize(struct device_queue_manager *dqm) { int i; - WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0); + WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0); kfree(dqm->allocated_queues); for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++) @@ -966,8 +977,11 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, int bit; if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { - if (dqm->sdma_bitmap == 0) + if (dqm->sdma_bitmap == 0) { + pr_err("No more SDMA queue to allocate\n"); return -ENOMEM; + } + bit = __ffs64(dqm->sdma_bitmap); dqm->sdma_bitmap &= ~(1ULL << bit); q->sdma_id = bit; @@ -976,8 +990,10 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, q->properties.sdma_queue_id = q->sdma_id / get_num_sdma_engines(dqm); } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { - if (dqm->xgmi_sdma_bitmap == 0) + if (dqm->xgmi_sdma_bitmap == 0) { + pr_err("No more XGMI SDMA queue to allocate\n"); return -ENOMEM; + } bit = __ffs64(dqm->xgmi_sdma_bitmap); dqm->xgmi_sdma_bitmap &= ~(1ULL << bit); q->sdma_id = bit; @@ -1029,7 +1045,7 @@ static int set_sched_resources(struct device_queue_manager *dqm) mec = (i / dqm->dev->shared_resources.num_queue_per_pipe) / dqm->dev->shared_resources.num_pipe_per_mec; - if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap)) + if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap)) continue; /* only acquire queues from the first MEC */ @@ -1064,9 +1080,9 @@ static int initialize_cpsch(struct device_queue_manager *dqm) mutex_init(&dqm->lock_hidden); INIT_LIST_HEAD(&dqm->queues); - dqm->queue_count = dqm->processes_count = 0; - dqm->sdma_queue_count = 0; - dqm->xgmi_sdma_queue_count = 0; + dqm->active_queue_count = dqm->processes_count = 0; + dqm->active_cp_queue_count = 0; + dqm->active_runlist = false; dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm)); dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm)); @@ -1158,7 +1174,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, dqm->total_queue_count); list_add(&kq->list, &qpd->priv_queue_list); - dqm->queue_count++; + increment_queue_count(dqm, kq->queue->properties.type); 
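/*
 * Illustrative sketch, not part of this patch: with queue_count
 * renamed to active_queue_count, the runlist decision can be read
 * directly off the counters kept by increment_queue_count() and
 * decrement_queue_count(). The hypothetical helper below restates
 * the early-out checks at the top of map_queues_cpsch(): a HWS
 * runlist is only (re)built while the scheduler is running,
 * something is active, and no runlist is already in flight.
 */
static bool example_dqm_needs_runlist(struct device_queue_manager *dqm)
{
	return dqm->sched_running &&
	       dqm->active_queue_count > 0 &&
	       dqm->processes_count > 0 &&
	       !dqm->active_runlist;
}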
qpd->is_debug = true; execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); dqm_unlock(dqm); @@ -1172,7 +1188,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, { dqm_lock(dqm); list_del(&kq->list); - dqm->queue_count--; + decrement_queue_count(dqm, kq->queue->properties.type); qpd->is_debug = false; execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); /* @@ -1238,13 +1254,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, list_add(&q->list, &qpd->queues_list); qpd->queue_count++; - if (q->properties.type == KFD_QUEUE_TYPE_SDMA) - dqm->sdma_queue_count++; - else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) - dqm->xgmi_sdma_queue_count++; - if (q->properties.is_active) { - dqm->queue_count++; + increment_queue_count(dqm, q->properties.type); + retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); } @@ -1298,20 +1310,6 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr, return 0; } -static int unmap_sdma_queues(struct device_queue_manager *dqm) -{ - int i, retval = 0; - - for (i = 0; i < dqm->dev->device_info->num_sdma_engines + - dqm->dev->device_info->num_xgmi_sdma_engines; i++) { - retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i); - if (retval) - return retval; - } - return retval; -} - /* dqm->lock mutex has to be locked before calling this function */ static int map_queues_cpsch(struct device_queue_manager *dqm) { @@ -1319,7 +1317,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm) if (!dqm->sched_running) return 0; - if (dqm->queue_count <= 0 || dqm->processes_count <= 0) + if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0) return 0; if (dqm->active_runlist) return 0; @@ -1349,12 +1347,6 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, if (!dqm->active_runlist) return retval; - pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n", - dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count); - - if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count) - unmap_sdma_queues(dqm); - retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE, filter, filter_param, false, 0); if (retval) @@ -1427,18 +1419,15 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, deallocate_doorbell(qpd, q); - if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { - dqm->sdma_queue_count--; + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) deallocate_sdma_queue(dqm, q); - } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { - dqm->xgmi_sdma_queue_count--; + else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) deallocate_sdma_queue(dqm, q); - } list_del(&q->list); qpd->queue_count--; if (q->properties.is_active) { - dqm->queue_count--; + decrement_queue_count(dqm, q->properties.type); retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); if (retval == -ETIME) @@ -1648,7 +1637,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, /* Clean all kernel queues */ list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) { list_del(&kq->list); - dqm->queue_count--; + decrement_queue_count(dqm, kq->queue->properties.type); qpd->is_debug = false; dqm->total_queue_count--; filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES; @@ -1656,16 +1645,13 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, /* Clear all user mode queues */ 
list_for_each_entry(q, &qpd->queues_list, list) { - if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { - dqm->sdma_queue_count--; + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) deallocate_sdma_queue(dqm, q); - } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { - dqm->xgmi_sdma_queue_count--; + else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) deallocate_sdma_queue(dqm, q); - } if (q->properties.is_active) - dqm->queue_count--; + decrement_queue_count(dqm, q->properties.type); dqm->total_queue_count--; } @@ -1742,14 +1728,13 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) struct kfd_dev *dev = dqm->dev; struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd; uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * - (dev->device_info->num_sdma_engines + - dev->device_info->num_xgmi_sdma_engines) * + get_num_all_sdma_engines(dqm) * dev->device_info->num_sdma_queues_per_engine + dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size, &(mem_obj->gtt_mem), &(mem_obj->gpu_addr), - (void *)&(mem_obj->cpu_ptr), true); + (void *)&(mem_obj->cpu_ptr), false); return retval; } @@ -1979,7 +1964,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { if (!test_bit(pipe_offset + queue, - dqm->dev->shared_resources.queue_bitmap)) + dqm->dev->shared_resources.cp_queue_bitmap)) continue; r = dqm->dev->kfd2kgd->hqd_dump( @@ -1995,8 +1980,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) } } - for (pipe = 0; pipe < get_num_sdma_engines(dqm) + - get_num_xgmi_sdma_engines(dqm); pipe++) { + for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) { for (queue = 0; queue < dqm->dev->device_info->num_sdma_queues_per_engine; queue++) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 871d3b628d2d..50d919f814e9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -180,9 +180,8 @@ struct device_queue_manager { struct list_head queues; unsigned int saved_flags; unsigned int processes_count; - unsigned int queue_count; - unsigned int sdma_queue_count; - unsigned int xgmi_sdma_queue_count; + unsigned int active_queue_count; + unsigned int active_cp_queue_count; unsigned int total_queue_count; unsigned int next_pipe_to_allocate; unsigned int *allocated_queues; @@ -219,7 +218,7 @@ void device_queue_manager_init_v10_navi10( struct device_queue_manager_asic_ops *asic_ops); void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd); -unsigned int get_queues_num(struct device_queue_manager *dqm); +unsigned int get_cp_queues_num(struct device_queue_manager *dqm); unsigned int get_queues_per_pipe(struct device_queue_manager *dqm); unsigned int get_pipes_per_mec(struct device_queue_manager *dqm); unsigned int get_num_sdma_queues(struct device_queue_manager *dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 1f8365575b12..15476fca8fa6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -187,7 +187,7 @@ static int create_signal_event(struct file *devkfd, if (p->signal_mapped_size && p->signal_event_count == p->signal_mapped_size / 8) { if (!p->signal_event_limit_reached) { - pr_warn("Signal event wasn't created because limit was reached\n"); + pr_debug("Signal event wasn't created because 
limit was reached\n"); p->signal_event_limit_reached = true; } return -ENOSPC; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index bb77b8890e77..78714f9a8b11 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -316,7 +316,7 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id) { /* * node id couldn't be 0 - the three MSB bits of - * aperture shoudn't be 0 + * aperture shouldn't be 0 */ pdd->lds_base = MAKE_LDS_APP_BASE_VI(); pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 436b7f518979..48cda3073b70 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -87,9 +87,21 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, int retval; struct kfd_mem_obj *mqd_mem_obj = NULL; - /* From V9, for CWSR, the control stack is located on the next page - * boundary after the mqd, we will use the gtt allocation function - * instead of sub-allocation function. + /* For V9 only, due to a HW bug, the control stack of a user mode + * compute queue needs to be allocated just behind the page boundary + * of its regular MQD buffer. So we allocate an enlarged MQD buffer: + * the first page serves the regular MQD buffer purpose and the + * remainder is for the control stack. Although the two parts share + * one buffer object, they need different memory types: the MQD part + * needs UC (uncached) as usual, while the control stack needs NC + * (non-coherent), unlike the UC type used when the control stack is + * allocated in user space. + * + * Because of all this, we use the gtt allocation function instead of + * the sub-allocation function for this enlarged MQD buffer. Moreover, + * to achieve two memory types in a single buffer object, we pass the + * special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct amdgpu + * memory functions to do so.
*/ if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index dc406e6dee23..efdb75e7677b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -47,9 +47,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm, struct kfd_dev *dev = pm->dqm->dev; process_count = pm->dqm->processes_count; - queue_count = pm->dqm->queue_count; - compute_queue_count = queue_count - pm->dqm->sdma_queue_count - - pm->dqm->xgmi_sdma_queue_count; + queue_count = pm->dqm->active_queue_count; + compute_queue_count = pm->dqm->active_cp_queue_count; /* check if there is over subscription * Note: the arbitration between the number of VMIDs and @@ -62,7 +61,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm, max_proc_per_quantum = dev->max_proc_per_quantum; if ((process_count > max_proc_per_quantum) || - compute_queue_count > get_queues_num(pm->dqm)) { + compute_queue_count > get_cp_queues_num(pm->dqm)) { *over_subscription = true; pr_debug("Over subscribed runlist\n"); } @@ -141,7 +140,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm, pm->ib_size_bytes = alloc_size_bytes; pr_debug("Building runlist ib process count: %d queues count %d\n", - pm->dqm->processes_count, pm->dqm->queue_count); + pm->dqm->processes_count, pm->dqm->active_queue_count); /* build the run list ib packet */ list_for_each_entry(cur, queues, list) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 6af1b5881f43..c24cad3c64ed 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -41,6 +41,7 @@ #include <drm/drm_drv.h> #include <drm/drm_device.h> #include <kgd_kfd_interface.h> +#include <linux/swap.h> #include "amd_shared.h" @@ -293,6 +294,9 @@ struct kfd_dev { /* xGMI */ uint64_t hive_id; + + /* UUID */ + uint64_t unique_id; bool pci_atomic_requested; @@ -502,6 +506,9 @@ struct queue { struct kfd_process *process; struct kfd_dev *device; void *gws; + + /* procfs */ + struct kobject kobj; }; /* @@ -646,6 +653,7 @@ struct kfd_process_device { * function. */ bool already_dequeued; + bool runtime_inuse; /* Is this process/pasid bound to this device? 
(amd_iommu_bind_pasid) */ enum kfd_pdd_bound bound; @@ -729,6 +737,7 @@ struct kfd_process { /* Kobj for our procfs */ struct kobject *kobj; + struct kobject *kobj_queues; struct attribute attr_pasid; }; @@ -835,6 +844,8 @@ extern struct device *kfd_device; /* KFD's procfs */ void kfd_procfs_init(void); void kfd_procfs_shutdown(void); +int kfd_procfs_add_queue(struct queue *q); +void kfd_procfs_del_queue(struct queue *q); /* Topology */ int kfd_topology_init(void); @@ -1039,7 +1050,7 @@ void kfd_dec_compute_active(struct kfd_dev *dev); /* Check with device cgroup if @kfd device is accessible */ static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd) { -#if defined(CONFIG_CGROUP_DEVICE) +#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) struct drm_device *ddev = kfd->ddev; return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 25b90f70aecd..fe0cd49d4ea7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -31,6 +31,7 @@ #include <linux/compat.h> #include <linux/mman.h> #include <linux/file.h> +#include <linux/pm_runtime.h> #include "amdgpu_amdkfd.h" #include "amdgpu.h" @@ -132,6 +133,88 @@ void kfd_procfs_shutdown(void) } } +static ssize_t kfd_procfs_queue_show(struct kobject *kobj, + struct attribute *attr, char *buffer) +{ + struct queue *q = container_of(kobj, struct queue, kobj); + + if (!strcmp(attr->name, "size")) + return snprintf(buffer, PAGE_SIZE, "%llu", + q->properties.queue_size); + else if (!strcmp(attr->name, "type")) + return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type); + else if (!strcmp(attr->name, "gpuid")) + return snprintf(buffer, PAGE_SIZE, "%u", q->device->id); + else + pr_err("Invalid attribute"); + + return 0; +} + +static struct attribute attr_queue_size = { + .name = "size", + .mode = KFD_SYSFS_FILE_MODE +}; + +static struct attribute attr_queue_type = { + .name = "type", + .mode = KFD_SYSFS_FILE_MODE +}; + +static struct attribute attr_queue_gpuid = { + .name = "gpuid", + .mode = KFD_SYSFS_FILE_MODE +}; + +static struct attribute *procfs_queue_attrs[] = { + &attr_queue_size, + &attr_queue_type, + &attr_queue_gpuid, + NULL +}; + +static const struct sysfs_ops procfs_queue_ops = { + .show = kfd_procfs_queue_show, +}; + +static struct kobj_type procfs_queue_type = { + .sysfs_ops = &procfs_queue_ops, + .default_attrs = procfs_queue_attrs, +}; + +int kfd_procfs_add_queue(struct queue *q) +{ + struct kfd_process *proc; + int ret; + + if (!q || !q->process) + return -EINVAL; + proc = q->process; + + /* Create proc/<pid>/queues/<queue id> folder */ + if (!proc->kobj_queues) + return -EFAULT; + ret = kobject_init_and_add(&q->kobj, &procfs_queue_type, + proc->kobj_queues, "%u", q->properties.queue_id); + if (ret < 0) { + pr_warn("Creating proc/<pid>/queues/%u failed", + q->properties.queue_id); + kobject_put(&q->kobj); + return ret; + } + + return 0; +} + +void kfd_procfs_del_queue(struct queue *q) +{ + if (!q) + return; + + kobject_del(&q->kobj); + kobject_put(&q->kobj); +} + int kfd_process_create_wq(void) { if (!kfd_process_wq) @@ -244,10 +327,10 @@ err_alloc_mem: static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd) { struct qcm_process_device *qpd = &pdd->qpd; - uint32_t flags = ALLOC_MEM_FLAGS_GTT | - ALLOC_MEM_FLAGS_NO_SUBSTITUTE | - ALLOC_MEM_FLAGS_WRITABLE | - ALLOC_MEM_FLAGS_EXECUTABLE; + uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT | + 
KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE | + KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE | + KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; void *kaddr; int ret; @@ -323,6 +406,11 @@ struct kfd_process *kfd_create_process(struct file *filep) if (ret) pr_warn("Creating pasid for pid %d failed", (int)process->lead_thread->pid); + + process->kobj_queues = kobject_create_and_add("queues", + process->kobj); + if (!process->kobj_queues) + pr_warn("Creating KFD proc/queues folder failed"); } out: if (!IS_ERR(process)) @@ -440,6 +528,16 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) kfree(pdd->qpd.doorbell_bitmap); idr_destroy(&pdd->alloc_idr); + /* + * before destroying pdd, make sure to report availability + * for auto suspend + */ + if (pdd->runtime_inuse) { + pm_runtime_mark_last_busy(pdd->dev->ddev->dev); + pm_runtime_put_autosuspend(pdd->dev->ddev->dev); + pdd->runtime_inuse = false; + } + kfree(pdd); } } @@ -457,6 +555,9 @@ static void kfd_process_wq_release(struct work_struct *work) /* Remove the procfs files */ if (p->kobj) { sysfs_remove_file(p->kobj, &p->attr_pasid); + kobject_del(p->kobj_queues); + kobject_put(p->kobj_queues); + p->kobj_queues = NULL; kobject_del(p->kobj); kobject_put(p->kobj); p->kobj = NULL; @@ -540,6 +641,11 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, /* Indicate to other users that MM is no longer valid */ p->mm = NULL; + /* Signal the eviction fence after user mode queues are + * destroyed. This allows any BOs to be freed without + * triggering pointless evictions or waiting for fences. + */ + dma_fence_signal(p->ef); mutex_unlock(&p->mutex); @@ -591,8 +697,9 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) { struct kfd_dev *dev = pdd->dev; struct qcm_process_device *qpd = &pdd->qpd; - uint32_t flags = ALLOC_MEM_FLAGS_GTT | - ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE; + uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT + | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE + | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; void *kaddr; int ret; @@ -754,6 +861,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, pdd->process = p; pdd->bound = PDD_UNBOUND; pdd->already_dequeued = false; + pdd->runtime_inuse = false; list_add(&pdd->per_device_list, &p->per_device_data); /* Init idr used for memory handle translation */ @@ -843,15 +951,41 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, return ERR_PTR(-ENOMEM); } + /* + * signal the runtime-pm system to auto resume the device and + * prevent further runtime suspend from when the device pdd is + * created until the pdd is destroyed. 
+ */ + if (!pdd->runtime_inuse) { + err = pm_runtime_get_sync(dev->ddev->dev); + if (err < 0) + return ERR_PTR(err); + } + err = kfd_iommu_bind_process_to_device(pdd); if (err) - return ERR_PTR(err); + goto out; err = kfd_process_device_init_vm(pdd, NULL); if (err) - return ERR_PTR(err); + goto out; + + /* + * make sure that runtime_usage counter is incremented just once + * per pdd + */ + pdd->runtime_inuse = true; return pdd; + +out: + /* balance runpm reference count and exit with error */ + if (!pdd->runtime_inuse) { + pm_runtime_mark_last_busy(dev->ddev->dev); + pm_runtime_put_autosuspend(dev->ddev->dev); + } + + return ERR_PTR(err); } struct kfd_process_device *kfd_get_first_process_device_data( diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 31fcd1b51f00..084c35f55d59 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -241,23 +241,18 @@ int pqm_create_queue(struct process_queue_manager *pqm, switch (type) { case KFD_QUEUE_TYPE_SDMA: case KFD_QUEUE_TYPE_SDMA_XGMI: - if ((type == KFD_QUEUE_TYPE_SDMA && dev->dqm->sdma_queue_count - >= get_num_sdma_queues(dev->dqm)) || - (type == KFD_QUEUE_TYPE_SDMA_XGMI && - dev->dqm->xgmi_sdma_queue_count - >= get_num_xgmi_sdma_queues(dev->dqm))) { - pr_debug("Over-subscription is not allowed for SDMA.\n"); - retval = -EPERM; - goto err_create_queue; - } - + /* SDMA queues are always allocated statically no matter + * which scheduler mode is used. We also do not need to + * check whether a SDMA queue can be allocated here, because + * allocate_sdma_queue() in create_queue() has the + * corresponding check logic. + */ retval = init_user_queue(pqm, dev, &q, properties, f, *qid); if (retval != 0) goto err_create_queue; pqn->q = q; pqn->kq = NULL; retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd); - pr_debug("DQM returned %d for create_queue\n", retval); print_queue(q); break; @@ -266,7 +261,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, if ((dev->dqm->sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) && ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) || - (dev->dqm->queue_count >= get_queues_num(dev->dqm)))) { + (dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) { pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n"); retval = -EPERM; goto err_create_queue; @@ -278,7 +273,6 @@ int pqm_create_queue(struct process_queue_manager *pqm, pqn->q = q; pqn->kq = NULL; retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd); - pr_debug("DQM returned %d for create_queue\n", retval); print_queue(q); break; case KFD_QUEUE_TYPE_DIQ: @@ -299,7 +293,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, } if (retval != 0) { - pr_err("Pasid 0x%x DQM create queue %d failed. ret %d\n", + pr_err("Pasid 0x%x DQM create queue type %d failed. 
ret %d\n", pqm->process->pasid, type, retval); goto err_create_queue; } @@ -322,12 +316,16 @@ int pqm_create_queue(struct process_queue_manager *pqm, if (q) { pr_debug("PQM done creating queue\n"); + kfd_procfs_add_queue(q); print_queue_properties(&q->properties); } return retval; err_create_queue: + uninit_queue(q); + if (kq) + kernel_queue_uninit(kq, false); kfree(pqn); err_allocate_pqn: /* check if queues list is empty unregister process from device */ @@ -378,6 +376,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) } if (pqn->q) { + kfd_procfs_del_queue(pqn->q); dqm = pqn->q->device->dqm; retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q); if (retval) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 203c823d65f1..aa0bfa78a667 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -490,6 +490,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.num_sdma_queues_per_engine); sysfs_show_32bit_prop(buffer, "num_cp_queues", dev->node_props.num_cp_queues); + sysfs_show_64bit_prop(buffer, "unique_id", + dev->node_props.unique_id); if (dev->gpu) { log_max_watch_addr = @@ -1318,7 +1320,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu) dev->node_props.num_gws = (hws_gws_support && dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0; - dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm); + dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm); + dev->node_props.unique_id = gpu->unique_id; kfd_fill_mem_clk_max_info(dev); kfd_fill_iolink_non_crat_info(dev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index 74e9b1682af8..46eeecaf1b68 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -54,6 +54,7 @@ struct kfd_node_properties { uint64_t hive_id; + uint64_t unique_id; uint32_t cpu_cores_count; uint32_t simd_count; uint32_t mem_banks_count; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6240259b3a93..7fc15b82fe48 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -98,6 +98,9 @@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); +#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin" +MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); + /* Number of bytes in PSP header for firmware. */ #define PSP_HEADER_BYTES 0x100 @@ -129,9 +132,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev); /* removes and deallocates the drm structures, created by the above function */ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm); -static void -amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector); - static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, struct drm_plane *plane, unsigned long possible_crtcs, @@ -383,8 +383,8 @@ static void dm_pflip_high_irq(void *interrupt_params) * of pageflip completion, so last_flip_vblank is the forbidden count * for queueing new pageflips if vsync + VRR is enabled. 
*/ - amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev, - amdgpu_crtc->crtc_id); + amdgpu_crtc->last_flip_vblank = + amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base); amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; spin_unlock_irqrestore(&adev->ddev->event_lock, flags); @@ -407,8 +407,9 @@ static void dm_vupdate_high_irq(void *interrupt_params) if (acrtc) { acrtc_state = to_dm_crtc_state(acrtc->base.state); - DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, - amdgpu_dm_vrr_active(acrtc_state)); + DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n", + acrtc->crtc_id, + amdgpu_dm_vrr_active(acrtc_state)); /* Core vblank handling is done here after end of front-porch in * vrr mode, as vblank timestamping will give valid results @@ -440,7 +441,7 @@ static void dm_vupdate_high_irq(void *interrupt_params) /** * dm_crtc_high_irq() - Handles CRTC interrupt - * @interrupt_params: ignored + * @interrupt_params: used for determining the CRTC instance * * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK * event handler. @@ -454,94 +455,44 @@ static void dm_crtc_high_irq(void *interrupt_params) unsigned long flags; acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); - - if (acrtc) { - acrtc_state = to_dm_crtc_state(acrtc->base.state); - - DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, - amdgpu_dm_vrr_active(acrtc_state)); - - /* Core vblank handling at start of front-porch is only possible - * in non-vrr mode, as only there vblank timestamping will give - * valid results while done in front-porch. Otherwise defer it - * to dm_vupdate_high_irq after end of front-porch. - */ - if (!amdgpu_dm_vrr_active(acrtc_state)) - drm_crtc_handle_vblank(&acrtc->base); - - /* Following stuff must happen at start of vblank, for crc - * computation and below-the-range btr support in vrr mode. - */ - amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); - - if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI && - acrtc_state->vrr_params.supported && - acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) { - spin_lock_irqsave(&adev->ddev->event_lock, flags); - mod_freesync_handle_v_update( - adev->dm.freesync_module, - acrtc_state->stream, - &acrtc_state->vrr_params); - - dc_stream_adjust_vmin_vmax( - adev->dm.dc, - acrtc_state->stream, - &acrtc_state->vrr_params.adjust); - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - } - } -} - -#if defined(CONFIG_DRM_AMD_DC_DCN) -/** - * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs - * @interrupt params - interrupt parameters - * - * Notify DRM's vblank event handler at VSTARTUP - * - * Unlike DCE hardware, we trigger the handler at VSTARTUP. at which: - * * We are close enough to VUPDATE - the point of no return for hw - * * We are in the fixed portion of variable front porch when vrr is enabled - * * We are before VUPDATE, where double-buffered vrr registers are swapped - * - * It is therefore the correct place to signal vblank, send user flip events, - * and update VRR. 
- */ -static void dm_dcn_crtc_high_irq(void *interrupt_params) -{ - struct common_irq_params *irq_params = interrupt_params; - struct amdgpu_device *adev = irq_params->adev; - struct amdgpu_crtc *acrtc; - struct dm_crtc_state *acrtc_state; - unsigned long flags; - - acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); - if (!acrtc) return; acrtc_state = to_dm_crtc_state(acrtc->base.state); - DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, + DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, amdgpu_dm_vrr_active(acrtc_state), acrtc_state->active_planes); + /** + * Core vblank handling at start of front-porch is only possible + * in non-vrr mode, as only there vblank timestamping will give + * valid results while done in front-porch. Otherwise defer it + * to dm_vupdate_high_irq after end of front-porch. + */ + if (!amdgpu_dm_vrr_active(acrtc_state)) + drm_crtc_handle_vblank(&acrtc->base); + + /** + * Following stuff must happen at start of vblank, for crc + * computation and below-the-range btr support in vrr mode. + */ amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); - drm_crtc_handle_vblank(&acrtc->base); + + /* BTR updates need to happen before VUPDATE on Vega and above. */ + if (adev->family < AMDGPU_FAMILY_AI) + return; spin_lock_irqsave(&adev->ddev->event_lock, flags); - if (acrtc_state->vrr_params.supported && + if (acrtc_state->stream && acrtc_state->vrr_params.supported && acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) { - mod_freesync_handle_v_update( - adev->dm.freesync_module, - acrtc_state->stream, - &acrtc_state->vrr_params); + mod_freesync_handle_v_update(adev->dm.freesync_module, + acrtc_state->stream, + &acrtc_state->vrr_params); - dc_stream_adjust_vmin_vmax( - adev->dm.dc, - acrtc_state->stream, - &acrtc_state->vrr_params.adjust); + dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream, + &acrtc_state->vrr_params.adjust); } /* @@ -554,7 +505,8 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params) * avoid race conditions between flip programming and completion, * which could cause too early flip completion events. */ - if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED && + if (adev->family >= AMDGPU_FAMILY_RV && + acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED && acrtc_state->active_planes == 0) { if (acrtc->event) { drm_crtc_send_vblank_event(&acrtc->base, acrtc->event); @@ -566,7 +518,6 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params) spin_unlock_irqrestore(&adev->ddev->event_lock, flags); } -#endif static int dm_set_clockgating_state(void *handle, enum amd_clockgating_state state) @@ -813,10 +764,20 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes); - memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, - fw_inst_const_size); + /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP, + * amdgpu_ucode_init_single_fw will load dmub firmware + * fw_inst_const part to cw0; otherwise, the firmware back door load + * will be done by dm_dmub_hw_init + */ + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, + fw_inst_const_size); + } + memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data, fw_bss_data_size); + + /* Copy firmware bios info into FB memory. 
*/ memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios, adev->bios_size); @@ -835,6 +796,10 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) hw_params.fb_base = adev->gmc.fb_start; hw_params.fb_offset = adev->gmc.aper_base; + /* backdoor load firmware and trigger dmub running */ + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) + hw_params.load_inst_const = true; + if (dmcu) hw_params.psp_version = dmcu->psp_version; @@ -897,7 +862,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.asic_id.chip_family = adev->family; - init_data.asic_id.pci_revision_id = adev->rev_id; + init_data.asic_id.pci_revision_id = adev->pdev->revision; init_data.asic_id.hw_internal_rev = adev->external_rev_id; init_data.asic_id.vram_width = adev->gmc.vram_width; @@ -972,7 +937,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) #ifdef CONFIG_DRM_AMD_DC_HDCP if (adev->asic_type >= CHIP_RAVEN) { - adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc); + adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); if (!adev->dm.hdcp_workqueue) DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); @@ -1003,11 +968,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } -#if defined(CONFIG_DEBUG_FS) - if (dtn_debugfs_init(adev)) - DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n"); -#endif - DRM_DEBUG_DRIVER("KMS initialized.\n"); return 0; @@ -1091,9 +1051,11 @@ static int load_dmcu_fw(struct amdgpu_device *adev) case CHIP_VEGA20: case CHIP_NAVI10: case CHIP_NAVI14: - case CHIP_NAVI12: case CHIP_RENOIR: return 0; + case CHIP_NAVI12: + fw_name_dmcu = FIRMWARE_NAVI12_DMCU; + break; case CHIP_RAVEN: if (ASICREV_IS_PICASSO(adev->external_rev_id)) fw_name_dmcu = FIRMWARE_RAVEN_DMCU; @@ -1204,22 +1166,21 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) return 0; } - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - DRM_WARN("Only PSP firmware loading is supported for DMUB\n"); - return 0; - } - hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; - adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = - AMDGPU_UCODE_ID_DMCUB; - adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); - adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = + AMDGPU_UCODE_ID_DMCUB; + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = + adev->dm.dmub_fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); - DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", - adev->dm.dmcub_fw_version); + DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", + adev->dm.dmcub_fw_version); + } + + adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); dmub_srv = adev->dm.dmub_srv; @@ -1839,8 +1800,63 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { .atomic_commit_tail = amdgpu_dm_atomic_commit_tail }; -static void -amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) +static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) +{ + u32 max_cll, min_cll, max, min, q, r; + struct amdgpu_dm_backlight_caps *caps; + struct amdgpu_display_manager *dm; + struct 
drm_connector *conn_base; + struct amdgpu_device *adev; + static const u8 pre_computed_values[] = { + 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69, + 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98}; + + if (!aconnector || !aconnector->dc_link) + return; + + conn_base = &aconnector->base; + adev = conn_base->dev->dev_private; + dm = &adev->dm; + caps = &dm->backlight_caps; + caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; + caps->aux_support = false; + max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll; + min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll; + + if (caps->ext_caps->bits.oled == 1 || + caps->ext_caps->bits.sdr_aux_backlight_control == 1 || + caps->ext_caps->bits.hdr_aux_backlight_control == 1) + caps->aux_support = true; + + /* From the specification (CTA-861-G), to calculate the maximum + * luminance we need to use: + * Luminance = 50*2**(CV/32) + * where CV is a one-byte value. + * Evaluating this expression would need floating-point precision; + * to avoid that complexity, we take advantage of the fact that CV + * is divided by a constant. From Euclid's division algorithm, we + * know that CV can be written as: CV = 32*q + r. Next, we replace + * CV in the Luminance expression and get 50*(2**q)*(2**(r/32)), + * hence we just need to pre-compute the values of r/32. To + * pre-compute them, we used the following Ruby line: + * (0...32).each {|cv| puts (50*2**(cv/32.0)).round} + * The results of the above expression can be verified against + * pre_computed_values. + */ + q = max_cll >> 5; + r = max_cll % 32; + max = (1 << q) * pre_computed_values[r]; + + // min luminance: maxLum * (CV/255)^2 / 100 + q = DIV_ROUND_CLOSEST(min_cll, 255); + min = max * DIV_ROUND_CLOSEST((q * q), 100); + + caps->aux_max_input_signal = max; + caps->aux_min_input_signal = min; +} + +void amdgpu_dm_update_connector_after_detect( + struct amdgpu_dm_connector *aconnector) { struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; @@ -1941,19 +1957,24 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) dc_sink_retain(aconnector->dc_sink); if (sink->dc_edid.length == 0) { aconnector->edid = NULL; - drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); + if (aconnector->dc_link->aux_mode) { + drm_dp_cec_unset_edid( + &aconnector->dm_dp_aux.aux); + } } else { aconnector->edid = - (struct edid *) sink->dc_edid.raw_edid; - + (struct edid *)sink->dc_edid.raw_edid; drm_connector_update_edid_property(connector, - aconnector->edid); - drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, - aconnector->edid); + aconnector->edid); + + if (aconnector->dc_link->aux_mode) + drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, + aconnector->edid); } - amdgpu_dm_update_freesync_caps(connector, aconnector->edid); + amdgpu_dm_update_freesync_caps(connector, aconnector->edid); + update_connector_ext_caps(aconnector); } else { drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); amdgpu_dm_update_freesync_caps(connector, NULL); @@ -2169,10 +2190,10 @@ static void handle_hpd_rx_irq(void *param) } } #ifdef CONFIG_DRM_AMD_DC_HDCP - if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { - if (adev->dm.hdcp_workqueue) - hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); - } + if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { + if (adev->dm.hdcp_workqueue) + hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); + } #endif if ((dc_link->cur_link_settings.lane_count != 
LANE_COUNT_UNKNOWN) || (dc_link->type == dc_connection_mst_branch)) @@ -2373,8 +2394,36 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; + amdgpu_dm_irq_register_interrupt( + adev, &int_params, dm_crtc_high_irq, c_irq_params); + } + + /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to + * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx + * to trigger at end of each vblank, regardless of state of the lock, + * matching DCE behaviour. + */ + for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; + i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; + i++) { + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); + + if (r) { + DRM_ERROR("Failed to add vupdate irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_dcn_crtc_high_irq, c_irq_params); + dm_vupdate_high_irq, c_irq_params); } /* Use GRPH_PFLIP interrupt */ @@ -2567,6 +2616,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 +#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) @@ -2581,9 +2631,11 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm) amdgpu_acpi_get_backlight_caps(dm->adev, &caps); if (caps.caps_valid) { + dm->backlight_caps.caps_valid = true; + if (caps.aux_support) + return; dm->backlight_caps.min_input_signal = caps.min_input_signal; dm->backlight_caps.max_input_signal = caps.max_input_signal; - dm->backlight_caps.caps_valid = true; } else { dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; @@ -2591,40 +2643,95 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm) AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; } #else + if (dm->backlight_caps.aux_support) + return; + dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; #endif } +static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness) +{ + bool rc; + + if (!link) + return 1; + + rc = dc_link_set_backlight_level_nits(link, true, brightness, + AUX_BL_DEFAULT_TRANSITION_TIME_MS); + + return rc ? 
0 : 1; +} + +static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps, + const uint32_t user_brightness) +{ + u32 min, max, conversion_pace; + u32 brightness = user_brightness; + + if (!caps) + goto out; + + if (!caps->aux_support) { + max = caps->max_input_signal; + min = caps->min_input_signal; + /* + * The brightness input is in the range 0-255 + * It needs to be rescaled to be between the + * requested min and max input signal + * It also needs to be scaled up by 0x101 to + * match the DC interface which has a range of + * 0 to 0xffff + */ + conversion_pace = 0x101; + brightness = + user_brightness + * conversion_pace + * (max - min) + / AMDGPU_MAX_BL_LEVEL + + min * conversion_pace; + } else { + /* TODO + * We are doing a linear interpolation here, which is OK but + * does not provide the optimal result. We probably want + * something close to the Perceptual Quantizer (PQ) curve. + */ + max = caps->aux_max_input_signal; + min = caps->aux_min_input_signal; + + brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min + + user_brightness * max; + // Multiply the value by 1000 since we use millinits + brightness *= 1000; + brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL); + } + +out: + return brightness; +} + static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) { struct amdgpu_display_manager *dm = bl_get_data(bd); struct amdgpu_dm_backlight_caps caps; - uint32_t brightness = bd->props.brightness; + struct dc_link *link = NULL; + u32 brightness; + bool rc; amdgpu_dm_update_backlight_caps(dm); caps = dm->backlight_caps; - /* - * The brightness input is in the range 0-255 - * It needs to be rescaled to be between the - * requested min and max input signal - * - * It also needs to be scaled up by 0x101 to - * match the DC interface which has a range of - * 0 to 0xffff - */ - brightness = - brightness - * 0x101 - * (caps.max_input_signal - caps.min_input_signal) - / AMDGPU_MAX_BL_LEVEL - + caps.min_input_signal * 0x101; - - if (dc_link_set_backlight_level(dm->backlight_link, - brightness, 0)) - return 0; - else - return 1; + + link = (struct dc_link *)dm->backlight_link; + + brightness = convert_brightness(&caps, bd->props.brightness); + // Change brightness based on AUX property + if (caps.aux_support) + return set_backlight_via_aux(link, brightness); + + rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0); + + return rc ? 0 : 1; } static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) @@ -2909,6 +3016,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true; + /* No userspace support. 
*/ + dm->dc->debug.disable_tri_buf = true; + return 0; fail: kfree(aencoder); @@ -3212,7 +3322,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev, const union dc_tiling_info *tiling_info, const uint64_t info, struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address) + struct dc_plane_address *address, + bool force_disable_dcc) { struct dc *dc = adev->dm.dc; struct dc_dcc_surface_param input; @@ -3224,6 +3335,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev, memset(&input, 0, sizeof(input)); memset(&output, 0, sizeof(output)); + if (force_disable_dcc) + return 0; + if (!offset) return 0; @@ -3273,7 +3387,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, union dc_tiling_info *tiling_info, struct plane_size *plane_size, struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address) + struct dc_plane_address *address, + bool force_disable_dcc) { const struct drm_framebuffer *fb = &afb->base; int ret; @@ -3379,7 +3494,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, ret = fill_plane_dcc_attributes(adev, afb, format, rotation, plane_size, tiling_info, - tiling_flags, dcc, address); + tiling_flags, dcc, address, + force_disable_dcc); if (ret) return ret; } @@ -3471,7 +3587,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, const struct drm_plane_state *plane_state, const uint64_t tiling_flags, struct dc_plane_info *plane_info, - struct dc_plane_address *address) + struct dc_plane_address *address, + bool force_disable_dcc) { const struct drm_framebuffer *fb = plane_state->fb; const struct amdgpu_framebuffer *afb = @@ -3511,6 +3628,9 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, case DRM_FORMAT_NV12: plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; break; + case DRM_FORMAT_P010: + plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; + break; default: DRM_ERROR( "Unsupported screen format %s\n", @@ -3550,7 +3670,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, plane_info->rotation, tiling_flags, &plane_info->tiling_info, &plane_info->plane_size, - &plane_info->dcc, address); + &plane_info->dcc, address, + force_disable_dcc); if (ret) return ret; @@ -3573,6 +3694,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, struct dc_plane_info plane_info; uint64_t tiling_flags; int ret; + bool force_disable_dcc = false; ret = fill_dc_scaling_info(plane_state, &scaling_info); if (ret) @@ -3587,9 +3709,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, if (ret) return ret; + force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags, &plane_info, - &dc_plane_state->address); + &dc_plane_state->address, + force_disable_dcc); if (ret) return ret; @@ -4200,9 +4324,22 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, struct dmcu *dmcu = core_dc->res_pool->dmcu; stream->psr_version = dmcu->dmcu_version.psr_version; - mod_build_vsc_infopacket(stream, - &stream->vsc_infopacket, - &stream->use_vsc_sdp_for_colorimetry); + + // + // should decide stream support vsc sdp colorimetry capability + // before building vsc info packet + // + stream->use_vsc_sdp_for_colorimetry = false; + if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + stream->use_vsc_sdp_for_colorimetry = + aconnector->dc_sink->is_vsc_sdp_colorimetry_supported; + } else { + if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && + 
stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) { + stream->use_vsc_sdp_for_colorimetry = true; + } + } + mod_build_vsc_infopacket(stream, &stream->vsc_infopacket); } } finish: @@ -4293,10 +4430,6 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) struct amdgpu_device *adev = crtc->dev->dev_private; int rc; - /* Do not set vupdate for DCN hardware */ - if (adev->family > AMDGPU_FAMILY_AI) - return 0; - irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst; rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; @@ -4352,8 +4485,10 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { .set_crc_source = amdgpu_dm_crtc_set_crc_source, .verify_crc_source = amdgpu_dm_crtc_verify_crc_source, .get_crc_sources = amdgpu_dm_crtc_get_crc_sources, + .get_vblank_counter = amdgpu_get_vblank_counter_kms, .enable_vblank = dm_enable_vblank, .disable_vblank = dm_disable_vblank, + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, }; static enum drm_connector_status @@ -4518,6 +4653,7 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) i2c_del_adapter(&aconnector->i2c->base); kfree(aconnector->i2c); } + kfree(aconnector->dm_dp_aux.aux.name); kfree(connector); } @@ -4574,6 +4710,28 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) return &new_state->base; } +static int +amdgpu_dm_connector_late_register(struct drm_connector *connector) +{ + struct amdgpu_dm_connector *amdgpu_dm_connector = + to_amdgpu_dm_connector(connector); + int r; + + if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || + (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { + amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev; + r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux); + if (r) + return r; + } + +#if defined(CONFIG_DEBUG_FS) + connector_debugfs_init(amdgpu_dm_connector); +#endif + + return 0; +} + static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { .reset = amdgpu_dm_connector_funcs_reset, .detect = amdgpu_dm_connector_detect, @@ -4583,6 +4741,7 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_set_property = amdgpu_dm_connector_atomic_set_property, .atomic_get_property = amdgpu_dm_connector_atomic_get_property, + .late_register = amdgpu_dm_connector_late_register, .early_unregister = amdgpu_dm_connector_unregister }; @@ -4959,7 +5118,8 @@ static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { .disable = dm_crtc_helper_disable, .atomic_check = dm_crtc_helper_atomic_check, - .mode_fixup = dm_crtc_helper_mode_fixup + .mode_fixup = dm_crtc_helper_mode_fixup, + .get_scanout_position = amdgpu_crtc_get_scanout_position, }; static void dm_encoder_helper_disable(struct drm_encoder *encoder) @@ -5171,6 +5331,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, uint64_t tiling_flags; uint32_t domain; int r; + bool force_disable_dcc = false; dm_plane_state_old = to_dm_plane_state(plane->state); dm_plane_state_new = to_dm_plane_state(new_state); @@ -5229,11 +5390,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; + force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; fill_plane_buffer_attributes( 
adev, afb, plane_state->format, plane_state->rotation, tiling_flags, &plane_state->tiling_info, &plane_state->plane_size, &plane_state->dcc, - &plane_state->address); + &plane_state->address, + force_disable_dcc); } return 0; @@ -5377,6 +5540,8 @@ static int get_plane_formats(const struct drm_plane *plane, if (plane_cap && plane_cap->pixel_format_support.nv12) formats[num_formats++] = DRM_FORMAT_NV12; + if (plane_cap && plane_cap->pixel_format_support.p010) + formats[num_formats++] = DRM_FORMAT_P010; break; case DRM_PLANE_TYPE_OVERLAY: @@ -5429,12 +5594,15 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, } if (plane->type == DRM_PLANE_TYPE_PRIMARY && - plane_cap && plane_cap->pixel_format_support.nv12) { + plane_cap && + (plane_cap->pixel_format_support.nv12 || + plane_cap->pixel_format_support.p010)) { /* This only affects YUV formats. */ drm_plane_create_color_properties( plane, BIT(DRM_COLOR_YCBCR_BT601) | - BIT(DRM_COLOR_YCBCR_BT709), + BIT(DRM_COLOR_YCBCR_BT709) | + BIT(DRM_COLOR_YCBCR_BT2020), BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | BIT(DRM_COLOR_YCBCR_FULL_RANGE), DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); @@ -5763,7 +5931,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, adev->mode_info.underscan_vborder_property, 0); - drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); + if (!aconnector->mst_port) + drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); /* This defaults to the max in the range, but we want 8bpc for non-edp. */ aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8; @@ -5782,8 +5951,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, &aconnector->base.base, dm->ddev->mode_config.hdr_output_metadata_property, 0); - drm_connector_attach_vrr_capable_property( - &aconnector->base); + if (!aconnector->mst_port) + drm_connector_attach_vrr_capable_property(&aconnector->base); + #ifdef CONFIG_DRM_AMD_DC_HDCP if (adev->dm.hdcp_workqueue) drm_connector_attach_content_protection_property(&aconnector->base, true); @@ -5922,16 +6092,9 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, drm_connector_attach_encoder( &aconnector->base, &aencoder->base); - drm_connector_register(&aconnector->base); -#if defined(CONFIG_DEBUG_FS) - connector_debugfs_init(aconnector); - aconnector->debugfs_dpcd_address = 0; - aconnector->debugfs_dpcd_size = 0; -#endif - if (connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector_type == DRM_MODE_CONNECTOR_eDP) - amdgpu_dm_initialize_dp_connector(dm, aconnector); + amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); out_free: if (res) { @@ -6113,12 +6276,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, y <= -amdgpu_crtc->max_cursor_height) return 0; - if (crtc->primary->state) { - /* avivo cursor are offset into the total surface */ - x += crtc->primary->state->src_x >> 16; - y += crtc->primary->state->src_y >> 16; - } - if (x < 0) { xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); x = 0; @@ -6128,6 +6285,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, y = 0; } position->enable = true; + position->translate_by_source = true; position->x = x; position->y = y; position->x_hotspot = xorigin; @@ -6414,7 +6572,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, uint32_t target_vblank, last_flip_vblank; bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); bool pflip_present = false; 
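For illustration only, not part of the patch: a sketch of the negative-coordinate clamp now done in get_cursor_position() above. With position->translate_by_source set, DC applies the primary plane's source offset itself, which is why the old crtc->primary->state adjustment was dropped; the helper and values below are hypothetical.

#include <stdio.h>

/* Clamp a cursor whose top-left corner went negative: the position is
 * pinned to 0 and the hotspot origin absorbs the overhang, capped at
 * the hardware cursor size minus one. */
static void clamp_cursor(int *x, int *y, int *xorigin, int *yorigin,
			 int max_w, int max_h)
{
	*xorigin = *yorigin = 0;
	if (*x < 0) {
		*xorigin = -*x < max_w - 1 ? -*x : max_w - 1;
		*x = 0;
	}
	if (*y < 0) {
		*yorigin = -*y < max_h - 1 ? -*y : max_h - 1;
		*y = 0;
	}
}

int main(void)
{
	int x = -10, y = 5, xo, yo;

	clamp_cursor(&x, &y, &xo, &yo, 128, 128);
	printf("x=%d y=%d xorigin=%d yorigin=%d\n", x, y, xo, yo); /* 0 5 10 0 */
	return 0;
}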
- bool swizzle = true; struct { struct dc_surface_update surface_updates[MAX_SURFACES]; struct dc_plane_info plane_infos[MAX_SURFACES]; @@ -6460,9 +6617,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, dc_plane = dm_new_plane_state->dc_state; - if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle) - swizzle = false; - bundle->surface_updates[planes_count].surface = dc_plane; if (new_pcrtc_state->color_mgmt_changed) { bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; @@ -6514,7 +6668,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, fill_dc_plane_info_and_addr( dm->adev, new_plane_state, tiling_flags, &bundle->plane_infos[planes_count], - &bundle->flip_addrs[planes_count].address); + &bundle->flip_addrs[planes_count].address, + false); + + DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n", + new_plane_state->plane->index, + bundle->plane_infos[planes_count].dcc.enable); bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count]; @@ -6563,7 +6722,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * clients using the GLX_OML_sync_control extension or * DRI3/Present extension with defined target_msc. */ - last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id); + last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); } else { /* For variable refresh rate mode only: @@ -6592,7 +6751,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && (int)(target_vblank - - amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) { + amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) { usleep_range(1000, 1100); } @@ -6671,8 +6830,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, amdgpu_dm_link_setup_psr(acrtc_state->stream); else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) && acrtc_state->stream->link->psr_feature_enabled && - !acrtc_state->stream->link->psr_allow_active && - swizzle) { + !acrtc_state->stream->link->psr_allow_active) { amdgpu_dm_psr_enable(acrtc_state->stream); } @@ -7697,6 +7855,7 @@ static int dm_update_plane_state(struct dc *dc, struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; + struct amdgpu_crtc *new_acrtc; bool needs_reset; int ret = 0; @@ -7706,9 +7865,23 @@ static int dm_update_plane_state(struct dc *dc, dm_new_plane_state = to_dm_plane_state(new_plane_state); dm_old_plane_state = to_dm_plane_state(old_plane_state); - /*TODO Implement atomic check for cursor plane */ - if (plane->type == DRM_PLANE_TYPE_CURSOR) + /*TODO Implement better atomic check for cursor plane */ + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + if (!enable || !new_plane_crtc || + drm_atomic_plane_disabling(plane->state, new_plane_state)) + return 0; + + new_acrtc = to_amdgpu_crtc(new_plane_crtc); + + if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) || + (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) { + DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n", + new_plane_state->crtc_w, new_plane_state->crtc_h); + return -EINVAL; + } + return 0; + } needs_reset = should_reset_plane(state, plane, old_plane_state, new_plane_state); @@ -7935,7 +8108,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, ret = fill_dc_plane_info_and_addr( dm->adev, 
new_plane_state, tiling_flags, plane_info, - &flip_addr->address); + &flip_addr->address, + false); if (ret) goto cleanup; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 7ea9acb0358d..5cab3e65d992 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -90,15 +90,41 @@ struct dm_comressor_info { }; /** - * struct amdgpu_dm_backlight_caps - Usable range of backlight values from ACPI - * @min_input_signal: minimum possible input in range 0-255 - * @max_input_signal: maximum possible input in range 0-255 - * @caps_valid: true if these values are from the ACPI interface + * struct amdgpu_dm_backlight_caps - Information about backlight + * + * Describe the backlight support for ACPI or eDP AUX. */ struct amdgpu_dm_backlight_caps { + /** + * @ext_caps: Keep the data struct with all the information about the + * display support for HDR. + */ + union dpcd_sink_ext_caps *ext_caps; + /** + * @aux_min_input_signal: Min brightness value supported by the display + */ + u32 aux_min_input_signal; + /** + * @aux_max_input_signal: Max brightness value supported by the display + * in nits. + */ + u32 aux_max_input_signal; + /** + * @min_input_signal: minimum possible input in range 0-255. + */ int min_input_signal; + /** + * @max_input_signal: maximum possible input in range 0-255. + */ int max_input_signal; + /** + * @caps_valid: true if these values are from the ACPI interface. + */ bool caps_valid; + /** + * @aux_support: Describes if the display supports AUX backlight. + */ + bool aux_support; }; /** @@ -457,6 +483,9 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc); int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, struct dc_plane_state *dc_plane_state); +void amdgpu_dm_update_connector_after_detect( + struct amdgpu_dm_connector *aconnector); + extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; #endif /* __AMDGPU_DM_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index f81d3439ee8c..0461fecd68db 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -32,6 +32,19 @@ #include "amdgpu_dm.h" #include "amdgpu_dm_debugfs.h" #include "dm_helpers.h" +#include "dmub/inc/dmub_srv.h" + +struct dmub_debugfs_trace_header { + uint32_t entry_count; + uint32_t reserved[3]; +}; + +struct dmub_debugfs_trace_entry { + uint32_t trace_code; + uint32_t tick_count; + uint32_t param0; + uint32_t param1; +}; /* function description * get/ set DP configuration: lane_count, link_rate, spread_spectrum @@ -675,6 +688,73 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us return bytes_from_user; } +/** + * Returns the DMCUB tracebuffer contents. 
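+ * The window is laid out as a struct dmub_debugfs_trace_header (an
+ * entry_count word plus three reserved words) followed by packed
+ * 16-byte struct dmub_debugfs_trace_entry records; entry_count is
+ * clamped to the number of entries the window can actually hold.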
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer + */ +static int dmub_tracebuffer_show(struct seq_file *m, void *data) +{ + struct amdgpu_device *adev = m->private; + struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; + struct dmub_debugfs_trace_entry *entries; + uint8_t *tbuf_base; + uint32_t tbuf_size, max_entries, num_entries, i; + + if (!fb_info) + return 0; + + tbuf_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr; + if (!tbuf_base) + return 0; + + tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size; + max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) / + sizeof(struct dmub_debugfs_trace_entry); + + num_entries = + ((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count; + + num_entries = min(num_entries, max_entries); + + entries = (struct dmub_debugfs_trace_entry + *)(tbuf_base + + sizeof(struct dmub_debugfs_trace_header)); + + for (i = 0; i < num_entries; ++i) { + struct dmub_debugfs_trace_entry *entry = &entries[i]; + + seq_printf(m, + "trace_code=%u tick_count=%u param0=%u param1=%u\n", + entry->trace_code, entry->tick_count, entry->param0, + entry->param1); + } + + return 0; +} + +/** + * Returns the DMCUB firmware state contents. + * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_fw_state + */ +static int dmub_fw_state_show(struct seq_file *m, void *data) +{ + struct amdgpu_device *adev = m->private; + struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; + uint8_t *state_base; + uint32_t state_size; + + if (!fb_info) + return 0; + + state_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr; + if (!state_base) + return 0; + + state_size = fb_info->fb[DMUB_WINDOW_6_FW_STATE].size; + + return seq_write(m, state_base, state_size); +} +
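For illustration only, not part of the patch: the same header-then-entries walk that dmub_tracebuffer_show() performs, as a standalone sketch over a made-up buffer. The struct and function names below are stand-ins, not DMUB API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct trace_header { uint32_t entry_count; uint32_t reserved[3]; };
struct trace_entry { uint32_t trace_code, tick_count, param0, param1; };

static void dump_tracebuffer(const uint8_t *tbuf, uint32_t tbuf_size)
{
	struct trace_header hdr;
	uint32_t max_entries = (tbuf_size - sizeof(hdr)) / sizeof(struct trace_entry);
	uint32_t i;

	memcpy(&hdr, tbuf, sizeof(hdr));
	if (hdr.entry_count > max_entries)	/* clamp, as the kernel does */
		hdr.entry_count = max_entries;

	for (i = 0; i < hdr.entry_count; ++i) {
		struct trace_entry e;

		memcpy(&e, tbuf + sizeof(hdr) + i * sizeof(e), sizeof(e));
		printf("trace_code=%u tick_count=%u param0=%u param1=%u\n",
		       e.trace_code, e.tick_count, e.param0, e.param1);
	}
}

int main(void)
{
	uint8_t buf[sizeof(struct trace_header) + sizeof(struct trace_entry)] = {0};
	struct trace_header hdr = { .entry_count = 1 };
	struct trace_entry ent = { 42, 1000, 1, 2 };

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), &ent, sizeof(ent));
	dump_tracebuffer(buf, sizeof(buf));
	return 0;
}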
/* * Returns the current and maximum output bpc for the connector. * Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc @@ -880,6 +960,8 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf, return read_size - r; } +DEFINE_SHOW_ATTRIBUTE(dmub_fw_state); +DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer); DEFINE_SHOW_ATTRIBUTE(output_bpc); DEFINE_SHOW_ATTRIBUTE(vrr_range); @@ -1008,6 +1090,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector, &force_yuv420_output_fops); + connector->debugfs_dpcd_address = 0; + connector->debugfs_dpcd_size = 0; + } /* @@ -1188,5 +1273,11 @@ int dtn_debugfs_init(struct amdgpu_device *adev) debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev, &visual_confirm_fops); + debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root, + adev, &dmub_tracebuffer_fops); + + debugfs_create_file_unsafe("amdgpu_dm_dmub_fw_state", 0644, root, + adev, &dmub_fw_state_fops); + return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 0acd3409dd6c..dcf84a61de37 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -28,6 +28,13 @@ #include "amdgpu_dm.h" #include "dm_helpers.h" #include <drm/drm_hdcp.h> +#include "hdcp_psp.h" + +/* + * If the SRM version being loaded is less than or equal to the + * currently loaded SRM, psp will return 0xFFFF as the version + */ +#define PSP_SRM_VERSION_MAX 0xFFFF static bool lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size) @@ -67,6 +74,59 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size) return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size); } +static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size) +{ + + struct ta_hdcp_shared_memory *hdcp_cmd; + + if (!psp->hdcp_context.hdcp_initialized) { + DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized."); + return NULL; + } + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_GET_SRM; + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return NULL; + + *srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version; + *srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size; + + + return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf; +} + +static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version) +{ + + struct ta_hdcp_shared_memory *hdcp_cmd; + + if (!psp->hdcp_context.hdcp_initialized) { + DRM_WARN("Failed to set hdcp srm.
HDCP TA is not initialized."); + return -EINVAL; + } + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size); + hdcp_cmd->in_msg.hdcp_set_srm.srm_buf_size = srm_size; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_SET_SRM; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 || + hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX) + return -EINVAL; + + *srm_version = hdcp_cmd->out_msg.hdcp_set_srm.srm_version; + return 0; +} + static void process_output(struct hdcp_workqueue *hdcp_work) { struct mod_hdcp_output output = hdcp_work->output; @@ -88,6 +148,18 @@ static void process_output(struct hdcp_workqueue *hdcp_work) schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0)); } +static void link_lock(struct hdcp_workqueue *work, bool lock) +{ + + int i = 0; + + for (i = 0; i < work->max_link; i++) { + if (lock) + mutex_lock(&work[i].mutex); + else + mutex_unlock(&work[i].mutex); + } +} void hdcp_update_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector, @@ -112,11 +184,21 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; if (enable_encryption) { + /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp + * (s3 resume case) + */ + if (hdcp_work->srm_size > 0) + psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size, + &hdcp_work->srm_version); + display->adjust.disable = 0; - if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) + if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) { + hdcp_w->link.adjust.hdcp1.disable = 0; hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; - else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) + } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) { + hdcp_w->link.adjust.hdcp1.disable = 1; hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; + } schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); @@ -184,7 +266,7 @@ static void event_callback(struct work_struct *work) mutex_lock(&hdcp_work->mutex); - cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + cancel_delayed_work(&hdcp_work->callback_dwork); mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK, &hdcp_work->output); @@ -265,6 +347,8 @@ static void event_watchdog_timer(struct work_struct *work) mutex_lock(&hdcp_work->mutex); + cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_WATCHDOG_TIMEOUT, &hdcp_work->output); @@ -301,8 +385,9 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work) cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork); } + kfree(hdcp_work->srm); + kfree(hdcp_work->srm_temp); kfree(hdcp_work); - } static void update_config(void *handle, struct cp_psp_stream_config *config) @@ -313,15 +398,15 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) struct mod_hdcp_display *display = &hdcp_work[link_index].display; struct mod_hdcp_link *link = &hdcp_work[link_index].link; - memset(display, 0, sizeof(*display)); - memset(link, 0, sizeof(*link)); - - display->index = aconnector->base.index; - if (config->dpms_off) { hdcp_remove_display(hdcp_work, 
link_index, aconnector); return; } + + memset(display, 0, sizeof(*display)); + memset(link, 0, sizeof(*link)); + + display->index = aconnector->base.index; display->state = MOD_HDCP_DISPLAY_ACTIVE; if (aconnector->dc_sink != NULL) @@ -332,26 +417,171 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) link->dig_be = config->link_enc_inst; link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1; link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; + link->dp.mst_supported = config->mst_supported; display->adjust.disable = 1; - link->adjust.auth_delay = 2; + link->adjust.auth_delay = 3; + link->adjust.hdcp1.disable = 0; hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false); } -struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc) + +/* NOTE: From the usermode's perspective you only need to call write *ONCE*, the kernel + * will automatically call once or twice depending on the size + * + * call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is + * + * The kernel can only send PAGE_SIZE at once and since MAX_SRM_FILE(5120) > PAGE_SIZE(4096), + * srm_data_write can be called multiple times. + * + * sysfs interface doesn't tell us the size we will get so we are sending partial SRMs to psp and on + * the last call we will send the full SRM. PSP will fail on every call before the last. + * + * This means we don't know if the SRM is good until the last call. And because of this limitation we + * cannot throw errors early as it will stop the kernel from writing to sysfs + * + * Example 1: + * Good SRM size = 5096 + * first call to write 4096 -> PSP fails + * Second call to write 1000 -> PSP Pass -> SRM is set + * + * Example 2: + * Bad SRM size = 4096 + * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this + * is the last call) + * + * Solution?: + * 1: Parse the SRM? -> It is signed so we don't know the EOF + * 2: We can have another sysfs that passes the size before calling set. -> simpler solution + * below + * + * Easy Solution: + * Always call get after Set to verify if set was successful. + * +----------------------+ + * | Why it works: | + * +----------------------+ + * PSP will only update its srm if it is older than the one we are trying to load. + * Always do set first, then get. + * -if we try to "1. SET" an older version PSP will reject it and we can "2. GET" the newer + * version and save it + * + * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the + * same(newer) version back and save it + * + * -if we try to "1. SET" a newer version and PSP rejects it. That means the format is + * incorrect/corrupted and we should correct our SRM by getting it from PSP + */
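For illustration only, not part of the patch: the set-then-get verification the NOTE above prescribes, seen from usermode. The sysfs path is the one from the comment, the 5120-byte cap mirrors MAX_SRM_FILE, and error handling is trimmed to the essentials.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#define HDCP_SRM_SYSFS "/sys/class/drm/card0/device/hdcp_srm"

/* Returns 1 if PSP accepted our SRM, 0 if it kept a newer one,
 * -1 on I/O error. */
static int srm_load_and_verify(const uint8_t *srm, size_t len)
{
	uint8_t readback[5120];
	ssize_t got;
	int fd;

	fd = open(HDCP_SRM_SYSFS, O_WRONLY);	/* 1. SET */
	if (fd < 0)
		return -1;
	if (write(fd, srm, len) < 0) {		/* one write; kernel may split it */
		close(fd);
		return -1;
	}
	close(fd);

	fd = open(HDCP_SRM_SYSFS, O_RDONLY);	/* 2. GET */
	if (fd < 0)
		return -1;
	got = read(fd, readback, sizeof(readback));
	close(fd);

	/* PSP keeps whichever SRM is newer; if ours came back, it won. */
	return got == (ssize_t)len && !memcmp(readback, srm, len);
}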
+static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, + loff_t pos, size_t count) +{ + struct hdcp_workqueue *work; + uint32_t srm_version = 0; + + work = container_of(bin_attr, struct hdcp_workqueue, attr); + link_lock(work, true); + + memcpy(work->srm_temp + pos, buffer, count); + + if (!psp_set_srm(work->hdcp.config.psp.handle, work->srm_temp, pos + count, &srm_version)) { + DRM_DEBUG_DRIVER("HDCP SRM SET version 0x%X", srm_version); + memcpy(work->srm, work->srm_temp, pos + count); + work->srm_size = pos + count; + work->srm_version = srm_version; + } + + + link_lock(work, false); + + return count; +} + +static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, + loff_t pos, size_t count) +{ + struct hdcp_workqueue *work; + uint8_t *srm = NULL; + uint32_t srm_version; + uint32_t srm_size; + size_t ret = count; + + work = container_of(bin_attr, struct hdcp_workqueue, attr); + + link_lock(work, true); + + srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size); + + if (!srm) { + ret = -EINVAL; + goto ret; + } + + if (pos >= srm_size) + ret = 0; + + if (srm_size - pos < count) { + memcpy(buffer, srm + pos, srm_size - pos); + ret = srm_size - pos; + goto ret; + } + + memcpy(buffer, srm + pos, count); + +ret: + link_lock(work, false); + return ret; +} + +/* From the hdcp spec (5.Renewability) SRM needs to be stored in a non-volatile memory. + * + * For example, + * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B" + * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent + * across boot/reboots/suspend/resume/shutdown + * + * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP we need + * to make the SRM persistent. + * + * -PSP owns the checking of SRM but doesn't have the ability to store it in a non-volatile memory. + * -The kernel cannot write to the file systems.
+ * -So we need usermode to do this for us, which is why an interface for usermode is needed + * + * + * + * Usermode can read/write to/from PSP using the sysfs interface + * For example: + * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile + * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm + */ +static const struct bin_attribute data_attr = { + .attr = {.name = "hdcp_srm", .mode = 0664}, + .size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */ + .write = srm_data_write, + .read = srm_data_read, +}; + + +struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc) { int max_caps = dc->caps.max_links; - struct hdcp_workqueue *hdcp_work = kzalloc(max_caps*sizeof(*hdcp_work), GFP_KERNEL); + struct hdcp_workqueue *hdcp_work; int i = 0; + hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL); if (hdcp_work == NULL) + return NULL; + + hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL); + + if (hdcp_work->srm == NULL) + goto fail_alloc_context; + + hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL); + + if (hdcp_work->srm_temp == NULL) goto fail_alloc_context; hdcp_work->max_link = max_caps; for (i = 0; i < max_caps; i++) { - mutex_init(&hdcp_work[i].mutex); INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq); @@ -360,7 +590,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *c INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer); INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate); - hdcp_work[i].hdcp.config.psp.handle = psp_context; + hdcp_work[i].hdcp.config.psp.handle = &adev->psp; hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c; hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; @@ -371,9 +601,17 @@ struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *c cp_psp->funcs.update_stream_config = update_config; cp_psp->handle = hdcp_work; + /* File created at /sys/class/drm/card0/device/hdcp_srm*/ + hdcp_work[0].attr = data_attr; + + if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr)) + DRM_WARN("Failed to create device file hdcp_srm"); + return hdcp_work; fail_alloc_context: + kfree(hdcp_work->srm); + kfree(hdcp_work->srm_temp); kfree(hdcp_work); return NULL; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h index 6abde86bce4a..5159b3a5e5b0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -30,6 +30,7 @@ #include "hdcp.h" #include "dc.h" #include "dm_cp_psp.h" +#include "amdgpu.h" struct mod_hdcp; struct mod_hdcp_link; @@ -52,6 +53,12 @@ struct hdcp_workqueue { enum mod_hdcp_encryption_status encryption_status; uint8_t max_link; + + uint8_t *srm; + uint8_t *srm_temp; + uint32_t srm_version; + uint32_t srm_size; + struct bin_attribute attr; }; void hdcp_update_display(struct hdcp_workqueue *hdcp_work, @@ -64,6 +71,6 @@ void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index); void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index); void hdcp_destroy(struct hdcp_workqueue *work); -struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct 
dc *dc); +struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc); #endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 318b474ff20e..c20fb08c450b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -400,8 +400,8 @@ bool dm_helpers_dp_mst_start_top_mgr( struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - DRM_ERROR("Failed to found connector for link!"); - return false; + DRM_ERROR("Failed to find connector for link!"); + return false; } if (boot) { @@ -423,8 +423,8 @@ void dm_helpers_dp_mst_stop_top_mgr( struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - DRM_ERROR("Failed to found connector for link!"); - return; + DRM_ERROR("Failed to find connector for link!"); + return; } DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n", @@ -445,7 +445,7 @@ bool dm_helpers_dp_read_dpcd( struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - DRM_ERROR("Failed to found connector for link!"); + DRM_ERROR("Failed to find connector for link!"); return false; } @@ -463,7 +463,7 @@ bool dm_helpers_dp_write_dpcd( struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - DRM_ERROR("Failed to found connector for link!"); + DRM_ERROR("Failed to find connector for link!"); return false; } @@ -483,7 +483,7 @@ bool dm_helpers_submit_i2c( bool result; if (!aconnector) { - DRM_ERROR("Failed to found connector for link!"); + DRM_ERROR("Failed to find connector for link!"); return false; } @@ -538,7 +538,7 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link) struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - BUG_ON("Failed to found connector for link!"); + BUG_ON("Failed to find connector for link!"); return true; } @@ -580,6 +580,20 @@ enum dc_edid_status dm_helpers_read_local_edid( /* We don't need the original edid anymore */ kfree(edid); + /* connector->display_info will be parsed from EDID and saved + * into drm_connector->display_info from edid by call stack + * below: + * drm_parse_ycbcr420_deep_color_info + * drm_parse_hdmi_forum_vsdb + * drm_parse_cea_ext + * drm_add_display_info + * drm_connector_update_edid_property + * + * drm_connector->display_info will be used by amdgpu_dm funcs, + * like fill_stream_properties_from_drm_display_mode + */ + amdgpu_dm_update_connector_after_detect(aconnector); + edid_status = dm_helpers_parse_edid_caps( ctx, &sink->dc_edid, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index da73161043d5..d2917759b7ab 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -154,15 +154,18 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - struct drm_dp_mst_port *port = amdgpu_dm_connector->port; + int r; + + r = drm_dp_mst_connector_late_register(connector, + amdgpu_dm_connector->port); + if (r < 0) + return r; #if defined(CONFIG_DEBUG_FS) connector_debugfs_init(amdgpu_dm_connector); - amdgpu_dm_connector->debugfs_dpcd_address = 0; - amdgpu_dm_connector->debugfs_dpcd_size = 0; #endif - return 
drm_dp_mst_connector_late_register(connector, port); + return 0; } static void @@ -204,7 +207,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, dsc_caps, NULL, - &dc_sink->sink_dsc_caps.dsc_dec_caps)) + &dc_sink->dsc_caps.dsc_dec_caps)) return false; return true; @@ -259,8 +262,8 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) #if defined(CONFIG_DRM_AMD_DC_DCN) if (!validate_dsc_caps_on_connector(aconnector)) - memset(&aconnector->dc_sink->sink_dsc_caps, - 0, sizeof(aconnector->dc_sink->sink_dsc_caps)); + memset(&aconnector->dc_sink->dsc_caps, + 0, sizeof(aconnector->dc_sink->dsc_caps)); #endif } } @@ -407,6 +410,14 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, drm_connector_attach_encoder(&aconnector->base, &aconnector->mst_encoder->base); + connector->max_bpc_property = master->base.max_bpc_property; + if (connector->max_bpc_property) + drm_connector_attach_max_bpc_property(connector, 8, 16); + + connector->vrr_capable_property = master->base.vrr_capable_property; + if (connector->vrr_capable_property) + drm_connector_attach_vrr_capable_property(connector); + drm_object_attach_property( &connector->base, dev->mode_config.path_property, @@ -437,9 +448,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector) { - struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr); - struct drm_device *dev = master->base.dev; - struct amdgpu_device *adev = dev->dev_private; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", @@ -455,39 +463,25 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, } drm_connector_unregister(connector); - if (adev->mode_info.rfbdev) - drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector); drm_connector_put(connector); } -static void dm_dp_mst_register_connector(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; - - if (adev->mode_info.rfbdev) - drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector); - else - DRM_ERROR("adev->mode_info.rfbdev is NULL\n"); - - drm_connector_register(connector); -} - static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { .add_connector = dm_dp_add_mst_connector, .destroy_connector = dm_dp_destroy_mst_connector, - .register_connector = dm_dp_mst_register_connector }; void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, - struct amdgpu_dm_connector *aconnector) + struct amdgpu_dm_connector *aconnector, + int link_index) { - aconnector->dm_dp_aux.aux.name = "dmdc"; - aconnector->dm_dp_aux.aux.dev = aconnector->base.kdev; + aconnector->dm_dp_aux.aux.name = + kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d", + link_index); aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer; aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc; - drm_dp_aux_register(&aconnector->dm_dp_aux.aux); + drm_dp_aux_init(&aconnector->dm_dp_aux.aux); drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux, &aconnector->base); @@ -548,7 +542,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p memset(¶ms[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg)); if (vars[i].dsc_enabled && 
dc_dsc_compute_config( params[i].sink->ctx->dc->res_pool->dscs[0], - ¶ms[i].sink->sink_dsc_caps.dsc_dec_caps, + ¶ms[i].sink->dsc_caps.dsc_dec_caps, params[i].sink->ctx->dc->debug.dsc_min_slice_height_override, 0, params[i].timing, @@ -569,7 +563,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); dc_dsc_compute_config( param.sink->ctx->dc->res_pool->dscs[0], - ¶m.sink->sink_dsc_caps.dsc_dec_caps, + ¶m.sink->dsc_caps.dsc_dec_caps, param.sink->ctx->dc->debug.dsc_min_slice_height_override, (int) kbps, param.timing, &dsc_config); @@ -766,14 +760,14 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, params[count].sink = stream->sink; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; params[count].port = aconnector->port; - params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported; + params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported; dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy); if (!dc_dsc_compute_bandwidth_range( stream->sink->ctx->dc->res_pool->dscs[0], stream->sink->ctx->dc->debug.dsc_min_slice_height_override, dsc_policy.min_target_bpp, dsc_policy.max_target_bpp, - &stream->sink->sink_dsc_caps.dsc_dec_caps, + &stream->sink->dsc_caps.dsc_dec_caps, &stream->timing, ¶ms[count].bw_range)) params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); @@ -855,7 +849,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, if (!aconnector || !aconnector->dc_sink) continue; - if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported) + if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) continue; if (computed_streams[i]) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index d6813ce67bbd..d2c56579a2cc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -32,7 +32,8 @@ struct amdgpu_dm_connector; int dm_mst_get_pbn_divider(struct dc_link *link); void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, - struct amdgpu_dm_connector *aconnector); + struct amdgpu_dm_connector *aconnector, + int link_index); #if defined(CONFIG_DRM_AMD_DC_DCN) bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 2f1c9584ac32..37fa7b48250e 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -267,7 +267,7 @@ static struct atom_display_object_path_v2 *get_bios_object( && id.enum_id == obj_id.enum_id) return &bp->object_info_tbl.v1_4->display_path[i]; } - /* fall through */ + fallthrough; case OBJECT_TYPE_CONNECTOR: case OBJECT_TYPE_GENERIC: /* Both Generic and Connector Object ID @@ -280,7 +280,7 @@ static struct atom_display_object_path_v2 *get_bios_object( && id.enum_id == obj_id.enum_id) return &bp->object_info_tbl.v1_4->display_path[i]; } - /* fall through */ + fallthrough; default: return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index c4ba6e84db65..8edc2506d49e 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ 
b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -221,8 +221,8 @@ static void init_transmitter_control(struct bios_parser *bp) uint8_t frev; uint8_t crev; - if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) == false) - BREAK_TO_DEBUGGER(); + BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev); + switch (crev) { case 6: bp->cmd_tbl.transmitter_control = transmitter_control_v1_6; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index 7388c987c595..204d7942a6e5 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -53,25 +53,18 @@ bool dal_bios_parser_init_cmd_tbl_helper2( case DCE_VERSION_11_2: case DCE_VERSION_11_22: + case DCE_VERSION_12_0: + case DCE_VERSION_12_1: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; #if defined(CONFIG_DRM_AMD_DC_DCN) case DCN_VERSION_1_0: case DCN_VERSION_1_01: - *h = dal_cmd_tbl_helper_dce112_get_table2(); - return true; -#endif - case DCN_VERSION_2_0: - *h = dal_cmd_tbl_helper_dce112_get_table2(); - return true; case DCN_VERSION_2_1: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; - case DCE_VERSION_12_0: - case DCE_VERSION_12_1: - *h = dal_cmd_tbl_helper_dce112_get_table2(); - return true; +#endif default: /* Unsupported DCE */ diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index 5d081c42e81b..2c6db379afae 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -3265,33 +3265,33 @@ bool bw_calcs(struct dc_context *ctx, bw_fixed_to_int(bw_mul(data-> stutter_exit_watermark[9], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[0].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[4], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[1].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[5], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[2].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->stutter_entry_wm_ns[3].b_mark = + calcs_output->stutter_entry_wm_ns[0].b_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[0], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].b_mark = + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].b_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->stutter_entry_wm_ns[3].b_mark = + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].b_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[7], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].b_mark = + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].b_mark = + 
bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].b_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->stutter_entry_wm_ns[5].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[9], bw_int_to_fixed(1000))); + stutter_entry_watermark[9], bw_int_to_fixed(1000))); calcs_output->urgent_wm_ns[0].b_mark = bw_fixed_to_int(bw_mul(data-> diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 1a37550731de..3960a8db94cb 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -703,11 +703,24 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v, } -unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev) +unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_t pci_revision_id) { - /* for dali & pollock, the highest voltage level we want is 0 */ - if (ASICREV_IS_POLLOCK(hw_internal_rev) || ASICREV_IS_DALI(hw_internal_rev)) - return 0; + /* for low power RV2 variants, the highest voltage level we want is 0 */ + if (ASICREV_IS_RAVEN2(hw_internal_rev)) + switch (pci_revision_id) { + case PRID_DALI_DE: + case PRID_DALI_DF: + case PRID_DALI_E3: + case PRID_DALI_E4: + case PRID_POLLOCK_94: + case PRID_POLLOCK_95: + case PRID_POLLOCK_E9: + case PRID_POLLOCK_EA: + case PRID_POLLOCK_EB: + return 0; + default: + break; + } /* we are ok with all levels */ return 4; @@ -1277,7 +1290,9 @@ bool dcn_validate_bandwidth( PERFORMANCE_TRACE_END(); BW_VAL_TRACE_FINISH(); - if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(dc->ctx->asic_id.hw_internal_rev)) + if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level( + dc->ctx->asic_id.hw_internal_rev, + dc->ctx->asic_id.pci_revision_id)) return true; else return false; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index a78e5c74c79c..8ec2dfe45d40 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -63,6 +63,25 @@ int clk_mgr_helper_get_active_display_cnt( return display_count; } +int clk_mgr_helper_get_active_plane_cnt( + struct dc *dc, + struct dc_state *context) +{ + int i, total_plane_count; + + total_plane_count = 0; + for (i = 0; i < context->stream_count; i++) { + const struct dc_stream_status stream_status = context->stream_status[i]; + + /* + * Sum up plane_count for all streams ( active and virtual ). 
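+ * For example, two streams carrying two planes and one plane give a
+ * total of three; dcn2_update_clocks() below treats a total of zero
+ * planes as safe to allow P-State change.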
+ */ + total_plane_count += stream_status.plane_count; + } + + return total_plane_count; +} + void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr) { struct dc_link *edp_link = get_edp_link(dc); @@ -134,13 +153,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p #if defined(CONFIG_DRM_AMD_DC_DCN) case FAMILY_RV: - if (ASICREV_IS_DALI(asic_id.hw_internal_rev) || - ASICREV_IS_POLLOCK(asic_id.hw_internal_rev)) { - /* TEMP: this check has to come before ASICREV_IS_RENOIR */ - /* which also incorrectly returns true for Dali/Pollock*/ - rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu); - break; - } if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) { rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); break; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 49ce46b543ea..55d09adbf0d9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -115,12 +115,11 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, dpp_inst = i; dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; - prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; + prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i]; - if ((prev_dppclk_khz > dppclk_khz && safe_to_lower) || prev_dppclk_khz < dppclk_khz) { + if (safe_to_lower || prev_dppclk_khz < dppclk_khz) clk_mgr->dccg->funcs->update_dpp_dto( clk_mgr->dccg, dpp_inst, dppclk_khz); - } } } @@ -158,6 +157,8 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, bool dpp_clock_lowered = false; struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; bool force_reset = false; + bool p_state_change_support; + int total_plane_count; if (dc->work_arounds.skip_clock_update) return; @@ -213,9 +214,11 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, pp_smu->set_hard_min_socclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.socclk_khz / 1000); } - if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { + total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context); + p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0); + if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; - clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support; + clk_mgr_base->clks.p_state_change_support = p_state_change_support; if (pp_smu && pp_smu->set_pstate_handshake_support) pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 9ef3f7b91a1d..24c5765890fa 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -46,6 +46,7 @@ /* Constants */ #define LPDDR_MEM_RETRAIN_LATENCY 4.977 /* Number obtained from LPDDR4 Training Counter Requirement doc */ +#define SMU_VER_55_51_0 0x373300 /* SMU Version that is able to set DISPCLK below 100MHz */ /* Macros */ @@ -405,7 +406,7 @@ void rn_init_clocks(struct clk_mgr *clk_mgr) clk_mgr->clks.pwr_state = 
DCN_PWR_STATE_UNKNOWN; } -void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges) +static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges) { int i, num_valid_sets; @@ -465,16 +466,15 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base) { struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug; - struct pp_smu_wm_range_sets ranges = {0}; struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu; if (!debug->disable_pplib_wm_range) { - build_watermark_ranges(clk_mgr_base->bw_params, &ranges); + build_watermark_ranges(clk_mgr_base->bw_params, &clk_mgr_base->ranges); /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ if (pp_smu && pp_smu->rn_funcs.set_wm_ranges) - pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges); + pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &clk_mgr_base->ranges); } } @@ -504,7 +504,7 @@ static struct clk_mgr_funcs dcn21_funcs = { .notify_wm_ranges = rn_notify_wm_ranges }; -struct clk_bw_params rn_bw_params = { +static struct clk_bw_params rn_bw_params = { .vram_type = Ddr4MemType, .num_channels = 1, .clk_table = { @@ -544,7 +544,7 @@ struct clk_bw_params rn_bw_params = { }; -struct wm_table ddr4_wm_table = { +static struct wm_table ddr4_wm_table = { .entries = { { .wm_inst = WM_A, @@ -581,7 +581,7 @@ struct wm_table ddr4_wm_table = { } }; -struct wm_table lpddr4_wm_table = { +static struct wm_table lpddr4_wm_table = { .entries = { { .wm_inst = WM_A, @@ -643,7 +643,7 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params /* Find lowest DPM, FCLK is filled in reverse order*/ for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) { - if (clock_table->FClocks[i].Freq != 0) { + if (clock_table->FClocks[i].Freq != 0 && clock_table->FClocks[i].Vol != 0) { j = i; break; } @@ -721,6 +721,13 @@ void rn_clk_mgr_construct( } else { struct clk_log_info log_info = {0}; + clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr); + + /* SMU Version 55.51.0 and up no longer have an issue + * that needs to limit minimum dispclk */ + if (clk_mgr->smu_ver >= SMU_VER_55_51_0) + debug->min_disp_clk_khz = 0; + /* TODO: Check we get what we expect during bringup */ clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 04441dbcba76..47431ca6986d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -283,6 +283,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, int i = 0; bool ret = false; + stream->adjust = *adjust; + for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -701,7 +703,7 @@ static bool dc_construct(struct dc *dc, dc_ctx->created_bios = true; } - + dc->vendor_signature = init_params->vendor_signature; /* Create GPIO service */ dc_ctx->gpio_service = dal_gpio_service_create( @@ -761,6 +763,28 @@ static bool disable_all_writeback_pipes_for_stream( return true; } +void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock) +{ + int i = 0; + + /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */ + if (dc->hwss.interdependent_update_lock) + 
dc->hwss.interdependent_update_lock(dc, context, lock); + else { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + // Copied conditions that were previously in dce110_apply_ctx_for_surface + if (stream == pipe_ctx->stream) { + if (!pipe_ctx->top_pipe && + (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) + dc->hwss.pipe_control_lock(dc, pipe_ctx, lock); + } + } + } +} + static void disable_dangling_plane(struct dc *dc, struct dc_state *context) { int i, j; @@ -786,11 +810,20 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) if (should_disable && old_stream) { dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); - if (dc->hwss.apply_ctx_for_surface) + + if (dc->hwss.apply_ctx_for_surface) { + apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true); dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); + apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false); + dc->hwss.post_unlock_program_front_end(dc, dangling_context); + } + if (dc->hwss.program_front_end_for_ctx) { + dc->hwss.interdependent_update_lock(dc, dc->current_state, true); + dc->hwss.program_front_end_for_ctx(dc, dangling_context); + dc->hwss.interdependent_update_lock(dc, dc->current_state, false); + dc->hwss.post_unlock_program_front_end(dc, dangling_context); + } } - if (dc->hwss.program_front_end_for_ctx) - dc->hwss.program_front_end_for_ctx(dc, dangling_context); } current_ctx = dc->current_state; @@ -801,11 +834,10 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) { int i; - int count = 0; - struct pipe_ctx *pipe; PERF_TRACE(); for (i = 0; i < MAX_PIPES; i++) { - pipe = &context->res_ctx.pipe_ctx[i]; + int count = 0; + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (!pipe->plane_state) continue; @@ -1210,16 +1242,19 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c /* re-program planes for existing stream, in case we need to * free up plane resource for later use */ - if (dc->hwss.apply_ctx_for_surface) + if (dc->hwss.apply_ctx_for_surface) { for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->mode_changed) continue; - + apply_ctx_interdependent_lock(dc, context, context->streams[i], true); dc->hwss.apply_ctx_for_surface( dc, context->streams[i], context->stream_status[i].plane_count, context); /* use new pipe config in new context */ + apply_ctx_interdependent_lock(dc, context, context->streams[i], false); + dc->hwss.post_unlock_program_front_end(dc, context); } + } /* Program hardware */ for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1238,19 +1273,27 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c } /* Program all planes within new context*/ - if (dc->hwss.program_front_end_for_ctx) + if (dc->hwss.program_front_end_for_ctx) { + dc->hwss.interdependent_update_lock(dc, context, true); dc->hwss.program_front_end_for_ctx(dc, context); + dc->hwss.interdependent_update_lock(dc, context, false); + dc->hwss.post_unlock_program_front_end(dc, context); + } for (i = 0; i < context->stream_count; i++) { const struct dc_link *link = context->streams[i]->link; if (!context->streams[i]->mode_changed) continue; - if 
(dc->hwss.apply_ctx_for_surface) + if (dc->hwss.apply_ctx_for_surface) { + apply_ctx_interdependent_lock(dc, context, context->streams[i], true); dc->hwss.apply_ctx_for_surface( dc, context->streams[i], context->stream_status[i].plane_count, context); + apply_ctx_interdependent_lock(dc, context, context->streams[i], false); + dc->hwss.post_unlock_program_front_end(dc, context); + } /* * enable stereo @@ -1318,10 +1361,24 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context) return (result == DC_OK); } -bool dc_is_hw_initialized(struct dc *dc) +static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) { - struct dc_bios *dcb = dc->ctx->dc_bios; - return dcb->funcs->is_accelerated_mode(dcb); + int i; + struct pipe_ctx *pipe; + + for (i = 0; i < MAX_PIPES; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + + if (!pipe->plane_state) + continue; + + /* Must set to false to start with, due to OR in update function */ + pipe->plane_state->status.is_flip_pending = false; + dc->hwss.update_pending_status(pipe); + if (pipe->plane_state->status.is_flip_pending) + return true; + } + return false; } bool dc_post_update_surfaces_to_stream(struct dc *dc) @@ -1329,11 +1386,14 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) int i; struct dc_state *context = dc->current_state; - if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0) + if ((!dc->optimized_required) || dc->optimize_seamless_boot_streams > 0) return true; post_surface_trace(dc); + if (is_flip_pending_in_pipes(dc, context)) + return true; + for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].stream == NULL || context->res_ctx.pipe_ctx[i].plane_state == NULL) { @@ -1341,9 +1401,11 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); } + dc->hwss.optimize_bandwidth(dc, context); + dc->optimized_required = false; + dc->wm_optimized_required = false; - dc->hwss.optimize_bandwidth(dc, context); return true; } @@ -1665,6 +1727,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc, if (u->coeff_reduction_factor) update_flags->bits.coeff_reduction_change = 1; + if (u->gamut_remap_matrix) + update_flags->bits.gamut_remap_change = 1; + if (u->gamma) { enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; @@ -1690,7 +1755,8 @@ static enum surface_update_type det_surface_update(const struct dc *dc, if (update_flags->bits.input_csc_change || update_flags->bits.coeff_reduction_change - || update_flags->bits.gamma_change) { + || update_flags->bits.gamma_change + || update_flags->bits.gamut_remap_change) { type = UPDATE_TYPE_FULL; elevate_update_type(&overall_type, type); } @@ -1734,14 +1800,15 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->wb_update) su_flags->bits.wb_update = 1; + + if (stream_update->dsc_config) + su_flags->bits.dsc_changed = 1; + if (su_flags->raw != 0) overall_type = UPDATE_TYPE_FULL; if (stream_update->output_csc_transform || stream_update->output_color_space) su_flags->bits.out_csc = 1; - - if (stream_update->dsc_config) - overall_type = UPDATE_TYPE_FULL; } for (i = 0 ; i < surface_count; i++) { @@ -1776,8 +1843,11 @@ enum surface_update_type dc_check_update_surfaces_for_stream( type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); if (type == UPDATE_TYPE_FULL) { - if (stream_update) + if (stream_update) { + uint32_t dsc_changed = 
stream_update->stream->update_flags.bits.dsc_changed; stream_update->stream->update_flags.raw = 0xFFFFFFFF; + stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; + } for (i = 0; i < surface_count; i++) updates[i].surface->update_flags.raw = 0xFFFFFFFF; } @@ -1791,6 +1861,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream( } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { dc->optimized_required = true; } + + dc->optimized_required |= dc->wm_optimized_required; } return type; @@ -1829,6 +1901,8 @@ static void copy_surface_update_to_plane( surface->time.index++; if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) surface->time.index = 0; + + surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips; } if (srf_update->scaling_info) { @@ -1928,6 +2002,10 @@ static void copy_surface_update_to_plane( if (srf_update->coeff_reduction_factor) surface->coeff_reduction_factor = *srf_update->coeff_reduction_factor; + + if (srf_update->gamut_remap_matrix) + surface->gamut_remap_matrix = + *srf_update->gamut_remap_matrix; } static void copy_stream_update_to_stream(struct dc *dc, @@ -2093,18 +2171,14 @@ static void commit_planes_do_stream_update(struct dc *dc, } } - if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) { - dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true); - dp_update_dsc_config(pipe_ctx); - dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false); - } /* Full fe update*/ if (update_type == UPDATE_TYPE_FAST) continue; - if (stream_update->dpms_off) { - dc->hwss.pipe_control_lock(dc, pipe_ctx, true); + if (stream_update->dsc_config) + dp_update_dsc_config(pipe_ctx); + if (stream_update->dpms_off) { if (*stream_update->dpms_off) { core_link_disable_stream(pipe_ctx); /* for dpms, keep acquired resources*/ @@ -2118,8 +2192,6 @@ static void commit_planes_do_stream_update(struct dc *dc, core_link_enable_stream(dc->current_state, pipe_ctx); } - - dc->hwss.pipe_control_lock(dc, pipe_ctx, false); } if (stream_update->abm_level && pipe_ctx->stream_res.abm) { @@ -2175,6 +2247,32 @@ static void commit_planes_for_stream(struct dc *dc, context_clock_trace(dc, context); } + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + + if (!pipe_ctx->top_pipe && + !pipe_ctx->prev_odm_pipe && + pipe_ctx->stream && + pipe_ctx->stream == stream) { + top_pipe_to_program = pipe_ctx; + } + } + + if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) + if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) + top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( + top_pipe_to_program->stream_res.tg); + + if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock) + dc->hwss.interdependent_update_lock(dc, context, true); + else + /* Lock the top pipe while updating plane addrs, since freesync requires + * plane addr update event triggers to be synchronized. 
+ * top_pipe_to_program is expected to never be NULL + */ + dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); + + // Stream updates if (stream_update) commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); @@ -2189,6 +2287,12 @@ static void commit_planes_for_stream(struct dc *dc, if (dc->hwss.program_front_end_for_ctx) dc->hwss.program_front_end_for_ctx(dc, context); + if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock) + dc->hwss.interdependent_update_lock(dc, context, false); + else + dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); + + dc->hwss.post_unlock_program_front_end(dc, context); return; } @@ -2224,8 +2328,6 @@ static void commit_planes_for_stream(struct dc *dc, pipe_ctx->stream == stream) { struct dc_stream_status *stream_status = NULL; - top_pipe_to_program = pipe_ctx; - if (!pipe_ctx->plane_state) continue; @@ -2270,12 +2372,6 @@ static void commit_planes_for_stream(struct dc *dc, // Update Type FAST, Surface updates if (update_type == UPDATE_TYPE_FAST) { - /* Lock the top pipe while updating plane addrs, since freesync requires - * plane addr update event triggers to be synchronized. - * top_pipe_to_program is expected to never be NULL - */ - dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); - if (dc->hwss.set_flip_control_gsl) for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; @@ -2317,9 +2413,30 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.update_plane_addr(dc, pipe_ctx); } } + } + if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock) + dc->hwss.interdependent_update_lock(dc, context, false); + else dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); - } + + if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) + if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { + top_pipe_to_program->stream_res.tg->funcs->wait_for_state( + top_pipe_to_program->stream_res.tg, + CRTC_STATE_VACTIVE); + top_pipe_to_program->stream_res.tg->funcs->wait_for_state( + top_pipe_to_program->stream_res.tg, + CRTC_STATE_VBLANK); + top_pipe_to_program->stream_res.tg->funcs->wait_for_state( + top_pipe_to_program->stream_res.tg, + CRTC_STATE_VACTIVE); + top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable( + top_pipe_to_program->stream_res.tg); + } + + if (update_type != UPDATE_TYPE_FAST) + dc->hwss.post_unlock_program_front_end(dc, context); // Fire manual trigger only when bottom plane is flipped for (j = 0; j < dc->res_pool->pipe_count; j++) { @@ -2347,7 +2464,7 @@ void dc_commit_updates_for_stream(struct dc *dc, enum surface_update_type update_type; struct dc_state *context; struct dc_context *dc_ctx = dc->ctx; - int i; + int i, j; stream_status = dc_stream_get_status(stream); context = dc->current_state; @@ -2385,6 +2502,17 @@ void dc_commit_updates_for_stream(struct dc *dc, copy_surface_update_to_plane(surface, &srf_updates[i]); + if (update_type >= UPDATE_TYPE_MED) { + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = + &context->res_ctx.pipe_ctx[j]; + + if (pipe_ctx->plane_state != surface) + continue; + + resource_build_scaling_params(pipe_ctx); + } + } } copy_stream_update_to_stream(dc, context, stream, stream_update); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index a09119c10d7c..67cfff1586e9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -45,7 +45,7 @@ #include "dpcd_defs.h" #include "dmcu.h" #include "hw/clk_mgr.h" -#include "../dce/dmub_psr.h" +#include "dce/dmub_psr.h" #define DC_LOGGER_INIT(logger) @@ -585,20 +585,23 @@ static void read_current_link_settings_on_detect(struct dc_link *link) LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; } -static bool detect_dp( - struct dc_link *link, - struct display_sink_capability *sink_caps, - bool *converter_disable_audio, - struct audio_support *audio_support, - enum dc_detect_reason reason) +static bool detect_dp(struct dc_link *link, + struct display_sink_capability *sink_caps, + bool *converter_disable_audio, + struct audio_support *audio_support, + enum dc_detect_reason reason) { bool boot = false; + sink_caps->signal = link_detect_sink(link, reason); sink_caps->transaction_type = get_ddc_transaction_type(sink_caps->signal); if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; + + dpcd_set_source_specific_data(link); + if (!detect_dp_sink_caps(link)) return false; @@ -606,9 +609,8 @@ static bool detect_dp( sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST; link->type = dc_connection_mst_branch; - dal_ddc_service_set_transaction_type( - link->ddc, - sink_caps->transaction_type); + dal_ddc_service_set_transaction_type(link->ddc, + sink_caps->transaction_type); /* * This call will initiate MST topology discovery. Which @@ -637,13 +639,10 @@ static bool detect_dp( if (reason == DETECT_REASON_BOOT) boot = true; - dm_helpers_dp_update_branch_info( - link->ctx, - link); + dm_helpers_dp_update_branch_info(link->ctx, link); - if (!dm_helpers_dp_mst_start_top_mgr( - link->ctx, - link, boot)) { + if (!dm_helpers_dp_mst_start_top_mgr(link->ctx, + link, boot)) { /* MST not supported */ link->type = dc_connection_single; sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; @@ -651,7 +650,7 @@ static bool detect_dp( } if (link->type != dc_connection_mst_branch && - is_dp_active_dongle(link)) { + is_dp_active_dongle(link)) { /* DP active dongles */ link->type = dc_connection_active_dongle; if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) { @@ -662,14 +661,15 @@ static bool detect_dp( return true; } - if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER) + if (link->dpcd_caps.dongle_type != + DISPLAY_DONGLE_DP_HDMI_CONVERTER) *converter_disable_audio = true; } } else { /* DP passive dongles */ sink_caps->signal = dp_passive_dongle_detection(link->ddc, - sink_caps, - audio_support); + sink_caps, + audio_support); } return true; @@ -769,8 +769,16 @@ static bool dc_link_detect_helper(struct dc_link *link, if ((link->connector_signal == SIGNAL_TYPE_LVDS || link->connector_signal == SIGNAL_TYPE_EDP) && - link->local_sink) + link->local_sink) { + + // need to re-write OUI and brightness in resume case + if (link->connector_signal == SIGNAL_TYPE_EDP) { + dpcd_set_source_specific_data(link); + dc_link_set_default_brightness_aux(link); //TODO: use cached + } + return true; + } if (false == dc_link_detect_sink(link, &new_connection_type)) { BREAK_TO_DEBUGGER(); @@ -818,6 +826,10 @@ static bool dc_link_detect_helper(struct dc_link *link, } case SIGNAL_TYPE_EDP: { + read_current_link_settings_on_detect(link); + + dpcd_set_source_specific_data(link); + detect_edp_sink_caps(link); read_current_link_settings_on_detect(link); sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; @@ -958,10 +970,16 @@ static bool dc_link_detect_helper(struct dc_link *link, break; } + if 
(link->local_sink->edid_caps.panel_patch.disable_fec) + link->ctx->dc->debug.disable_fec = true; + // Check if edid is the same if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK))) same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid); + if (sink->edid_caps.panel_patch.skip_scdc_overwrite) + link->ctx->dc->debug.hdmi20_disable = true; + if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { /* @@ -1480,9 +1498,8 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx) } } -static enum dc_status enable_link_dp( - struct dc_state *state, - struct pipe_ctx *pipe_ctx) +static enum dc_status enable_link_dp(struct dc_state *state, + struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; enum dc_status status; @@ -1492,6 +1509,7 @@ static enum dc_status enable_link_dp( bool fec_enable; int i; bool apply_seamless_boot_optimization = false; + uint32_t bl_oled_enable_delay = 50; // in ms // check for seamless boot for (i = 0; i < state->stream_count; i++) { @@ -1513,31 +1531,45 @@ static enum dc_status enable_link_dp( pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; if (state->clk_mgr && !apply_seamless_boot_optimization) - state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false); + state->clk_mgr->funcs->update_clocks(state->clk_mgr, + state, false); + + // during mode switch we do DP_SET_POWER off then on, and OUI is lost + dpcd_set_source_specific_data(link); skip_video_pattern = true; if (link_settings.link_rate == LINK_RATE_LOW) - skip_video_pattern = false; - - if (perform_link_training_with_retries( - &link_settings, - skip_video_pattern, - LINK_TRAINING_ATTEMPTS, - pipe_ctx, - pipe_ctx->stream->signal)) { + skip_video_pattern = false; + + if (perform_link_training_with_retries(&link_settings, + skip_video_pattern, + LINK_TRAINING_ATTEMPTS, + pipe_ctx, + pipe_ctx->stream->signal)) { link->cur_link_settings = link_settings; status = DC_OK; - } - else + } else { status = DC_FAIL_DP_LINK_TRAINING; + } - if (link->preferred_training_settings.fec_enable != NULL) + if (link->preferred_training_settings.fec_enable) fec_enable = *link->preferred_training_settings.fec_enable; else fec_enable = true; dp_set_fec_enable(link, fec_enable); + + // during mode set we do DP_SET_POWER off then on, aux writes are lost + if (link->dpcd_sink_ext_caps.bits.oled == 1 || + link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 || + link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) { + dc_link_set_default_brightness_aux(link); // TODO: use cached if known + if (link->dpcd_sink_ext_caps.bits.oled == 1) + msleep(bl_oled_enable_delay); + dc_link_backlight_enable_aux(link, true); + } + return status; } @@ -1733,8 +1765,7 @@ static void write_i2c_retimer_setting( slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A * needs to be set to 1 on every 0xA-0xC write. 
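[Editor's worked sketch] The retimer hunks above and below replace each per-write ASSERT(i2c_success) with a jump to a single shared i2c_write_fail label that logs once. A minimal standalone sketch of that single-exit pattern follows; i2c_write_reg() and program_retimer_sketch() are hypothetical stand-ins for the driver's write_i2c() helper and DC_LOG_DEBUG(), not the actual DC API:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the driver's write_i2c() helper. */
	static bool i2c_write_reg(unsigned char offset, unsigned char value)
	{
		printf("i2c write: offset 0x%02x = 0x%02x\n", offset, value);
		return true; /* pretend the bus transaction succeeded */
	}

	static void program_retimer_sketch(void)
	{
		/* Each write used to ASSERT() on failure and keep going;
		 * now the first failure jumps to one shared exit label. */
		if (!i2c_write_reg(0x0A, 0x17))
			goto i2c_write_fail;
		if (!i2c_write_reg(0x0B, 0xDA))
			goto i2c_write_fail;
		if (!i2c_write_reg(0x0C, 0x1D))
			goto i2c_write_fail;
		return;

	i2c_write_fail:
		printf("Set retimer failed\n");
	}

	int main(void)
	{
		program_retimer_sketch();
		return 0;
	}

The direction of the patch suggests a failed retimer write is survivable at runtime, so one debug log on the shared exit replaces asserting after every transaction.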
@@ -1752,8 +1783,7 @@ static void write_i2c_retimer_setting( pipe_ctx->stream->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } buffer[0] = offset; @@ -1765,8 +1795,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } } } @@ -1786,8 +1815,7 @@ static void write_i2c_retimer_setting( slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A * needs to be set to 1 on every 0xA-0xC write. @@ -1805,8 +1833,7 @@ static void write_i2c_retimer_setting( pipe_ctx->stream->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } buffer[0] = offset; @@ -1818,8 +1845,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } } } @@ -1837,8 +1863,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x00 to 0x23 */ buffer[0] = 0x00; @@ -1849,8 +1874,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0xff to 0x00 */ buffer[0] = 0xff; @@ -1861,10 +1885,14 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set retimer failed"); } static void write_i2c_default_retimer_setting( @@ -1889,8 +1917,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1901,8 +1928,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0B to 0xDA or 0xD8 */ buffer[0] = 0x0B; @@ -1913,8 +1939,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1925,8 +1950,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0C to 0x1D or 0x91 */ buffer[0] = 0x0C; @@ 
-1937,8 +1961,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1949,8 +1972,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; if (is_vga_mode) { @@ -1965,8 +1987,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x00 to 0x23 */ buffer[0] = 0x00; @@ -1977,8 +1998,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0xff to 0x00 */ buffer[0] = 0xff; @@ -1989,9 +2009,13 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set default retimer failed"); } static void write_i2c_redriver_setting( @@ -2020,8 +2044,7 @@ static void write_i2c_redriver_setting( slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + DC_LOG_DEBUG("Set redriver failed"); } static void disable_link(struct dc_link *link, enum signal_type signal) @@ -2400,8 +2423,8 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool struct dmcu *dmcu = dc->res_pool->dmcu; struct dmub_psr *psr = dc->res_pool->psr; - if ((psr != NULL) && link->psr_feature_enabled) - psr->funcs->set_psr_enable(psr, allow_active); + if (psr != NULL && link->psr_feature_enabled) + psr->funcs->psr_enable(psr, allow_active); else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled) dmcu->funcs->set_psr_enable(dmcu, allow_active, wait); @@ -2417,7 +2440,7 @@ bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state) struct dmub_psr *psr = dc->res_pool->psr; if (psr != NULL && link->psr_feature_enabled) - psr->funcs->get_psr_state(psr_state); + psr->funcs->psr_get_state(psr, psr_state); else if (dmcu != NULL && link->psr_feature_enabled) dmcu->funcs->get_psr_state(dmcu, psr_state); @@ -2589,7 +2612,7 @@ bool dc_link_setup_psr(struct dc_link *link, psr_context->frame_delay = 0; if (psr) - link->psr_feature_enabled = psr->funcs->setup_psr(psr, link, psr_context); + link->psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context); else link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); @@ -2922,10 +2945,13 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) memset(&config, 0, sizeof(config)); config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; - config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id; + /*stream_enc_inst*/ + config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst; 
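/* The PSP stream config now reads the encoder's dedicated
 * stream_enc_inst field instead of its id (resource_map_pool_resources
 * in dc_resource.c below makes the matching switch), and the new
 * mst_supported flag tells PSP whether the signal is DP MST. */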
config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst; config.dpms_off = dpms_off; config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; + config.mst_supported = (pipe_ctx->stream->signal == + SIGNAL_TYPE_DISPLAY_PORT_MST); cp_psp->funcs.update_stream_config(cp_psp->handle, &config); } } @@ -3061,6 +3087,9 @@ void core_link_enable_stream( dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); + if (stream->sink_patches.delay_ignore_msa > 0) + msleep(stream->sink_patches.delay_ignore_msa); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) enable_stream_features(pipe_ctx); #if defined(CONFIG_DRM_AMD_DC_HDCP) @@ -3373,7 +3402,7 @@ uint32_t dc_link_bandwidth_kbps( link_bw_kbps *= 8; /* 8 bits per byte*/ link_bw_kbps *= link_setting->lane_count; - if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { + if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec) { /* Account for FEC overhead. * We have to do it based on caps, * and not based on FEC being set ready, @@ -3417,3 +3446,11 @@ void dc_link_overwrite_extended_receiver_cap( dp_overwrite_extended_receiver_cap(link); } +bool dc_link_is_fec_supported(const struct dc_link *link) +{ + return (dc_is_dp_signal(link->connector_signal) && + link->link_enc->features.fec_supported && + link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && + !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)); +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index a49c10d5df26..256889eed93e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -126,22 +126,16 @@ struct aux_payloads { struct vector payloads; }; -static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count) +static bool dal_ddc_i2c_payloads_create( + struct dc_context *ctx, + struct i2c_payloads *payloads, + uint32_t count) { - struct i2c_payloads *payloads; - - payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL); - - if (!payloads) - return NULL; - if (dal_vector_construct( &payloads->payloads, ctx, count, sizeof(struct i2c_payload))) - return payloads; - - kfree(payloads); - return NULL; + return true; + return false; } static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p) @@ -154,14 +148,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p) return p->payloads.count; } -static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p) +static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p) { - if (!p || !*p) + if (!p) return; - dal_vector_destruct(&(*p)->payloads); - kfree(*p); - *p = NULL; + dal_vector_destruct(&p->payloads); } #define DDC_MIN(a, b) (((a) < (b)) ? 
(a) : (b)) @@ -524,9 +516,13 @@ bool dal_ddc_service_query_ddc_data( uint32_t payloads_num = write_payloads + read_payloads; + if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE) return false; + if (!payloads_num) + return false; + /*TODO: len of payload data for i2c and aux is uint8!!!!, * but we want to read 256 over i2c!!!!*/ if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) { @@ -557,23 +553,25 @@ bool dal_ddc_service_query_ddc_data( ret = dal_ddc_submit_aux_command(ddc, &payload); } } else { - struct i2c_payloads *payloads = - dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num); + struct i2c_command command = {0}; + struct i2c_payloads payloads; - struct i2c_command command = { - .payloads = dal_ddc_i2c_payloads_get(payloads), - .number_of_payloads = 0, - .engine = DDC_I2C_COMMAND_ENGINE, - .speed = ddc->ctx->dc->caps.i2c_speed_in_khz }; + if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num)) + return false; + + command.payloads = dal_ddc_i2c_payloads_get(&payloads); + command.number_of_payloads = 0; + command.engine = DDC_I2C_COMMAND_ENGINE; + command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz; dal_ddc_i2c_payloads_add( - payloads, address, write_size, write_buf, true); + &payloads, address, write_size, write_buf, true); dal_ddc_i2c_payloads_add( - payloads, address, read_size, read_buf, false); + &payloads, address, read_size, read_buf, false); command.number_of_payloads = - dal_ddc_i2c_payloads_get_count(payloads); + dal_ddc_i2c_payloads_get_count(&payloads); ret = dm_helpers_submit_i2c( ddc->ctx, @@ -686,6 +684,10 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service, uint8_t write_buffer[2] = {0}; /*Lower than 340 Scramble bit from SCDC caps*/ + if (ddc_service->link->local_sink && + ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite) + return; + dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &sink_version, sizeof(sink_version)); if (sink_version == 1) { @@ -715,6 +717,10 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service) uint8_t offset = HDMI_SCDC_TMDS_CONFIG; uint8_t tmds_config = 0; + if (ddc_service->link->local_sink && + ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite) + return; + dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &tmds_config, sizeof(tmds_config)); if (tmds_config & 0x1) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index fd9e69634c50..caa090d0b6ac 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -220,6 +220,30 @@ static enum dpcd_training_patterns return dpcd_tr_pattern; } +static uint8_t dc_dp_initialize_scrambling_data_symbols( + struct dc_link *link, + enum dc_dp_training_pattern pattern) +{ + uint8_t disable_scrabled_data_symbols = 0; + + switch (pattern) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + case DP_TRAINING_PATTERN_SEQUENCE_2: + case DP_TRAINING_PATTERN_SEQUENCE_3: + disable_scrabled_data_symbols = 1; + break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + disable_scrabled_data_symbols = 0; + break; + default: + ASSERT(0); + DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", + __func__, pattern); + break; + } + return disable_scrabled_data_symbols; +} + static inline bool is_repeater(struct dc_link *link, uint32_t offset) { return (!link->is_lttpr_mode_transparent && offset != 0); @@ -252,6 +276,9 @@ static void 
dpcd_set_lt_pattern_and_lane_settings( dpcd_pattern.v1_4.TRAINING_PATTERN_SET = dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern); + dpcd_pattern.v1_4.SCRAMBLING_DISABLE = + dc_dp_initialize_scrambling_data_symbols(link, pattern); + dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] = dpcd_pattern.raw; @@ -945,6 +972,17 @@ static enum link_training_result perform_channel_equalization_sequence( } #define TRAINING_AUX_RD_INTERVAL 100 //us +static void start_clock_recovery_pattern_early(struct dc_link *link, + struct link_training_settings *lt_settings, + uint32_t offset) +{ + DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n", + __func__); + dp_set_hw_training_pattern(link, DP_TRAINING_PATTERN_SEQUENCE_1, offset); + dp_set_hw_lane_settings(link, lt_settings, offset); + udelay(400); +} + static enum link_training_result perform_clock_recovery_sequence( struct dc_link *link, struct link_training_settings *lt_settings, @@ -962,7 +1000,8 @@ static enum link_training_result perform_clock_recovery_sequence( retries_cr = 0; retry_count = 0; - dp_set_hw_training_pattern(link, tr_pattern, offset); + if (!link->ctx->dc->work_arounds.lt_early_cr_pattern) + dp_set_hw_training_pattern(link, tr_pattern, offset); /* najeeb - The synaptics MST hub can put the LT in * infinite loop by switching the VS @@ -1434,6 +1473,13 @@ enum link_training_result dc_link_dp_perform_link_training( &link->preferred_training_settings, <_settings); + /* Configure lttpr mode */ + if (!link->is_lttpr_mode_transparent) + configure_lttpr_mode(link); + + if (link->ctx->dc->work_arounds.lt_early_cr_pattern) + start_clock_recovery_pattern_early(link, <_settings, DPRX); + /* 1. set link rate, lane count and spread. */ dpcd_set_link_settings(link, <_settings); @@ -1445,8 +1491,6 @@ enum link_training_result dc_link_dp_perform_link_training( dp_set_fec_ready(link, fec_enable); if (!link->is_lttpr_mode_transparent) { - /* Configure lttpr mode */ - configure_lttpr_mode(link); /* 2. perform link training (set link training done * to false is done as well) @@ -1654,6 +1698,8 @@ enum link_training_result dc_link_dp_sync_lt_attempt( dp_set_panel_mode(link, panel_mode); /* Attempt to train with given link training settings */ + if (link->ctx->dc->work_arounds.lt_early_cr_pattern) + start_clock_recovery_pattern_early(link, <_settings, DPRX); /* Set link rate, lane count and spread. */ dpcd_set_link_settings(link, <_settings); @@ -1892,6 +1938,16 @@ bool dp_verify_link_cap( /* disable PHY done possible by BIOS, will be done by driver itself */ dp_disable_link_phy(link, link->connector_signal); + dp_cs_id = get_clock_source_id(link); + + /* link training starts with the maximum common settings + * supported by both sink and ASIC. + */ + initial_link_settings = get_common_supported_link_settings( + *known_limit_link_setting, + max_link_cap); + cur_link_setting = initial_link_settings; + /* Temporary Renoir-specific workaround for SWDEV-215184; * PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle, * so add extra cycle of enabling and disabling the PHY before first link training. @@ -1902,15 +1958,6 @@ bool dp_verify_link_cap( dp_disable_link_phy(link, link->connector_signal); } - dp_cs_id = get_clock_source_id(link); - - /* link training starts with the maximum common settings - * supported by both sink and ASIC. 
- */ - initial_link_settings = get_common_supported_link_settings( - *known_limit_link_setting, - max_link_cap); - cur_link_setting = initial_link_settings; do { skip_video_pattern = true; @@ -2654,9 +2701,12 @@ static void dp_test_send_link_test_pattern(struct dc_link *link) break; } - test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? - DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : - DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; + if (dpcd_test_params.bits.CLR_FORMAT == 0) + test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB; + else + test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? + DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : + DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; dc_link_dp_set_test_pattern( link, @@ -2888,6 +2938,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) + link->dc->hwss.blank_stream(pipe_ctx); + } + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) break; } @@ -2904,6 +2960,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_link_reallocate_mst_payload(link); + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) + link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings); + } + status = false; if (out_link_loss) *out_link_loss = true; @@ -3165,6 +3227,23 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, link->wa_flags.dp_keep_receiver_powered = false; } +/* Read additional sink caps defined in source specific DPCD area + * This function currently only reads from SinkCapability address (DP_SOURCE_SINK_CAP) + */ +static bool dpcd_read_sink_ext_caps(struct dc_link *link) +{ + uint8_t dpcd_data; + + if (!link) + return false; + + if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK) + return false; + + link->dpcd_sink_ext_caps.raw = dpcd_data; + return true; +} + static bool retrieve_link_cap(struct dc_link *link) { /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, @@ -3448,6 +3527,9 @@ static bool retrieve_link_cap(struct dc_link *link) sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw)); } + if (!dpcd_read_sink_ext_caps(link)) + link->dpcd_sink_ext_caps.raw = 0; + /* Connectivity log: detection */ CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); @@ -3600,6 +3682,8 @@ void detect_edp_sink_caps(struct dc_link *link) } } link->verified_link_cap = link->reported_link_cap; + + dc_link_set_default_brightness_aux(link); } void dc_link_dp_enable_hpd(const struct dc_link *link) @@ -3691,7 +3775,8 @@ static void set_crtc_test_pattern(struct dc_link *link, struct pipe_ctx *odm_pipe; enum controller_dp_color_space controller_color_space; int opp_cnt = 1; - int count; + int offset = 0; + int dpg_width = width; switch (test_pattern_color_space) { case DP_TEST_PATTERN_COLOR_SPACE_RGB: @@ -3713,33 +3798,30 @@ static void set_crtc_test_pattern(struct dc_link *link, for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; + dpg_width = width / opp_cnt; + offset = dpg_width; - width /= opp_cnt; + 
opp->funcs->opp_set_disp_pattern_generator(opp, + controller_test_pattern, + controller_color_space, + color_depth, + NULL, + dpg_width, + height, + 0); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; - odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp, controller_test_pattern, controller_color_space, color_depth, NULL, - width, - height); - } - opp->funcs->opp_set_disp_pattern_generator(opp, - controller_test_pattern, - controller_color_space, - color_depth, - NULL, - width, - height); - /* wait for dpg to blank pixel data with test pattern */ - for (count = 0; count < 1000; count++) { - if (opp->funcs->dpg_is_blanked(opp)) - break; - udelay(100); + dpg_width, + height, + offset); + offset += offset; } } } @@ -3757,11 +3839,12 @@ static void set_crtc_test_pattern(struct dc_link *link, else if (opp->funcs->opp_set_disp_pattern_generator) { struct pipe_ctx *odm_pipe; int opp_cnt = 1; + int dpg_width = width; for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; - width /= opp_cnt; + dpg_width = width / opp_cnt; for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; @@ -3771,16 +3854,18 @@ static void set_crtc_test_pattern(struct dc_link *link, CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, - width, - height); + dpg_width, + height, + 0); } opp->funcs->opp_set_disp_pattern_generator(opp, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, - width, - height); + dpg_width, + height, + 0); } } break; @@ -3958,6 +4043,11 @@ bool dc_link_dp_set_test_pattern( default: break; } + + if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) + pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable( + pipe_ctx->stream_res.tg); + pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); /* update MSA to requested color space */ pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc, &pipe_ctx->stream->timing, @@ -3965,9 +4055,27 @@ bool dc_link_dp_set_test_pattern( pipe_ctx->stream->use_vsc_sdp_for_colorimetry, link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); + if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) { + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range + else + pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7); + resource_build_info_frame(pipe_ctx); + link->dc->hwss.update_info_frame(pipe_ctx); + } + /* CRTC Patterns */ set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); - + pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, + CRTC_STATE_VACTIVE); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, + CRTC_STATE_VBLANK); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, + CRTC_STATE_VACTIVE); + if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) + pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable( + pipe_ctx->stream_res.tg); /* Set Test Pattern state */ link->test_pattern_enabled = true; } @@ -4097,8 +4205,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready) struct 
link_encoder *link_enc = link->link_enc; uint8_t fec_config = 0; - if (link->dc->debug.disable_fec || - IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)) + if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec) return; if (link_enc->funcs->fec_set_ready && @@ -4133,8 +4240,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) { struct link_encoder *link_enc = link->link_enc; - if (link->dc->debug.disable_fec || - IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)) + if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec) return; if (link_enc->funcs->fec_set_enable && @@ -4157,3 +4263,163 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) } } +void dpcd_set_source_specific_data(struct dc_link *link) +{ + const uint32_t post_oui_delay = 30; // 30ms + uint8_t dspc = 0; + enum dc_status ret = DC_ERROR_UNEXPECTED; + + ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc, + sizeof(dspc)); + + if (ret != DC_OK) { + DC_LOG_ERROR("Error in DP aux read transaction," + " not writing source specific data\n"); + return; + } + + /* Return if OUI unsupported */ + if (!(dspc & DP_OUI_SUPPORT)) + return; + + if (!link->dc->vendor_signature.is_valid) { + struct dpcd_amd_signature amd_signature; + amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0; + amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0; + amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A; + amd_signature.device_id_byte1 = + (uint8_t)(link->ctx->asic_id.chip_id); + amd_signature.device_id_byte2 = + (uint8_t)(link->ctx->asic_id.chip_id >> 8); + memset(&amd_signature.zero, 0, 4); + amd_signature.dce_version = + (uint8_t)(link->ctx->dce_version); + amd_signature.dal_version_byte1 = 0x0; // needed? where to get? + amd_signature.dal_version_byte2 = 0x0; // needed? where to get? + + core_link_write_dpcd(link, DP_SOURCE_OUI, + (uint8_t *)(&amd_signature), + sizeof(amd_signature)); + + } else { + core_link_write_dpcd(link, DP_SOURCE_OUI, + link->dc->vendor_signature.data.raw, + sizeof(link->dc->vendor_signature.data.raw)); + } + + // Sink may need to configure internals based on vendor, so allow some + // time before proceeding with possibly vendor specific transactions + msleep(post_oui_delay); +} + +bool dc_link_set_backlight_level_nits(struct dc_link *link, + bool isHDR, + uint32_t backlight_millinits, + uint32_t transition_time_in_ms) +{ + struct dpcd_source_backlight_set dpcd_backlight_set; + uint8_t backlight_control = isHDR ? 
1 : 0; + + if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && + link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) + return false; + + // OLEDs have no PWM, they can only use AUX + if (link->dpcd_sink_ext_caps.bits.oled == 1) + backlight_control = 1; + + *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; + *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; + + + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + (uint8_t *)(&dpcd_backlight_set), + sizeof(dpcd_backlight_set)) != DC_OK) + return false; + + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, + &backlight_control, 1) != DC_OK) + return false; + + return true; +} + +bool dc_link_get_backlight_level_nits(struct dc_link *link, + uint32_t *backlight_millinits_avg, + uint32_t *backlight_millinits_peak) +{ + union dpcd_source_backlight_get dpcd_backlight_get; + + memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get)); + + if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && + link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) + return false; + + if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, + dpcd_backlight_get.raw, + sizeof(union dpcd_source_backlight_get))) + return false; + + *backlight_millinits_avg = + dpcd_backlight_get.bytes.backlight_millinits_avg; + *backlight_millinits_peak = + dpcd_backlight_get.bytes.backlight_millinits_peak; + + /* On non-supported panels dpcd_read usually succeeds with 0 returned */ + if (*backlight_millinits_avg == 0 || + *backlight_millinits_avg > *backlight_millinits_peak) + return false; + + return true; +} + +bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable) +{ + uint8_t backlight_enable = enable ? 
1 : 0; + + if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && + link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) + return false; + + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE, + &backlight_enable, 1) != DC_OK) + return false; + + return true; +} + +// we read default from 0x320 because we expect BIOS wrote it there +// regular get_backlight_nit reads from panel set at 0x326 +bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits) +{ + if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && + link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) + return false; + + if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + (uint8_t *) backlight_millinits, + sizeof(uint32_t))) + return false; + + return true; +} + +bool dc_link_set_default_brightness_aux(struct dc_link *link) +{ + uint32_t default_backlight; + + if (link && + (link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 || + link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) { + if (!dc_link_read_default_bl_aux(link, &default_backlight)) + default_backlight = 150000; + // if < 5 nits or > 5000, it might be wrong readback + if (default_backlight < 5000 || default_backlight > 5000000) + default_backlight = 150000; // + + return dc_link_set_backlight_level_nits(link, true, + default_backlight, 0); + } + return false; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index ddb855045767..51e0ee6e7695 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -153,18 +153,19 @@ bool edp_receiver_ready_T9(struct dc_link *link) unsigned char edpRev = 0; enum dc_status result = DC_OK; result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); - if (edpRev < DP_EDP_12) - return true; - /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ - do { - sinkstatus = 1; - result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); - if (sinkstatus == 0) - break; - if (result != DC_OK) - break; - udelay(100); //MAx T9 - } while (++tries < 50); + + /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ + if (result == DC_OK && edpRev >= DP_EDP_12) { + do { + sinkstatus = 1; + result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); + if (sinkstatus == 0) + break; + if (result != DC_OK) + break; + udelay(100); //MAx T9 + } while (++tries < 50); + } if (link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0) udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000); @@ -183,21 +184,22 @@ bool edp_receiver_ready_T7(struct dc_link *link) unsigned long long time_taken_in_ns = 0; result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); - if (result == DC_OK && edpRev < DP_EDP_12) - return true; - /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/ - enter_timestamp = dm_get_timestamp(link->ctx); - do { - sinkstatus = 0; - result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); - if (sinkstatus == 1) - break; - if (result != DC_OK) - break; - udelay(25); - finish_timestamp = dm_get_timestamp(link->ctx); - time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); - } while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms + + if (result == DC_OK && edpRev >= DP_EDP_12) { + /* start from eDP version 
1.2, SINK_STAUS indicate the sink is ready.*/ + enter_timestamp = dm_get_timestamp(link->ctx); + do { + sinkstatus = 0; + result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); + if (sinkstatus == 1) + break; + if (result != DC_OK) + break; + udelay(25); + finish_timestamp = dm_get_timestamp(link->ctx); + time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); + } while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms + } if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0) udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000); @@ -429,6 +431,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; @@ -533,6 +536,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; DC_LOG_DSC(" "); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index a0eb9e533a61..f4bcc71b2920 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -46,12 +46,12 @@ #include "dce100/dce100_resource.h" #include "dce110/dce110_resource.h" #include "dce112/dce112_resource.h" +#include "dce120/dce120_resource.h" #if defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/dcn10_resource.h" -#endif #include "dcn20/dcn20_resource.h" #include "dcn21/dcn21_resource.h" -#include "dce120/dce120_resource.h" +#endif #define DC_LOGGER_INIT(logger) @@ -532,6 +532,51 @@ static inline void get_vp_scan_direction( *flip_horz_scan_dir = !*flip_horz_scan_dir; } +int get_num_odm_splits(struct pipe_ctx *pipe) +{ + int odm_split_count = 0; + struct pipe_ctx *next_pipe = pipe->next_odm_pipe; + while (next_pipe) { + odm_split_count++; + next_pipe = next_pipe->next_odm_pipe; + } + pipe = pipe->prev_odm_pipe; + while (pipe) { + odm_split_count++; + pipe = pipe->prev_odm_pipe; + } + return odm_split_count; +} + +static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx) +{ + *split_count = get_num_odm_splits(pipe_ctx); + *split_idx = 0; + if (*split_count == 0) { + /*Check for mpc split*/ + struct pipe_ctx *split_pipe = pipe_ctx->top_pipe; + + while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) { + (*split_idx)++; + (*split_count)++; + split_pipe = split_pipe->top_pipe; + } + split_pipe = pipe_ctx->bottom_pipe; + while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) { + (*split_count)++; + split_pipe = split_pipe->bottom_pipe; + } + } else { + /*Get odm split index*/ + struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe; + + while (split_pipe) { + (*split_idx)++; + split_pipe = split_pipe->prev_odm_pipe; + } + } +} + static void calculate_viewport(struct pipe_ctx *pipe_ctx) { const 
struct dc_plane_state *plane_state = pipe_ctx->plane_state; @@ -541,16 +586,16 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) struct rect clip, dest; int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; - bool pri_split = pipe_ctx->bottom_pipe && - pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state; - bool sec_split = pipe_ctx->top_pipe && - pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; + int split_count = 0; + int split_idx = 0; bool orthogonal_rotation, flip_y_start, flip_x_start; + calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx); + if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE || stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { - pri_split = false; - sec_split = false; + split_count = 0; + split_idx = 0; } /* The actual clip is an intersection between stream @@ -609,23 +654,32 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) data->viewport.height = clip.height * surf_src.height / dest.height; /* Handle split */ - if (pri_split || sec_split) { + if (split_count) { + /* extra pixels in the division remainder need to go to pipes after + * the extra pixel index minus one(epimo) defined here as: + */ + int epimo = 0; + if (orthogonal_rotation) { - if (flip_y_start != pri_split) - data->viewport.height /= 2; - else { - data->viewport.y += data->viewport.height / 2; - /* Ceil offset pipe */ - data->viewport.height = (data->viewport.height + 1) / 2; - } + if (flip_y_start) + split_idx = split_count - split_idx; + + epimo = split_count - data->viewport.height % (split_count + 1); + + data->viewport.y += (data->viewport.height / (split_count + 1)) * split_idx; + if (split_idx > epimo) + data->viewport.y += split_idx - epimo - 1; + data->viewport.height = data->viewport.height / (split_count + 1) + (split_idx > epimo ? 1 : 0); } else { - if (flip_x_start != pri_split) - data->viewport.width /= 2; - else { - data->viewport.x += data->viewport.width / 2; - /* Ceil offset pipe */ - data->viewport.width = (data->viewport.width + 1) / 2; - } + if (flip_x_start) + split_idx = split_count - split_idx; + + epimo = split_count - data->viewport.width % (split_count + 1); + + data->viewport.x += (data->viewport.width / (split_count + 1)) * split_idx; + if (split_idx > epimo) + data->viewport.x += split_idx - epimo - 1; + data->viewport.width = data->viewport.width / (split_count + 1) + (split_idx > epimo ? 
1 : 0); } } @@ -644,58 +698,58 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; const struct dc_stream_state *stream = pipe_ctx->stream; + struct scaler_data *data = &pipe_ctx->plane_res.scl_data; struct rect surf_clip = plane_state->clip_rect; - bool pri_split = pipe_ctx->bottom_pipe && - pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state; - bool sec_split = pipe_ctx->top_pipe && - pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; - bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM; - - pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x; + bool pri_split_tb = pipe_ctx->bottom_pipe && + pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state && + stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM; + bool sec_split_tb = pipe_ctx->top_pipe && + pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state && + stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM; + int split_count = 0; + int split_idx = 0; + + calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx); + + data->recout.x = stream->dst.x; if (stream->src.x < surf_clip.x) - pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x - - stream->src.x) * stream->dst.width + data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width / stream->src.width; - pipe_ctx->plane_res.scl_data.recout.width = surf_clip.width * - stream->dst.width / stream->src.width; - if (pipe_ctx->plane_res.scl_data.recout.width + pipe_ctx->plane_res.scl_data.recout.x > - stream->dst.x + stream->dst.width) - pipe_ctx->plane_res.scl_data.recout.width = - stream->dst.x + stream->dst.width - - pipe_ctx->plane_res.scl_data.recout.x; + data->recout.width = surf_clip.width * stream->dst.width / stream->src.width; + if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width) + data->recout.width = stream->dst.x + stream->dst.width - data->recout.x; - pipe_ctx->plane_res.scl_data.recout.y = stream->dst.y; + data->recout.y = stream->dst.y; if (stream->src.y < surf_clip.y) - pipe_ctx->plane_res.scl_data.recout.y += (surf_clip.y - - stream->src.y) * stream->dst.height + data->recout.y += (surf_clip.y - stream->src.y) * stream->dst.height / stream->src.height; - pipe_ctx->plane_res.scl_data.recout.height = surf_clip.height * - stream->dst.height / stream->src.height; - if (pipe_ctx->plane_res.scl_data.recout.height + pipe_ctx->plane_res.scl_data.recout.y > - stream->dst.y + stream->dst.height) - pipe_ctx->plane_res.scl_data.recout.height = - stream->dst.y + stream->dst.height - - pipe_ctx->plane_res.scl_data.recout.y; + data->recout.height = surf_clip.height * stream->dst.height / stream->src.height; + if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height) + data->recout.height = stream->dst.y + stream->dst.height - data->recout.y; /* Handle h & v split, handle rotation using viewport */ - if (sec_split && top_bottom_split) { - pipe_ctx->plane_res.scl_data.recout.y += - pipe_ctx->plane_res.scl_data.recout.height / 2; + if (sec_split_tb) { + data->recout.y += data->recout.height / 2; /* Floor primary pipe, ceil 2ndary pipe */ - pipe_ctx->plane_res.scl_data.recout.height = - (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2; - } else if (pri_split && top_bottom_split) - pipe_ctx->plane_res.scl_data.recout.height /= 2; - else if (sec_split) { - pipe_ctx->plane_res.scl_data.recout.x += - pipe_ctx->plane_res.scl_data.recout.width / 2; - /* Ceil offset pipe */ - 
pipe_ctx->plane_res.scl_data.recout.width = - (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2; - } else if (pri_split) - pipe_ctx->plane_res.scl_data.recout.width /= 2; + data->recout.height = (data->recout.height + 1) / 2; + } else if (pri_split_tb) + data->recout.height /= 2; + else if (split_count) { + /* extra pixels in the division remainder need to go to pipes after + * the extra pixel index minus one(epimo) defined here as: + */ + int epimo = split_count - data->recout.width % (split_count + 1); + + /*no recout offset due to odm */ + if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) { + data->recout.x += (data->recout.width / (split_count + 1)) * split_idx; + if (split_idx > epimo) + data->recout.x += split_idx - epimo - 1; + } + data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0); + } } static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) @@ -832,12 +886,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; const struct dc_stream_state *stream = pipe_ctx->stream; + struct pipe_ctx *odm_pipe = pipe_ctx->prev_odm_pipe; struct scaler_data *data = &pipe_ctx->plane_res.scl_data; struct rect src = pipe_ctx->plane_state->src_rect; int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v; int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir; + int odm_idx = 0; /* * Need to calculate the scan direction for viewport to make adjustments @@ -869,6 +925,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx) * stream->dst.width / stream->src.width - src.x * plane_state->dst_rect.width / src.width * stream->dst.width / stream->src.width); + /*modified recout_skip_h calculation due to odm having no recout offset*/ + while (odm_pipe) { + odm_idx++; + odm_pipe = odm_pipe->prev_odm_pipe; + } + if (odm_idx) + recout_skip_h += odm_idx * data->recout.width; + recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y) * stream->dst.height / stream->src.height - src.y * plane_state->dst_rect.height / src.height @@ -1013,6 +1077,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) * on certain displays, such as the Sharp 4k */ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP; + pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha; pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left; pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top; @@ -1021,6 +1086,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) store_h_border_left + timing->h_border_right; pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; + if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe) + pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1; /* Taps calculations */ if (pipe_ctx->plane_res.xfm != NULL) @@ -2034,7 +2101,7 @@ enum dc_status resource_map_pool_resources( for (i = 0; i < context->stream_count; i++) if (context->streams[i] == stream) { context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst; - context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->id; + context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->stream_enc_inst; context->stream_status[i].audio_inst = 
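With ODM combine the recout deliberately carries no per-pipe x offset, so the hunks above recover the pipe's position by walking prev_odm_pipe, fold odm_idx * recout.width into recout_skip_h, and divide h_active by the number of ODM segments. A sketch of the index walk over the backward-linked chain, using a hypothetical stripped-down pipe type:

#include <assert.h>
#include <stddef.h>

/* Hypothetical stand-in for the pipe context: only the backward ODM
 * link matters for computing the pipe's position in the chain. */
struct pipe {
	struct pipe *prev_odm_pipe;
};

/* Index of 'p' within its ODM chain: 0 for the head pipe, 1 for the
 * next segment, and so on -- mirroring the while loop above. */
static int odm_index(const struct pipe *p)
{
	int idx = 0;

	while (p->prev_odm_pipe) {
		idx++;
		p = p->prev_odm_pipe;
	}
	return idx;
}

int main(void)
{
	struct pipe a = { NULL }, b = { &a }, c = { &b };
	int recout_width = 960;	/* one ODM segment of a 2880-wide recout */

	assert(odm_index(&a) == 0);
	assert(odm_index(&c) == 2);
	/* the third segment skips two segment-widths of the surface */
	assert(odm_index(&c) * recout_width == 1920);
	return 0;
}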
pipe_ctx->stream_res.audio ? pipe_ctx->stream_res.audio->inst : -1; @@ -2108,10 +2175,10 @@ enum dc_status dc_validate_global_state( if (pipe_ctx->stream != stream) continue; - if (dc->res_pool->funcs->get_default_swizzle_mode && + if (dc->res_pool->funcs->patch_unknown_plane_state && pipe_ctx->plane_state && pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) { - result = dc->res_pool->funcs->get_default_swizzle_mode(pipe_ctx->plane_state); + result = dc->res_pool->funcs->patch_unknown_plane_state(pipe_ctx->plane_state); if (result != DC_OK) return result; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 6ddbb00ed37a..4f0e7203dba4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -231,34 +231,6 @@ struct dc_stream_status *dc_stream_get_status( return dc_stream_get_status_from_state(dc->current_state, stream); } -static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc) -{ -#if defined(CONFIG_DRM_AMD_DC_DCN) - unsigned int vupdate_line; - unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos; - struct dc_stream_state *stream = pipe_ctx->stream; - unsigned int us_per_line; - - if (stream->ctx->asic_id.chip_family == FAMILY_RV && - ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) { - - vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); - if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos)) - return; - - if (vpos >= vupdate_line) - return; - - us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz; - lines_to_vupdate = vupdate_line - vpos; - us_to_vupdate = lines_to_vupdate * us_per_line; - - /* 70 us is a conservative estimate of cursor update time*/ - if (us_to_vupdate < 70) - udelay(us_to_vupdate); - } -#endif -} /** * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address @@ -298,9 +270,7 @@ bool dc_stream_set_cursor_attributes( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - - delay_cursor_until_vupdate(pipe_ctx, dc); - dc->hwss.pipe_control_lock(dc, pipe_to_program, true); + dc->hwss.cursor_lock(dc, pipe_to_program, true); } dc->hwss.set_cursor_attribute(pipe_ctx); @@ -309,7 +279,7 @@ bool dc_stream_set_cursor_attributes( } if (pipe_to_program) - dc->hwss.pipe_control_lock(dc, pipe_to_program, false); + dc->hwss.cursor_lock(dc, pipe_to_program, false); return true; } @@ -349,16 +319,14 @@ bool dc_stream_set_cursor_position( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - - delay_cursor_until_vupdate(pipe_ctx, dc); - dc->hwss.pipe_control_lock(dc, pipe_to_program, true); + dc->hwss.cursor_lock(dc, pipe_to_program, true); } dc->hwss.set_cursor_position(pipe_ctx); } if (pipe_to_program) - dc->hwss.pipe_control_lock(dc, pipe_to_program, false); + dc->hwss.cursor_lock(dc, pipe_to_program, false); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c index a96d8de9380e..64cf24a9ab08 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c @@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c */ memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config)); dc->vm_pa_config.valid = true; + + if (pa_config->is_hvm_enabled == 0) + dc->debug.nv12_iflip_vm_wa = false; } return num_vmids; @@ -62,7 +65,7 @@ int 
dc_get_vmid_use_vector(struct dc *dc) int i; int in_use = 0; - for (i = 0; i < dc->vm_helper->num_vmid; i++) + for (i = 0; i < MAX_HUBP; i++) in_use |= dc->vm_helper->hubp_vmid_usage[i].vmid_usage[0] | dc->vm_helper->hubp_vmid_usage[i].vmid_usage[1]; return in_use; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 8ff25b5dd2f6..1935cf6601eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.69" +#define DC_VER "3.2.76" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -126,6 +126,7 @@ struct dc_bug_wa { bool no_connect_phy_config; bool dedcn20_305_wa; bool skip_clock_update; + bool lt_early_cr_pattern; }; struct dc_dcc_surface_param { @@ -229,6 +230,7 @@ struct dc_config { bool forced_clocks; bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well bool multi_mon_pp_mclk_switch; + bool psr_on_dmub; }; enum visual_confirm { @@ -388,6 +390,7 @@ struct dc_debug_options { int always_scale; bool disable_pplib_clock_request; bool disable_clock_gate; + bool disable_mem_low_power; bool disable_dmcu; bool disable_psr; bool force_abm_enable; @@ -453,6 +456,7 @@ struct dc_phy_addr_space_config { } gart_config; bool valid; + bool is_hvm_enabled; uint64_t page_table_default_page_addr; }; @@ -518,6 +522,7 @@ struct dc { /* Require to optimize clocks and bandwidth for added/removed planes */ bool optimized_required; + bool wm_optimized_required; /* Require to maintain clocks and bandwidth for UEFI enabled HW */ int optimize_seamless_boot_streams; @@ -526,6 +531,7 @@ struct dc { struct compressor *fbc_compressor; struct dc_debug_data debug_data; + struct dpcd_vendor_signature vendor_signature; const char *build_id; struct vm_helper *vm_helper; @@ -565,12 +571,14 @@ struct dc_init_data { struct dc_reg_helper_state *dmub_offload; struct dc_config flags; - uint32_t log_mask; + uint64_t log_mask; + /** * gpu_info FW provided soc bounding box struct or 0 if not * available in FW */ const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; + struct dpcd_vendor_signature vendor_signature; }; struct dc_callback_init { @@ -682,7 +690,6 @@ struct dc_3dlut { struct kref refcount; struct tetrahedral_params lut_3d; struct fixed31_32 hdr_multiplier; - bool initialized; /*remove after diag fix*/ union dc_3dlut_state state; struct dc_context *ctx; }; @@ -719,6 +726,7 @@ union surface_update_flags { uint32_t output_tf_change:1; uint32_t pixel_format_change:1; uint32_t plane_size_change:1; + uint32_t gamut_remap_change:1; /* Full updates */ uint32_t new_plane:1; @@ -753,6 +761,7 @@ struct dc_plane_state { struct dc_csc_transform input_csc_color_matrix; struct fixed31_32 coeff_reduction_factor; struct fixed31_32 hdr_mult; + struct colorspace_transform gamut_remap_matrix; // TODO: No longer used, remove struct dc_hdr_static_metadata hdr_static_ctx; @@ -832,6 +841,7 @@ struct dc_surface_update { const struct dc_transfer_func *func_shaper; const struct dc_3dlut *lut3d_func; const struct dc_transfer_func *blend_tf; + const struct colorspace_transform *gamut_remap_matrix; }; /* @@ -865,6 +875,7 @@ struct dc_flip_addrs { unsigned int flip_timestamp_in_us; bool flip_immediate; /* TODO: add flip duration for FreeSync */ + bool triplebuffer_flips; }; bool dc_post_update_surfaces_to_stream( @@ -979,6 +990,20 @@ struct dpcd_caps { }; +union dpcd_sink_ext_caps { + struct { + /* 0 - Sink supports backlight adjust 
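The dc_get_vmid_use_vector() fix above ORs together usage from every HUBP slot rather than only the first num_vmid entries; since the bookkeeping array is indexed by HUBP, the shorter loop could presumably miss VMIDs held by higher-numbered HUBPs. A toy version with illustrative types and a made-up MAX_HUBP value:

#include <stdio.h>

#define MAX_HUBP 6	/* illustrative count, not the driver's define */

/* Hypothetical mirror of the vm_helper bookkeeping: each HUBP records
 * the VMID bitmasks of its current and previous flip. */
struct hubp_vmid_usage {
	unsigned int vmid_usage[2];
};

static unsigned int vmid_use_vector(const struct hubp_vmid_usage *use)
{
	unsigned int in_use = 0;
	int i;

	/* OR every HUBP slot, as the fixed loop above does, instead of
	 * stopping at the number of allocated VMIDs */
	for (i = 0; i < MAX_HUBP; i++)
		in_use |= use[i].vmid_usage[0] | use[i].vmid_usage[1];
	return in_use;
}

int main(void)
{
	struct hubp_vmid_usage use[MAX_HUBP] = {
		{ { 1u << 1, 1u << 2 } },	/* hubp0: vmid 1, then 2 */
		{ { 0, 1u << 5 } },		/* hubp1: previous flip on vmid 5 */
	};

	printf("in use: 0x%x\n", vmid_use_vector(use));	/* prints 0x26 */
	return 0;
}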
via PWM during SDR/HDR mode + * 1 - Sink supports backlight adjust via AUX during SDR/HDR mode. + */ + uint8_t sdr_aux_backlight_control : 1; + uint8_t hdr_aux_backlight_control : 1; + uint8_t reserved_1 : 2; + uint8_t oled : 1; + uint8_t reserved : 3; + } bits; + uint8_t raw; +}; + #include "dc_link.h" /******************************************************************************* @@ -1004,6 +1029,11 @@ struct dc_sink_dsc_caps { struct dsc_dec_dpcd_caps dsc_dec_caps; }; +struct dc_sink_fec_caps { + bool is_rx_fec_supported; + bool is_topology_fec_supported; +}; + /* * The sink structure contains EDID and other display device properties */ @@ -1017,7 +1047,10 @@ struct dc_sink { struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX]; bool converter_disable_audio; - struct dc_sink_dsc_caps sink_dsc_caps; + struct dc_sink_dsc_caps dsc_caps; + struct dc_sink_fec_caps fec_caps; + + bool is_vsc_sdp_colorimetry_supported; /* private to DC core */ struct dc_link *link; @@ -1075,7 +1108,6 @@ unsigned int dc_get_current_backlight_pwm(struct dc *dc); unsigned int dc_get_target_backlight_pwm(struct dc *dc); bool dc_is_dmcu_initialized(struct dc *dc); -bool dc_is_hw_initialized(struct dc *dc); enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping); void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index dfe4472c9e40..bb2730e9521e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -432,6 +432,54 @@ struct dp_sink_hw_fw_revision { uint8_t ieee_fw_rev[2]; }; +struct dpcd_vendor_signature { + bool is_valid; + + union dpcd_ieee_vendor_signature { + struct { + uint8_t ieee_oui[3];/*24-bit IEEE OUI*/ + uint8_t ieee_device_id[6];/*usually 6-byte ASCII name*/ + uint8_t ieee_hw_rev; + uint8_t ieee_fw_rev[2]; + }; + uint8_t raw[12]; + } data; +}; + +struct dpcd_amd_signature { + uint8_t AMD_IEEE_TxSignature_byte1; + uint8_t AMD_IEEE_TxSignature_byte2; + uint8_t AMD_IEEE_TxSignature_byte3; + uint8_t device_id_byte1; + uint8_t device_id_byte2; + uint8_t zero[4]; + uint8_t dce_version; + uint8_t dal_version_byte1; + uint8_t dal_version_byte2; +}; + +struct dpcd_source_backlight_set { + struct { + uint8_t byte0; + uint8_t byte1; + uint8_t byte2; + uint8_t byte3; + } backlight_level_millinits; + + struct { + uint8_t byte0; + uint8_t byte1; + } backlight_transition_time_ms; +}; + +union dpcd_source_backlight_get { + struct { + uint32_t backlight_millinits_peak; /* 326h */ + uint32_t backlight_millinits_avg; /* 32Ah */ + } bytes; + uint8_t raw[8]; +}; + /*DPCD register of DP receiver capability field bits-*/ union edp_configuration_cap { struct { diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 25c50bcab9e9..a8dc3082e3e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -385,6 +385,8 @@ struct dc_cursor_position { */ bool enable; + /* Translate cursor x/y by the source rectangle for each plane. 
*/ + bool translate_by_source; }; struct dc_cursor_mi_param { diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index d25603128394..00ff5e98278c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -26,6 +26,7 @@ #ifndef DC_LINK_H_ #define DC_LINK_H_ +#include "dc.h" #include "dc_types.h" #include "grph_object_defs.h" @@ -128,6 +129,7 @@ struct dc_link { enum edp_revision edp_revision; bool psr_feature_enabled; bool psr_allow_active; + union dpcd_sink_ext_caps dpcd_sink_ext_caps; /* MST record stream using this link */ struct link_flags { @@ -178,6 +180,21 @@ bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t backlight_pwm_u16_16, uint32_t frame_ramp); +/* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). */ +bool dc_link_set_backlight_level_nits(struct dc_link *link, + bool isHDR, + uint32_t backlight_millinits, + uint32_t transition_time_in_ms); + +bool dc_link_get_backlight_level_nits(struct dc_link *link, + uint32_t *backlight_millinits, + uint32_t *backlight_millinits_peak); + +bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable); + +bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits); +bool dc_link_set_default_brightness_aux(struct dc_link *link); + int dc_link_get_backlight_level(const struct dc_link *dc_link); bool dc_link_set_abm_disable(const struct dc_link *dc_link); @@ -316,4 +333,7 @@ bool dc_submit_i2c_oem( uint32_t dc_bandwidth_in_kbps_from_timing( const struct dc_crtc_timing *timing); + +bool dc_link_is_fec_supported(const struct dc_link *link); + #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 92096de79dec..a5c7ef47b8d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -118,6 +118,7 @@ union stream_update_flags { uint32_t dpms_off:1; uint32_t gamut_remap:1; uint32_t wb_update:1; + uint32_t dsc_changed : 1; } bits; uint32_t raw; diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index e59532d98cb4..0d210104ba0a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -229,7 +229,9 @@ struct dc_panel_patch { unsigned int extra_t12_ms; unsigned int extra_delay_backlight_off; unsigned int extra_t7_ms; - unsigned int manage_secondary_link; + unsigned int skip_scdc_overwrite; + unsigned int delay_ignore_msa; + unsigned int disable_fec; }; struct dc_edid_caps { diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile index fdf3d8f87eee..fbfcff700971 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile @@ -29,7 +29,7 @@ DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \ dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \ dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \ -dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o +dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE)) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index 68c4049cbc2a..743042d5905a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -645,7 +645,7 @@ bool dce_aux_transfer_with_retries(struct 
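The new dc_link entry points above expose an AUX-driven, nits-based backlight path, with levels expressed in millinits (250000 == 250 nits) to match the dpcd_source_backlight_set layout earlier. A hedged usage sketch compiled against dc_link.h; the helper name and the 500 ms ramp are illustrative, and error reporting is elided:

#include "dc_link.h"	/* declarations shown in the hunk above */

/* Illustrative caller: enable the AUX backlight path, program an SDR
 * level in millinits, then read back current and peak luminance. */
static bool set_panel_nits(struct dc_link *link, uint32_t millinits)
{
	uint32_t cur = 0, peak = 0;

	if (!dc_link_backlight_enable_aux(link, true))
		return false;

	/* isHDR = false: SDR level, ramped over 500 ms */
	if (!dc_link_set_backlight_level_nits(link, false, millinits, 500))
		return false;

	return dc_link_get_backlight_level_nits(link, &cur, &peak);
}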
ddc_service *ddc, case AUX_TRANSACTION_REPLY_AUX_DEFER: case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: retry_on_defer = true; - /* fall through */ + fallthrough; case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) { goto fail; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index 30d953acd016..f0cebe721bcc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -378,6 +378,11 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu) struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); const struct dc_config *config = &dmcu->ctx->dc->config; bool status = false; + struct dc_context *ctx = dmcu->ctx; + unsigned int i; + // 5 4 3 2 1 0 + // F E D C B A - bit 0 is A, bit 5 is F + unsigned int tx_interrupt_mask = 0; PERF_TRACE(); /* Definition of DC_DMCU_SCRATCH @@ -387,6 +392,15 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu) */ dmcu->dmcu_state = REG_READ(DC_DMCU_SCRATCH); + for (i = 0; i < ctx->dc->link_count; i++) { + if (ctx->dc->links[i]->link_enc->features.flags.bits.DP_IS_USB_C) { + if (ctx->dc->links[i]->link_enc->transmitter >= TRANSMITTER_UNIPHY_A && + ctx->dc->links[i]->link_enc->transmitter <= TRANSMITTER_UNIPHY_F) { + tx_interrupt_mask |= 1 << ctx->dc->links[i]->link_enc->transmitter; + } + } + } + switch (dmcu->dmcu_state) { case DMCU_UNLOADED: status = false; @@ -401,6 +415,8 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu) /* Set backlight ramping stepsize */ REG_WRITE(MASTER_COMM_DATA_REG2, abm_gain_stepsize); + REG_WRITE(MASTER_COMM_DATA_REG3, tx_interrupt_mask); + /* Set command to initialize microcontroller */ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_INIT_DMCU); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index 066188ba7949..24adec407972 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -267,6 +267,9 @@ static void set_speed( uint32_t xtal_ref_div = 0; uint32_t prescale = 0; + if (speed == 0) + return; + REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); if (xtal_ref_div == 0) @@ -274,17 +277,15 @@ static void set_speed( prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed; - if (speed) { - if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL) - REG_UPDATE_N(SPEED, 3, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1); - else - REG_UPDATE_N(SPEED, 2, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); - } + if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL) + REG_UPDATE_N(SPEED, 3, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 
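The set_speed() change above returns early for speed == 0 instead of computing prescale first and guarding only the register write, which removes a division by zero. A standalone sketch of the guarded arithmetic; the 48 MHz reference and the xtal_ref_div fallback value of 2 are assumptions here, since the fallback body sits outside the visible hunk:

#include <assert.h>

/* Sketch of the guarded I2C prescale computation; frequencies in kHz. */
static unsigned int i2c_prescale(unsigned int ref_freq,
				 unsigned int xtal_ref_div,
				 unsigned int speed)
{
	if (speed == 0)		/* new early-out: no divide-by-zero */
		return 0;
	if (xtal_ref_div == 0)	/* assumed fallback, not shown in the hunk */
		xtal_ref_div = 2;
	return ((ref_freq * 2) / xtal_ref_div) / speed;
}

int main(void)
{
	/* e.g. a 48 MHz reference, /2 xtal divider, 100 kHz bus speed */
	assert(i2c_prescale(48000, 2, 100) == 480);
	assert(i2c_prescale(48000, 2, 0) == 0);	/* previously divided by 0 */
	return 0;
}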
2:1); + else + REG_UPDATE_N(SPEED, 2, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); } static bool setup_engine( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index 8aa937f496c4..51481e922eb9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c @@ -479,7 +479,7 @@ static void program_grph_pixel_format( case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: sign = 1; floating = 1; - /* fall through */ + fallthrough; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: grph_depth = 3; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c index 48862bebf29e..7311f312369f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c @@ -22,1004 +22,1330 @@ * Authors: AMD * */ - #include "transform.h" +//========================================= +// <num_taps> = 2 +// <num_phases> = 16 +// <scale_ratio> = 0.833333 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = s1.10 +// <CoefOut> = s1.12 +//========================================= static const uint16_t filter_2tap_16p[18] = { - 4096, 0, - 3840, 256, - 3584, 512, - 3328, 768, - 3072, 1024, - 2816, 1280, - 2560, 1536, - 2304, 1792, - 2048, 2048 + 0x1000, 0x0000, + 0x0FF0, 0x0010, + 0x0FB0, 0x0050, + 0x0F34, 0x00CC, + 0x0E68, 0x0198, + 0x0D44, 0x02BC, + 0x0BC4, 0x043C, + 0x09FC, 0x0604, + 0x0800, 0x0800 }; +//========================================= +// <num_taps> = 3 +// <num_phases> = 16 +// <scale_ratio> = 0.83333 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_3tap_16p_upscale[27] = { - 2048, 2048, 0, - 1708, 2424, 16348, - 1372, 2796, 16308, - 1056, 3148, 16272, - 768, 3464, 16244, - 512, 3728, 16236, - 296, 3928, 16252, - 124, 4052, 16296, - 0, 4096, 0 + 0x0804, 0x07FC, 0x0000, + 0x06AC, 0x0978, 0x3FDC, + 0x055C, 0x0AF0, 0x3FB4, + 0x0420, 0x0C50, 0x3F90, + 0x0300, 0x0D88, 0x3F78, + 0x0200, 0x0E90, 0x3F70, + 0x0128, 0x0F5C, 0x3F7C, + 0x007C, 0x0FD8, 0x3FAC, + 0x0000, 0x1000, 0x0000 }; -static const uint16_t filter_3tap_16p_117[27] = { - 2048, 2048, 0, - 1824, 2276, 16376, - 1600, 2496, 16380, - 1376, 2700, 16, - 1156, 2880, 52, - 948, 3032, 108, - 756, 3144, 192, - 580, 3212, 296, - 428, 3236, 428 +//========================================= +// <num_taps> = 3 +// <num_phases> = 16 +// <scale_ratio> = 1.16666 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= +static const uint16_t filter_3tap_16p_116[27] = { + 0x0804, 0x07FC, 0x0000, + 0x0700, 0x0914, 0x3FEC, + 0x0604, 0x0A1C, 0x3FE0, + 0x050C, 0x0B14, 0x3FE0, + 0x041C, 0x0BF4, 0x3FF0, + 0x0340, 0x0CB0, 0x0010, + 0x0274, 0x0D3C, 0x0050, + 0x01C0, 0x0D94, 0x00AC, + 0x0128, 0x0DB4, 0x0124 }; -static const uint16_t filter_3tap_16p_150[27] = { - 2048, 2048, 0, - 1872, 2184, 36, - 1692, 2308, 88, - 1516, 2420, 156, - 1340, 2516, 236, - 1168, 2592, 328, - 1004, 2648, 440, - 844, 2684, 560, - 696, 2696, 696 +//========================================= +// <num_taps> = 3 +// <num_phases> = 16 +// <scale_ratio> = 1.49999 
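From here on the scaler tables are regenerated in hex with the banner comments above each array; reading the <CoefOut> = s1.12 annotation as sign + 1 integer bit + 12 fraction bits, each tap is a 14-bit two's-complement value with 4096 (0x1000) representing +1.0, and the old decimal tables wrapped negatives the same way (e.g. 16240 for a small negative tap). A small decoder based on that interpretation of the annotation, not driver code:

#include <stdio.h>

/* Decode one coefficient from the s1.12 encoding described above:
 * 14 significant bits, two's complement, 0x1000 == +1.0. */
static double s1_12_to_double(unsigned short v)
{
	int signed_v = v & 0x3FFF;

	if (signed_v & 0x2000)		/* sign bit of the 14-bit field */
		signed_v -= 0x4000;
	return signed_v / 4096.0;
}

int main(void)
{
	/* one phase of filter_4tap_16p_upscale */
	unsigned short phase[4] = { 0x3F74, 0x0FDC, 0x00B4, 0x3FFC };
	int i;

	for (i = 0; i < 4; i++)
		printf("tap %d: %+.4f\n", i, s1_12_to_double(phase[i]));
	/* -0.0342 +0.9912 +0.0439 -0.0010 : the phase sums to 1.0 */
	return 0;
}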
(input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= +static const uint16_t filter_3tap_16p_149[27] = { + 0x0804, 0x07FC, 0x0000, + 0x0730, 0x08CC, 0x0004, + 0x0660, 0x098C, 0x0014, + 0x0590, 0x0A3C, 0x0034, + 0x04C4, 0x0AD4, 0x0068, + 0x0400, 0x0B54, 0x00AC, + 0x0348, 0x0BB0, 0x0108, + 0x029C, 0x0BEC, 0x0178, + 0x0200, 0x0C00, 0x0200 }; +//========================================= +// <num_taps> = 3 +// <num_phases> = 16 +// <scale_ratio> = 1.83332 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_3tap_16p_183[27] = { - 2048, 2048, 0, - 1892, 2104, 92, - 1744, 2152, 196, - 1592, 2196, 300, - 1448, 2232, 412, - 1304, 2256, 528, - 1168, 2276, 648, - 1032, 2288, 772, - 900, 2292, 900 + 0x0804, 0x07FC, 0x0000, + 0x0754, 0x0880, 0x002C, + 0x06A8, 0x08F0, 0x0068, + 0x05FC, 0x0954, 0x00B0, + 0x0550, 0x09AC, 0x0104, + 0x04A8, 0x09F0, 0x0168, + 0x0408, 0x0A20, 0x01D8, + 0x036C, 0x0A40, 0x0254, + 0x02DC, 0x0A48, 0x02DC }; +//========================================= +// <num_taps> = 4 +// <num_phases> = 16 +// <scale_ratio> = 0.83333 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_4tap_16p_upscale[36] = { - 0, 4096, 0, 0, - 16240, 4056, 180, 16380, - 16136, 3952, 404, 16364, - 16072, 3780, 664, 16344, - 16040, 3556, 952, 16312, - 16036, 3284, 1268, 16272, - 16052, 2980, 1604, 16224, - 16084, 2648, 1952, 16176, - 16128, 2304, 2304, 16128 + 0x0000, 0x1000, 0x0000, 0x0000, + 0x3F74, 0x0FDC, 0x00B4, 0x3FFC, + 0x3F0C, 0x0F70, 0x0194, 0x3FF0, + 0x3ECC, 0x0EC4, 0x0298, 0x3FD8, + 0x3EAC, 0x0DE4, 0x03B8, 0x3FB8, + 0x3EA4, 0x0CD8, 0x04F4, 0x3F90, + 0x3EB8, 0x0BA0, 0x0644, 0x3F64, + 0x3ED8, 0x0A54, 0x07A0, 0x3F34, + 0x3F00, 0x08FC, 0x0900, 0x3F04 }; -static const uint16_t filter_4tap_16p_117[36] = { - 428, 3236, 428, 0, - 276, 3232, 604, 16364, - 148, 3184, 800, 16340, - 44, 3104, 1016, 16312, - 16344, 2984, 1244, 16284, - 16284, 2832, 1488, 16256, - 16244, 2648, 1732, 16236, - 16220, 2440, 1976, 16220, - 16212, 2216, 2216, 16212 +//========================================= +// <num_taps> = 4 +// <num_phases> = 16 +// <scale_ratio> = 1.16666 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= +static const uint16_t filter_4tap_16p_116[36] = { + 0x01A8, 0x0CB4, 0x01A4, 0x0000, + 0x0110, 0x0CB0, 0x0254, 0x3FEC, + 0x0090, 0x0C80, 0x031C, 0x3FD4, + 0x0024, 0x0C2C, 0x03F4, 0x3FBC, + 0x3FD8, 0x0BAC, 0x04DC, 0x3FA0, + 0x3F9C, 0x0B14, 0x05CC, 0x3F84, + 0x3F70, 0x0A60, 0x06C4, 0x3F6C, + 0x3F5C, 0x098C, 0x07BC, 0x3F5C, + 0x3F54, 0x08AC, 0x08AC, 0x3F54 }; -static const uint16_t filter_4tap_16p_150[36] = { - 696, 2700, 696, 0, - 560, 2700, 848, 16364, - 436, 2676, 1008, 16348, - 328, 2628, 1180, 16336, - 232, 2556, 1356, 16328, - 152, 2460, 1536, 16328, - 84, 2344, 1716, 16332, - 28, 2208, 1888, 16348, - 16376, 2052, 2052, 16376 +//========================================= +// <num_taps> = 4 +// <num_phases> = 16 +// <scale_ratio> = 1.49999 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= +static const uint16_t filter_4tap_16p_149[36] = { + 
0x02B8, 0x0A90, 0x02B8, 0x0000, + 0x0230, 0x0A90, 0x0350, 0x3FF0, + 0x01B8, 0x0A78, 0x03F0, 0x3FE0, + 0x0148, 0x0A48, 0x049C, 0x3FD4, + 0x00E8, 0x0A00, 0x054C, 0x3FCC, + 0x0098, 0x09A0, 0x0600, 0x3FC8, + 0x0054, 0x0928, 0x06B4, 0x3FD0, + 0x001C, 0x08A4, 0x0760, 0x3FE0, + 0x3FFC, 0x0804, 0x0804, 0x3FFC }; +//========================================= +// <num_taps> = 4 +// <num_phases> = 16 +// <scale_ratio> = 1.83332 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_4tap_16p_183[36] = { - 940, 2208, 940, 0, - 832, 2200, 1052, 4, - 728, 2180, 1164, 16, - 628, 2148, 1280, 36, - 536, 2100, 1392, 60, - 448, 2044, 1504, 92, - 368, 1976, 1612, 132, - 296, 1900, 1716, 176, - 232, 1812, 1812, 232 + 0x03B0, 0x08A0, 0x03B0, 0x0000, + 0x0348, 0x0898, 0x041C, 0x0004, + 0x02DC, 0x0884, 0x0490, 0x0010, + 0x0278, 0x0864, 0x0500, 0x0024, + 0x021C, 0x0838, 0x0570, 0x003C, + 0x01C8, 0x07FC, 0x05E0, 0x005C, + 0x0178, 0x07B8, 0x064C, 0x0084, + 0x0130, 0x076C, 0x06B0, 0x00B4, + 0x00F0, 0x0714, 0x0710, 0x00EC }; +//========================================= +// <num_taps> = 2 +// <num_phases> = 64 +// <scale_ratio> = 0.833333 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = s1.10 +// <CoefOut> = s1.12 +//========================================= static const uint16_t filter_2tap_64p[66] = { - 4096, 0, - 4032, 64, - 3968, 128, - 3904, 192, - 3840, 256, - 3776, 320, - 3712, 384, - 3648, 448, - 3584, 512, - 3520, 576, - 3456, 640, - 3392, 704, - 3328, 768, - 3264, 832, - 3200, 896, - 3136, 960, - 3072, 1024, - 3008, 1088, - 2944, 1152, - 2880, 1216, - 2816, 1280, - 2752, 1344, - 2688, 1408, - 2624, 1472, - 2560, 1536, - 2496, 1600, - 2432, 1664, - 2368, 1728, - 2304, 1792, - 2240, 1856, - 2176, 1920, - 2112, 1984, - 2048, 2048 }; + 0x1000, 0x0000, + 0x1000, 0x0000, + 0x0FFC, 0x0004, + 0x0FF8, 0x0008, + 0x0FF0, 0x0010, + 0x0FE4, 0x001C, + 0x0FD8, 0x0028, + 0x0FC4, 0x003C, + 0x0FB0, 0x0050, + 0x0F98, 0x0068, + 0x0F7C, 0x0084, + 0x0F58, 0x00A8, + 0x0F34, 0x00CC, + 0x0F08, 0x00F8, + 0x0ED8, 0x0128, + 0x0EA4, 0x015C, + 0x0E68, 0x0198, + 0x0E28, 0x01D8, + 0x0DE4, 0x021C, + 0x0D98, 0x0268, + 0x0D44, 0x02BC, + 0x0CEC, 0x0314, + 0x0C90, 0x0370, + 0x0C2C, 0x03D4, + 0x0BC4, 0x043C, + 0x0B58, 0x04A8, + 0x0AE8, 0x0518, + 0x0A74, 0x058C, + 0x09FC, 0x0604, + 0x0980, 0x0680, + 0x0900, 0x0700, + 0x0880, 0x0780, + 0x0800, 0x0800 +}; +//========================================= +// <num_taps> = 3 +// <num_phases> = 64 +// <scale_ratio> = 0.83333 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_3tap_64p_upscale[99] = { - 2048, 2048, 0, - 1960, 2140, 16376, - 1876, 2236, 16364, - 1792, 2328, 16356, - 1708, 2424, 16348, - 1620, 2516, 16336, - 1540, 2612, 16328, - 1456, 2704, 16316, - 1372, 2796, 16308, - 1292, 2884, 16296, - 1212, 2976, 16288, - 1136, 3060, 16280, - 1056, 3148, 16272, - 984, 3228, 16264, - 908, 3312, 16256, - 836, 3388, 16248, - 768, 3464, 16244, - 700, 3536, 16240, - 636, 3604, 16236, - 572, 3668, 16236, - 512, 3728, 16236, - 456, 3784, 16236, - 400, 3836, 16240, - 348, 3884, 16244, - 296, 3928, 16252, - 252, 3964, 16260, - 204, 4000, 16268, - 164, 4028, 16284, - 124, 4052, 16296, - 88, 4072, 16316, - 56, 4084, 16336, - 24, 4092, 16356, - 0, 4096, 0 + 0x0804, 0x07FC, 0x0000, + 0x07A8, 0x0860, 0x3FF8, + 0x0754, 
0x08BC, 0x3FF0, + 0x0700, 0x0918, 0x3FE8, + 0x06AC, 0x0978, 0x3FDC, + 0x0654, 0x09D8, 0x3FD4, + 0x0604, 0x0A34, 0x3FC8, + 0x05B0, 0x0A90, 0x3FC0, + 0x055C, 0x0AF0, 0x3FB4, + 0x050C, 0x0B48, 0x3FAC, + 0x04BC, 0x0BA0, 0x3FA4, + 0x0470, 0x0BF4, 0x3F9C, + 0x0420, 0x0C50, 0x3F90, + 0x03D8, 0x0C9C, 0x3F8C, + 0x038C, 0x0CF0, 0x3F84, + 0x0344, 0x0D40, 0x3F7C, + 0x0300, 0x0D88, 0x3F78, + 0x02BC, 0x0DD0, 0x3F74, + 0x027C, 0x0E14, 0x3F70, + 0x023C, 0x0E54, 0x3F70, + 0x0200, 0x0E90, 0x3F70, + 0x01C8, 0x0EC8, 0x3F70, + 0x0190, 0x0EFC, 0x3F74, + 0x015C, 0x0F2C, 0x3F78, + 0x0128, 0x0F5C, 0x3F7C, + 0x00FC, 0x0F7C, 0x3F88, + 0x00CC, 0x0FA4, 0x3F90, + 0x00A4, 0x0FC0, 0x3F9C, + 0x007C, 0x0FD8, 0x3FAC, + 0x0058, 0x0FE8, 0x3FC0, + 0x0038, 0x0FF4, 0x3FD4, + 0x0018, 0x1000, 0x3FE8, + 0x0000, 0x1000, 0x0000 }; -static const uint16_t filter_3tap_64p_117[99] = { - 2048, 2048, 0, - 1992, 2104, 16380, - 1936, 2160, 16380, - 1880, 2220, 16376, - 1824, 2276, 16376, - 1768, 2332, 16376, - 1712, 2388, 16376, - 1656, 2444, 16376, - 1600, 2496, 16380, - 1544, 2548, 0, - 1488, 2600, 4, - 1432, 2652, 8, - 1376, 2700, 16, - 1320, 2748, 20, - 1264, 2796, 32, - 1212, 2840, 40, - 1156, 2880, 52, - 1104, 2920, 64, - 1052, 2960, 80, - 1000, 2996, 92, - 948, 3032, 108, - 900, 3060, 128, - 852, 3092, 148, - 804, 3120, 168, - 756, 3144, 192, - 712, 3164, 216, - 668, 3184, 240, - 624, 3200, 268, - 580, 3212, 296, - 540, 3220, 328, - 500, 3228, 360, - 464, 3232, 392, - 428, 3236, 428 +//========================================= +// <num_taps> = 3 +// <num_phases> = 64 +// <scale_ratio> = 1.16666 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= +static const uint16_t filter_3tap_64p_116[99] = { + 0x0804, 0x07FC, 0x0000, + 0x07C0, 0x0844, 0x3FFC, + 0x0780, 0x0888, 0x3FF8, + 0x0740, 0x08D0, 0x3FF0, + 0x0700, 0x0914, 0x3FEC, + 0x06C0, 0x0958, 0x3FE8, + 0x0684, 0x0998, 0x3FE4, + 0x0644, 0x09DC, 0x3FE0, + 0x0604, 0x0A1C, 0x3FE0, + 0x05C4, 0x0A5C, 0x3FE0, + 0x0588, 0x0A9C, 0x3FDC, + 0x0548, 0x0ADC, 0x3FDC, + 0x050C, 0x0B14, 0x3FE0, + 0x04CC, 0x0B54, 0x3FE0, + 0x0490, 0x0B8C, 0x3FE4, + 0x0458, 0x0BC0, 0x3FE8, + 0x041C, 0x0BF4, 0x3FF0, + 0x03E0, 0x0C28, 0x3FF8, + 0x03A8, 0x0C58, 0x0000, + 0x0374, 0x0C88, 0x0004, + 0x0340, 0x0CB0, 0x0010, + 0x0308, 0x0CD8, 0x0020, + 0x02D8, 0x0CFC, 0x002C, + 0x02A0, 0x0D20, 0x0040, + 0x0274, 0x0D3C, 0x0050, + 0x0244, 0x0D58, 0x0064, + 0x0214, 0x0D70, 0x007C, + 0x01E8, 0x0D84, 0x0094, + 0x01C0, 0x0D94, 0x00AC, + 0x0198, 0x0DA0, 0x00C8, + 0x0170, 0x0DAC, 0x00E4, + 0x014C, 0x0DB0, 0x0104, + 0x0128, 0x0DB4, 0x0124 }; -static const uint16_t filter_3tap_64p_150[99] = { - 2048, 2048, 0, - 2004, 2080, 8, - 1960, 2116, 16, - 1916, 2148, 28, - 1872, 2184, 36, - 1824, 2216, 48, - 1780, 2248, 60, - 1736, 2280, 76, - 1692, 2308, 88, - 1648, 2336, 104, - 1604, 2368, 120, - 1560, 2392, 136, - 1516, 2420, 156, - 1472, 2444, 172, - 1428, 2472, 192, - 1384, 2492, 212, - 1340, 2516, 236, - 1296, 2536, 256, - 1252, 2556, 280, - 1212, 2576, 304, - 1168, 2592, 328, - 1124, 2608, 356, - 1084, 2624, 384, - 1044, 2636, 412, - 1004, 2648, 440, - 964, 2660, 468, - 924, 2668, 500, - 884, 2676, 528, - 844, 2684, 560, - 808, 2688, 596, - 768, 2692, 628, - 732, 2696, 664, - 696, 2696, 696 +//========================================= +// <num_taps> = 3 +// <num_phases> = 64 +// <scale_ratio> = 1.49999 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 
+//========================================= +static const uint16_t filter_3tap_64p_149[99] = { + 0x0804, 0x07FC, 0x0000, + 0x07CC, 0x0834, 0x0000, + 0x0798, 0x0868, 0x0000, + 0x0764, 0x089C, 0x0000, + 0x0730, 0x08CC, 0x0004, + 0x0700, 0x08FC, 0x0004, + 0x06CC, 0x092C, 0x0008, + 0x0698, 0x095C, 0x000C, + 0x0660, 0x098C, 0x0014, + 0x062C, 0x09B8, 0x001C, + 0x05FC, 0x09E4, 0x0020, + 0x05C4, 0x0A10, 0x002C, + 0x0590, 0x0A3C, 0x0034, + 0x055C, 0x0A64, 0x0040, + 0x0528, 0x0A8C, 0x004C, + 0x04F8, 0x0AB0, 0x0058, + 0x04C4, 0x0AD4, 0x0068, + 0x0490, 0x0AF8, 0x0078, + 0x0460, 0x0B18, 0x0088, + 0x0430, 0x0B38, 0x0098, + 0x0400, 0x0B54, 0x00AC, + 0x03D0, 0x0B6C, 0x00C4, + 0x03A0, 0x0B88, 0x00D8, + 0x0374, 0x0B9C, 0x00F0, + 0x0348, 0x0BB0, 0x0108, + 0x0318, 0x0BC4, 0x0124, + 0x02EC, 0x0BD4, 0x0140, + 0x02C4, 0x0BE0, 0x015C, + 0x029C, 0x0BEC, 0x0178, + 0x0274, 0x0BF4, 0x0198, + 0x024C, 0x0BFC, 0x01B8, + 0x0228, 0x0BFC, 0x01DC, + 0x0200, 0x0C00, 0x0200 }; +//========================================= +// <num_taps> = 3 +// <num_phases> = 64 +// <scale_ratio> = 1.83332 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_3tap_64p_183[99] = { - 2048, 2048, 0, - 2008, 2060, 20, - 1968, 2076, 44, - 1932, 2088, 68, - 1892, 2104, 92, - 1856, 2116, 120, - 1816, 2128, 144, - 1780, 2140, 168, - 1744, 2152, 196, - 1704, 2164, 220, - 1668, 2176, 248, - 1632, 2188, 272, - 1592, 2196, 300, - 1556, 2204, 328, - 1520, 2216, 356, - 1484, 2224, 384, - 1448, 2232, 412, - 1412, 2240, 440, - 1376, 2244, 468, - 1340, 2252, 496, - 1304, 2256, 528, - 1272, 2264, 556, - 1236, 2268, 584, - 1200, 2272, 616, - 1168, 2276, 648, - 1132, 2280, 676, - 1100, 2284, 708, - 1064, 2288, 740, - 1032, 2288, 772, - 996, 2292, 800, - 964, 2292, 832, - 932, 2292, 868, - 900, 2292, 900 + 0x0804, 0x07FC, 0x0000, + 0x07D4, 0x0824, 0x0008, + 0x07AC, 0x0840, 0x0014, + 0x0780, 0x0860, 0x0020, + 0x0754, 0x0880, 0x002C, + 0x0728, 0x089C, 0x003C, + 0x0700, 0x08B8, 0x0048, + 0x06D4, 0x08D4, 0x0058, + 0x06A8, 0x08F0, 0x0068, + 0x067C, 0x090C, 0x0078, + 0x0650, 0x0924, 0x008C, + 0x0628, 0x093C, 0x009C, + 0x05FC, 0x0954, 0x00B0, + 0x05D0, 0x096C, 0x00C4, + 0x05A8, 0x0980, 0x00D8, + 0x0578, 0x0998, 0x00F0, + 0x0550, 0x09AC, 0x0104, + 0x0528, 0x09BC, 0x011C, + 0x04FC, 0x09D0, 0x0134, + 0x04D4, 0x09E0, 0x014C, + 0x04A8, 0x09F0, 0x0168, + 0x0480, 0x09FC, 0x0184, + 0x045C, 0x0A08, 0x019C, + 0x0434, 0x0A14, 0x01B8, + 0x0408, 0x0A20, 0x01D8, + 0x03E0, 0x0A2C, 0x01F4, + 0x03B8, 0x0A34, 0x0214, + 0x0394, 0x0A38, 0x0234, + 0x036C, 0x0A40, 0x0254, + 0x0348, 0x0A44, 0x0274, + 0x0324, 0x0A48, 0x0294, + 0x0300, 0x0A48, 0x02B8, + 0x02DC, 0x0A48, 0x02DC }; +//========================================= +// <num_taps> = 4 +// <num_phases> = 64 +// <scale_ratio> = 0.83333 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_4tap_64p_upscale[132] = { - 0, 4096, 0, 0, - 16344, 4092, 40, 0, - 16308, 4084, 84, 16380, - 16272, 4072, 132, 16380, - 16240, 4056, 180, 16380, - 16212, 4036, 232, 16376, - 16184, 4012, 288, 16372, - 16160, 3984, 344, 16368, - 16136, 3952, 404, 16364, - 16116, 3916, 464, 16360, - 16100, 3872, 528, 16356, - 16084, 3828, 596, 16348, - 16072, 3780, 664, 16344, - 16060, 3728, 732, 16336, - 16052, 3676, 804, 16328, - 16044, 3616, 876, 16320, - 16040, 3556, 952, 16312, - 16036, 3492, 1028, 16300, - 
16032, 3424, 1108, 16292, - 16032, 3356, 1188, 16280, - 16036, 3284, 1268, 16272, - 16036, 3212, 1352, 16260, - 16040, 3136, 1436, 16248, - 16044, 3056, 1520, 16236, - 16052, 2980, 1604, 16224, - 16060, 2896, 1688, 16212, - 16064, 2816, 1776, 16200, - 16076, 2732, 1864, 16188, - 16084, 2648, 1952, 16176, - 16092, 2564, 2040, 16164, - 16104, 2476, 2128, 16152, - 16116, 2388, 2216, 16140, - 16128, 2304, 2304, 16128 }; + 0x0000, 0x1000, 0x0000, 0x0000, + 0x3FDC, 0x0FFC, 0x0028, 0x0000, + 0x3FB4, 0x0FF8, 0x0054, 0x0000, + 0x3F94, 0x0FE8, 0x0084, 0x0000, + 0x3F74, 0x0FDC, 0x00B4, 0x3FFC, + 0x3F58, 0x0FC4, 0x00E8, 0x3FFC, + 0x3F3C, 0x0FAC, 0x0120, 0x3FF8, + 0x3F24, 0x0F90, 0x0158, 0x3FF4, + 0x3F0C, 0x0F70, 0x0194, 0x3FF0, + 0x3EF8, 0x0F4C, 0x01D0, 0x3FEC, + 0x3EE8, 0x0F20, 0x0210, 0x3FE8, + 0x3ED8, 0x0EF4, 0x0254, 0x3FE0, + 0x3ECC, 0x0EC4, 0x0298, 0x3FD8, + 0x3EC0, 0x0E90, 0x02DC, 0x3FD4, + 0x3EB8, 0x0E58, 0x0324, 0x3FCC, + 0x3EB0, 0x0E20, 0x036C, 0x3FC4, + 0x3EAC, 0x0DE4, 0x03B8, 0x3FB8, + 0x3EA8, 0x0DA4, 0x0404, 0x3FB0, + 0x3EA4, 0x0D60, 0x0454, 0x3FA8, + 0x3EA4, 0x0D1C, 0x04A4, 0x3F9C, + 0x3EA4, 0x0CD8, 0x04F4, 0x3F90, + 0x3EA8, 0x0C88, 0x0548, 0x3F88, + 0x3EAC, 0x0C3C, 0x059C, 0x3F7C, + 0x3EB0, 0x0BF0, 0x05F0, 0x3F70, + 0x3EB8, 0x0BA0, 0x0644, 0x3F64, + 0x3EBC, 0x0B54, 0x0698, 0x3F58, + 0x3EC4, 0x0B00, 0x06F0, 0x3F4C, + 0x3ECC, 0x0AAC, 0x0748, 0x3F40, + 0x3ED8, 0x0A54, 0x07A0, 0x3F34, + 0x3EE0, 0x0A04, 0x07F8, 0x3F24, + 0x3EEC, 0x09AC, 0x0850, 0x3F18, + 0x3EF8, 0x0954, 0x08A8, 0x3F0C, + 0x3F00, 0x08FC, 0x0900, 0x3F04 +}; -static const uint16_t filter_4tap_64p_117[132] = { - 420, 3248, 420, 0, - 380, 3248, 464, 16380, - 344, 3248, 508, 16372, - 308, 3248, 552, 16368, - 272, 3240, 596, 16364, - 236, 3236, 644, 16356, - 204, 3224, 692, 16352, - 172, 3212, 744, 16344, - 144, 3196, 796, 16340, - 116, 3180, 848, 16332, - 88, 3160, 900, 16324, - 60, 3136, 956, 16320, - 36, 3112, 1012, 16312, - 16, 3084, 1068, 16304, - 16380, 3056, 1124, 16296, - 16360, 3024, 1184, 16292, - 16340, 2992, 1244, 16284, - 16324, 2956, 1304, 16276, - 16308, 2920, 1364, 16268, - 16292, 2880, 1424, 16264, - 16280, 2836, 1484, 16256, - 16268, 2792, 1548, 16252, - 16256, 2748, 1608, 16244, - 16248, 2700, 1668, 16240, - 16240, 2652, 1732, 16232, - 16232, 2604, 1792, 16228, - 16228, 2552, 1856, 16224, - 16220, 2500, 1916, 16220, - 16216, 2444, 1980, 16216, - 16216, 2388, 2040, 16216, - 16212, 2332, 2100, 16212, - 16212, 2276, 2160, 16212, - 16212, 2220, 2220, 16212 }; +//========================================= +// <num_taps> = 4 +// <num_phases> = 64 +// <scale_ratio> = 1.16666 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= +static const uint16_t filter_4tap_64p_116[132] = { + 0x01A8, 0x0CB4, 0x01A4, 0x0000, + 0x017C, 0x0CB8, 0x01D0, 0x3FFC, + 0x0158, 0x0CB8, 0x01F8, 0x3FF8, + 0x0130, 0x0CB4, 0x0228, 0x3FF4, + 0x0110, 0x0CB0, 0x0254, 0x3FEC, + 0x00EC, 0x0CA8, 0x0284, 0x3FE8, + 0x00CC, 0x0C9C, 0x02B4, 0x3FE4, + 0x00AC, 0x0C90, 0x02E8, 0x3FDC, + 0x0090, 0x0C80, 0x031C, 0x3FD4, + 0x0070, 0x0C70, 0x0350, 0x3FD0, + 0x0058, 0x0C5C, 0x0384, 0x3FC8, + 0x003C, 0x0C48, 0x03BC, 0x3FC0, + 0x0024, 0x0C2C, 0x03F4, 0x3FBC, + 0x0010, 0x0C10, 0x042C, 0x3FB4, + 0x3FFC, 0x0BF4, 0x0464, 0x3FAC, + 0x3FE8, 0x0BD4, 0x04A0, 0x3FA4, + 0x3FD8, 0x0BAC, 0x04DC, 0x3FA0, + 0x3FC4, 0x0B8C, 0x0518, 0x3F98, + 0x3FB4, 0x0B68, 0x0554, 0x3F90, + 0x3FA8, 0x0B40, 0x0590, 0x3F88, + 0x3F9C, 0x0B14, 0x05CC, 0x3F84, + 0x3F90, 0x0AEC, 0x0608, 0x3F7C, + 
0x3F84, 0x0ABC, 0x0648, 0x3F78, + 0x3F7C, 0x0A90, 0x0684, 0x3F70, + 0x3F70, 0x0A60, 0x06C4, 0x3F6C, + 0x3F6C, 0x0A2C, 0x0700, 0x3F68, + 0x3F64, 0x09F8, 0x0740, 0x3F64, + 0x3F60, 0x09C4, 0x077C, 0x3F60, + 0x3F5C, 0x098C, 0x07BC, 0x3F5C, + 0x3F58, 0x0958, 0x07F8, 0x3F58, + 0x3F58, 0x091C, 0x0834, 0x3F58, + 0x3F54, 0x08E4, 0x0870, 0x3F58, + 0x3F54, 0x08AC, 0x08AC, 0x3F54 +}; -static const uint16_t filter_4tap_64p_150[132] = { - 696, 2700, 696, 0, - 660, 2704, 732, 16380, - 628, 2704, 768, 16376, - 596, 2704, 804, 16372, - 564, 2700, 844, 16364, - 532, 2696, 884, 16360, - 500, 2692, 924, 16356, - 472, 2684, 964, 16352, - 440, 2676, 1004, 16352, - 412, 2668, 1044, 16348, - 384, 2656, 1088, 16344, - 360, 2644, 1128, 16340, - 332, 2632, 1172, 16336, - 308, 2616, 1216, 16336, - 284, 2600, 1260, 16332, - 260, 2580, 1304, 16332, - 236, 2560, 1348, 16328, - 216, 2540, 1392, 16328, - 196, 2516, 1436, 16328, - 176, 2492, 1480, 16324, - 156, 2468, 1524, 16324, - 136, 2440, 1568, 16328, - 120, 2412, 1612, 16328, - 104, 2384, 1656, 16328, - 88, 2352, 1700, 16332, - 72, 2324, 1744, 16332, - 60, 2288, 1788, 16336, - 48, 2256, 1828, 16340, - 36, 2220, 1872, 16344, - 24, 2184, 1912, 16352, - 12, 2148, 1952, 16356, - 4, 2112, 1996, 16364, - 16380, 2072, 2036, 16372 }; +//========================================= +// <num_taps> = 4 +// <num_phases> = 64 +// <scale_ratio> = 1.49999 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= +static const uint16_t filter_4tap_64p_149[132] = { + 0x02B8, 0x0A90, 0x02B8, 0x0000, + 0x0294, 0x0A94, 0x02DC, 0x3FFC, + 0x0274, 0x0A94, 0x0300, 0x3FF8, + 0x0250, 0x0A94, 0x0328, 0x3FF4, + 0x0230, 0x0A90, 0x0350, 0x3FF0, + 0x0214, 0x0A8C, 0x0374, 0x3FEC, + 0x01F0, 0x0A88, 0x03A0, 0x3FE8, + 0x01D4, 0x0A80, 0x03C8, 0x3FE4, + 0x01B8, 0x0A78, 0x03F0, 0x3FE0, + 0x0198, 0x0A70, 0x041C, 0x3FDC, + 0x0180, 0x0A64, 0x0444, 0x3FD8, + 0x0164, 0x0A54, 0x0470, 0x3FD8, + 0x0148, 0x0A48, 0x049C, 0x3FD4, + 0x0130, 0x0A38, 0x04C8, 0x3FD0, + 0x0118, 0x0A24, 0x04F4, 0x3FD0, + 0x0100, 0x0A14, 0x0520, 0x3FCC, + 0x00E8, 0x0A00, 0x054C, 0x3FCC, + 0x00D4, 0x09E8, 0x057C, 0x3FC8, + 0x00C0, 0x09D0, 0x05A8, 0x3FC8, + 0x00AC, 0x09B8, 0x05D4, 0x3FC8, + 0x0098, 0x09A0, 0x0600, 0x3FC8, + 0x0084, 0x0984, 0x0630, 0x3FC8, + 0x0074, 0x0964, 0x065C, 0x3FCC, + 0x0064, 0x0948, 0x0688, 0x3FCC, + 0x0054, 0x0928, 0x06B4, 0x3FD0, + 0x0044, 0x0908, 0x06E0, 0x3FD4, + 0x0038, 0x08E8, 0x070C, 0x3FD4, + 0x002C, 0x08C4, 0x0738, 0x3FD8, + 0x001C, 0x08A4, 0x0760, 0x3FE0, + 0x0014, 0x087C, 0x078C, 0x3FE4, + 0x0008, 0x0858, 0x07B4, 0x3FEC, + 0x0000, 0x0830, 0x07DC, 0x3FF4, + 0x3FFC, 0x0804, 0x0804, 0x3FFC +}; +//========================================= +// <num_taps> = 4 +// <num_phases> = 64 +// <scale_ratio> = 1.83332 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_4tap_64p_183[132] = { - 944, 2204, 944, 0, - 916, 2204, 972, 0, - 888, 2200, 996, 0, - 860, 2200, 1024, 4, - 832, 2196, 1052, 4, - 808, 2192, 1080, 8, - 780, 2188, 1108, 12, - 756, 2180, 1140, 12, - 728, 2176, 1168, 16, - 704, 2168, 1196, 20, - 680, 2160, 1224, 24, - 656, 2152, 1252, 28, - 632, 2144, 1280, 36, - 608, 2132, 1308, 40, - 584, 2120, 1336, 48, - 560, 2112, 1364, 52, - 536, 2096, 1392, 60, - 516, 2084, 1420, 68, - 492, 2072, 1448, 76, - 472, 2056, 1476, 84, - 452, 2040, 1504, 92, - 428, 2024, 1532, 100, - 408, 2008, 1560, 
112, - 392, 1992, 1584, 120, - 372, 1972, 1612, 132, - 352, 1956, 1636, 144, - 336, 1936, 1664, 156, - 316, 1916, 1688, 168, - 300, 1896, 1712, 180, - 284, 1876, 1736, 192, - 268, 1852, 1760, 208, - 252, 1832, 1784, 220, - 236, 1808, 1808, 236 }; + 0x03B0, 0x08A0, 0x03B0, 0x0000, + 0x0394, 0x08A0, 0x03CC, 0x0000, + 0x037C, 0x089C, 0x03E8, 0x0000, + 0x0360, 0x089C, 0x0400, 0x0004, + 0x0348, 0x0898, 0x041C, 0x0004, + 0x032C, 0x0894, 0x0438, 0x0008, + 0x0310, 0x0890, 0x0454, 0x000C, + 0x02F8, 0x0888, 0x0474, 0x000C, + 0x02DC, 0x0884, 0x0490, 0x0010, + 0x02C4, 0x087C, 0x04AC, 0x0014, + 0x02AC, 0x0874, 0x04C8, 0x0018, + 0x0290, 0x086C, 0x04E4, 0x0020, + 0x0278, 0x0864, 0x0500, 0x0024, + 0x0264, 0x0858, 0x051C, 0x0028, + 0x024C, 0x084C, 0x0538, 0x0030, + 0x0234, 0x0844, 0x0554, 0x0034, + 0x021C, 0x0838, 0x0570, 0x003C, + 0x0208, 0x0828, 0x058C, 0x0044, + 0x01F0, 0x081C, 0x05A8, 0x004C, + 0x01DC, 0x080C, 0x05C4, 0x0054, + 0x01C8, 0x07FC, 0x05E0, 0x005C, + 0x01B4, 0x07EC, 0x05FC, 0x0064, + 0x019C, 0x07DC, 0x0618, 0x0070, + 0x018C, 0x07CC, 0x0630, 0x0078, + 0x0178, 0x07B8, 0x064C, 0x0084, + 0x0164, 0x07A8, 0x0664, 0x0090, + 0x0150, 0x0794, 0x0680, 0x009C, + 0x0140, 0x0780, 0x0698, 0x00A8, + 0x0130, 0x076C, 0x06B0, 0x00B4, + 0x0120, 0x0758, 0x06C8, 0x00C0, + 0x0110, 0x0740, 0x06E0, 0x00D0, + 0x0100, 0x072C, 0x06F8, 0x00DC, + 0x00F0, 0x0714, 0x0710, 0x00EC +}; +//========================================= +// <num_taps> = 5 +// <num_phases> = 64 +// <scale_ratio> = 0.83333 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_5tap_64p_upscale[165] = { - 15936, 2496, 2496, 15936, 0, - 15948, 2404, 2580, 15924, 0, - 15960, 2312, 2664, 15912, 4, - 15976, 2220, 2748, 15904, 8, - 15992, 2128, 2832, 15896, 12, - 16004, 2036, 2912, 15888, 16, - 16020, 1944, 2992, 15880, 20, - 16036, 1852, 3068, 15876, 20, - 16056, 1760, 3140, 15876, 24, - 16072, 1668, 3216, 15872, 28, - 16088, 1580, 3284, 15872, 32, - 16104, 1492, 3352, 15876, 32, - 16120, 1404, 3420, 15876, 36, - 16140, 1316, 3480, 15884, 40, - 16156, 1228, 3540, 15892, 40, - 16172, 1144, 3600, 15900, 40, - 16188, 1060, 3652, 15908, 44, - 16204, 980, 3704, 15924, 44, - 16220, 900, 3756, 15936, 44, - 16236, 824, 3800, 15956, 44, - 16248, 744, 3844, 15972, 44, - 16264, 672, 3884, 15996, 44, - 16276, 600, 3920, 16020, 44, - 16292, 528, 3952, 16044, 40, - 16304, 460, 3980, 16072, 40, - 16316, 396, 4008, 16104, 36, - 16328, 332, 4032, 16136, 32, - 16336, 272, 4048, 16172, 28, - 16348, 212, 4064, 16208, 24, - 16356, 156, 4080, 16248, 16, - 16368, 100, 4088, 16292, 12, - 16376, 48, 4092, 16336, 4, - 0, 0, 4096, 0, 0 }; + 0x3E40, 0x09C0, 0x09C0, 0x3E40, 0x0000, + 0x3E50, 0x0964, 0x0A18, 0x3E34, 0x0000, + 0x3E5C, 0x0908, 0x0A6C, 0x3E2C, 0x0004, + 0x3E6C, 0x08AC, 0x0AC0, 0x3E20, 0x0008, + 0x3E78, 0x0850, 0x0B14, 0x3E18, 0x000C, + 0x3E88, 0x07F4, 0x0B60, 0x3E14, 0x0010, + 0x3E98, 0x0798, 0x0BB0, 0x3E0C, 0x0014, + 0x3EA8, 0x073C, 0x0C00, 0x3E08, 0x0014, + 0x3EB8, 0x06E4, 0x0C48, 0x3E04, 0x0018, + 0x3ECC, 0x0684, 0x0C90, 0x3E04, 0x001C, + 0x3EDC, 0x062C, 0x0CD4, 0x3E04, 0x0020, + 0x3EEC, 0x05D4, 0x0D1C, 0x3E04, 0x0020, + 0x3EFC, 0x057C, 0x0D5C, 0x3E08, 0x0024, + 0x3F0C, 0x0524, 0x0D98, 0x3E10, 0x0028, + 0x3F20, 0x04CC, 0x0DD8, 0x3E14, 0x0028, + 0x3F30, 0x0478, 0x0E14, 0x3E1C, 0x0028, + 0x3F40, 0x0424, 0x0E48, 0x3E28, 0x002C, + 0x3F50, 0x03D4, 0x0E7C, 0x3E34, 0x002C, + 0x3F60, 0x0384, 0x0EAC, 0x3E44, 0x002C, + 0x3F6C, 0x0338, 0x0EDC, 0x3E54, 
0x002C, + 0x3F7C, 0x02E8, 0x0F08, 0x3E68, 0x002C, + 0x3F8C, 0x02A0, 0x0F2C, 0x3E7C, 0x002C, + 0x3F98, 0x0258, 0x0F50, 0x3E94, 0x002C, + 0x3FA4, 0x0210, 0x0F74, 0x3EB0, 0x0028, + 0x3FB0, 0x01CC, 0x0F90, 0x3ECC, 0x0028, + 0x3FC0, 0x018C, 0x0FA8, 0x3EE8, 0x0024, + 0x3FC8, 0x014C, 0x0FC0, 0x3F0C, 0x0020, + 0x3FD4, 0x0110, 0x0FD4, 0x3F2C, 0x001C, + 0x3FE0, 0x00D4, 0x0FE0, 0x3F54, 0x0018, + 0x3FE8, 0x009C, 0x0FF0, 0x3F7C, 0x0010, + 0x3FF0, 0x0064, 0x0FFC, 0x3FA4, 0x000C, + 0x3FFC, 0x0030, 0x0FFC, 0x3FD4, 0x0004, + 0x0000, 0x0000, 0x1000, 0x0000, 0x0000 +}; -static const uint16_t filter_5tap_64p_117[165] = { - 16056, 2372, 2372, 16056, 0, - 16052, 2312, 2432, 16060, 0, - 16052, 2252, 2488, 16064, 0, - 16052, 2188, 2548, 16072, 0, - 16052, 2124, 2600, 16076, 0, - 16052, 2064, 2656, 16088, 0, - 16052, 2000, 2708, 16096, 0, - 16056, 1932, 2760, 16108, 0, - 16060, 1868, 2808, 16120, 0, - 16064, 1804, 2856, 16132, 0, - 16068, 1740, 2904, 16148, 16380, - 16076, 1676, 2948, 16164, 16380, - 16080, 1612, 2992, 16180, 16376, - 16088, 1544, 3032, 16200, 16372, - 16096, 1480, 3072, 16220, 16372, - 16104, 1420, 3108, 16244, 16368, - 16112, 1356, 3144, 16268, 16364, - 16120, 1292, 3180, 16292, 16360, - 16128, 1232, 3212, 16320, 16356, - 16136, 1168, 3240, 16344, 16352, - 16144, 1108, 3268, 16376, 16344, - 16156, 1048, 3292, 20, 16340, - 16164, 988, 3316, 52, 16332, - 16172, 932, 3336, 88, 16328, - 16184, 872, 3356, 124, 16320, - 16192, 816, 3372, 160, 16316, - 16204, 760, 3388, 196, 16308, - 16212, 708, 3400, 236, 16300, - 16220, 656, 3412, 276, 16292, - 16232, 604, 3420, 320, 16284, - 16240, 552, 3424, 364, 16276, - 16248, 504, 3428, 408, 16268, - 16256, 456, 3428, 456, 16256 }; +//========================================= +// <num_taps> = 5 +// <num_phases> = 64 +// <scale_ratio> = 1.16666 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= +static const uint16_t filter_5tap_64p_116[165] = { + 0x3EDC, 0x0924, 0x0924, 0x3EDC, 0x0000, + 0x3ED8, 0x08EC, 0x095C, 0x3EE0, 0x0000, + 0x3ED4, 0x08B0, 0x0994, 0x3EE8, 0x0000, + 0x3ED0, 0x0878, 0x09C8, 0x3EF0, 0x0000, + 0x3ED0, 0x083C, 0x09FC, 0x3EF8, 0x0000, + 0x3ED0, 0x0800, 0x0A2C, 0x3F04, 0x0000, + 0x3ED0, 0x07C4, 0x0A5C, 0x3F10, 0x0000, + 0x3ED0, 0x0788, 0x0A8C, 0x3F1C, 0x0000, + 0x3ED0, 0x074C, 0x0AC0, 0x3F28, 0x3FFC, + 0x3ED4, 0x0710, 0x0AE8, 0x3F38, 0x3FFC, + 0x3ED8, 0x06D0, 0x0B18, 0x3F48, 0x3FF8, + 0x3EDC, 0x0694, 0x0B3C, 0x3F5C, 0x3FF8, + 0x3EE0, 0x0658, 0x0B68, 0x3F6C, 0x3FF4, + 0x3EE4, 0x061C, 0x0B90, 0x3F80, 0x3FF0, + 0x3EEC, 0x05DC, 0x0BB4, 0x3F98, 0x3FEC, + 0x3EF0, 0x05A0, 0x0BD8, 0x3FB0, 0x3FE8, + 0x3EF8, 0x0564, 0x0BF8, 0x3FC8, 0x3FE4, + 0x3EFC, 0x0528, 0x0C1C, 0x3FE0, 0x3FE0, + 0x3F04, 0x04EC, 0x0C38, 0x3FFC, 0x3FDC, + 0x3F0C, 0x04B4, 0x0C54, 0x0014, 0x3FD8, + 0x3F14, 0x047C, 0x0C70, 0x0030, 0x3FD0, + 0x3F1C, 0x0440, 0x0C88, 0x0050, 0x3FCC, + 0x3F24, 0x0408, 0x0CA0, 0x0070, 0x3FC4, + 0x3F2C, 0x03D0, 0x0CB0, 0x0094, 0x3FC0, + 0x3F34, 0x0398, 0x0CC4, 0x00B8, 0x3FB8, + 0x3F3C, 0x0364, 0x0CD4, 0x00DC, 0x3FB0, + 0x3F48, 0x032C, 0x0CE0, 0x0100, 0x3FAC, + 0x3F50, 0x02F8, 0x0CEC, 0x0128, 0x3FA4, + 0x3F58, 0x02C4, 0x0CF8, 0x0150, 0x3F9C, + 0x3F60, 0x0290, 0x0D00, 0x017C, 0x3F94, + 0x3F68, 0x0260, 0x0D04, 0x01A8, 0x3F8C, + 0x3F74, 0x0230, 0x0D04, 0x01D4, 0x3F84, + 0x3F7C, 0x0200, 0x0D08, 0x0200, 0x3F7C +}; -static const uint16_t filter_5tap_64p_150[165] = { - 16368, 2064, 2064, 16368, 0, - 16352, 2028, 2100, 16380, 16380, - 16340, 1996, 2132, 12, 16376, - 
16328, 1960, 2168, 24, 16376, - 16316, 1924, 2204, 44, 16372, - 16308, 1888, 2236, 60, 16368, - 16296, 1848, 2268, 76, 16364, - 16288, 1812, 2300, 96, 16360, - 16280, 1772, 2328, 116, 16356, - 16272, 1736, 2360, 136, 16352, - 16268, 1696, 2388, 160, 16348, - 16260, 1656, 2416, 180, 16344, - 16256, 1616, 2440, 204, 16340, - 16248, 1576, 2464, 228, 16336, - 16244, 1536, 2492, 252, 16332, - 16240, 1496, 2512, 276, 16324, - 16240, 1456, 2536, 304, 16320, - 16236, 1416, 2556, 332, 16316, - 16232, 1376, 2576, 360, 16312, - 16232, 1336, 2592, 388, 16308, - 16232, 1296, 2612, 416, 16300, - 16232, 1256, 2628, 448, 16296, - 16232, 1216, 2640, 480, 16292, - 16232, 1172, 2652, 512, 16288, - 16232, 1132, 2664, 544, 16284, - 16232, 1092, 2676, 576, 16280, - 16236, 1056, 2684, 608, 16272, - 16236, 1016, 2692, 644, 16268, - 16240, 976, 2700, 680, 16264, - 16240, 936, 2704, 712, 16260, - 16244, 900, 2708, 748, 16256, - 16248, 860, 2708, 788, 16252, - 16248, 824, 2708, 824, 16248 }; +//========================================= +// <num_taps> = 5 +// <num_phases> = 64 +// <scale_ratio> = 1.49999 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= +static const uint16_t filter_5tap_64p_149[165] = { + 0x3FF4, 0x080C, 0x080C, 0x3FF4, 0x0000, + 0x3FE8, 0x07E8, 0x0830, 0x0000, 0x0000, + 0x3FDC, 0x07C8, 0x0850, 0x0010, 0x3FFC, + 0x3FD0, 0x07A4, 0x0878, 0x001C, 0x3FF8, + 0x3FC4, 0x0780, 0x0898, 0x0030, 0x3FF4, + 0x3FB8, 0x075C, 0x08B8, 0x0040, 0x3FF4, + 0x3FB0, 0x0738, 0x08D8, 0x0050, 0x3FF0, + 0x3FA8, 0x0710, 0x08F8, 0x0064, 0x3FEC, + 0x3FA0, 0x06EC, 0x0914, 0x0078, 0x3FE8, + 0x3F98, 0x06C4, 0x0934, 0x008C, 0x3FE4, + 0x3F90, 0x06A0, 0x094C, 0x00A4, 0x3FE0, + 0x3F8C, 0x0678, 0x0968, 0x00B8, 0x3FDC, + 0x3F84, 0x0650, 0x0984, 0x00D0, 0x3FD8, + 0x3F80, 0x0628, 0x099C, 0x00E8, 0x3FD4, + 0x3F7C, 0x0600, 0x09B8, 0x0100, 0x3FCC, + 0x3F78, 0x05D8, 0x09D0, 0x0118, 0x3FC8, + 0x3F74, 0x05B0, 0x09E4, 0x0134, 0x3FC4, + 0x3F70, 0x0588, 0x09F8, 0x0150, 0x3FC0, + 0x3F70, 0x0560, 0x0A08, 0x016C, 0x3FBC, + 0x3F6C, 0x0538, 0x0A20, 0x0188, 0x3FB4, + 0x3F6C, 0x0510, 0x0A30, 0x01A4, 0x3FB0, + 0x3F6C, 0x04E8, 0x0A3C, 0x01C4, 0x3FAC, + 0x3F6C, 0x04C0, 0x0A48, 0x01E4, 0x3FA8, + 0x3F6C, 0x0498, 0x0A58, 0x0200, 0x3FA4, + 0x3F6C, 0x0470, 0x0A60, 0x0224, 0x3FA0, + 0x3F6C, 0x0448, 0x0A70, 0x0244, 0x3F98, + 0x3F70, 0x0420, 0x0A78, 0x0264, 0x3F94, + 0x3F70, 0x03F8, 0x0A80, 0x0288, 0x3F90, + 0x3F74, 0x03D4, 0x0A84, 0x02A8, 0x3F8C, + 0x3F74, 0x03AC, 0x0A8C, 0x02CC, 0x3F88, + 0x3F78, 0x0384, 0x0A90, 0x02F0, 0x3F84, + 0x3F7C, 0x0360, 0x0A90, 0x0314, 0x3F80, + 0x3F7C, 0x033C, 0x0A90, 0x033C, 0x3F7C +}; +//========================================= +// <num_taps> = 5 +// <num_phases> = 64 +// <scale_ratio> = 1.83332 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_5tap_64p_183[165] = { - 228, 1816, 1816, 228, 0, - 216, 1792, 1836, 248, 16380, - 200, 1772, 1860, 264, 16376, - 184, 1748, 1884, 280, 16376, - 168, 1728, 1904, 300, 16372, - 156, 1704, 1928, 316, 16368, - 144, 1680, 1948, 336, 16364, - 128, 1656, 1968, 356, 16364, - 116, 1632, 1988, 376, 16360, - 104, 1604, 2008, 396, 16356, - 96, 1580, 2024, 416, 16356, - 84, 1556, 2044, 440, 16352, - 72, 1528, 2060, 460, 16348, - 64, 1504, 2076, 484, 16348, - 52, 1476, 2092, 504, 16344, - 44, 1448, 2104, 528, 16344, - 36, 1424, 2120, 552, 16340, - 28, 1396, 2132, 576, 
16340, - 20, 1368, 2144, 600, 16340, - 12, 1340, 2156, 624, 16336, - 4, 1312, 2168, 652, 16336, - 0, 1284, 2180, 676, 16336, - 16376, 1256, 2188, 700, 16332, - 16372, 1228, 2196, 728, 16332, - 16368, 1200, 2204, 752, 16332, - 16364, 1172, 2212, 780, 16332, - 16356, 1144, 2216, 808, 16332, - 16352, 1116, 2220, 836, 16332, - 16352, 1084, 2224, 860, 16332, - 16348, 1056, 2228, 888, 16336, - 16344, 1028, 2232, 916, 16336, - 16340, 1000, 2232, 944, 16336, - 16340, 972, 2232, 972, 16340 }; + 0x0168, 0x069C, 0x0698, 0x0164, 0x0000, + 0x0154, 0x068C, 0x06AC, 0x0174, 0x0000, + 0x0144, 0x0674, 0x06C0, 0x0188, 0x0000, + 0x0138, 0x0664, 0x06D0, 0x0198, 0x3FFC, + 0x0128, 0x0654, 0x06E0, 0x01A8, 0x3FFC, + 0x0118, 0x0640, 0x06F0, 0x01BC, 0x3FFC, + 0x010C, 0x0630, 0x0700, 0x01CC, 0x3FF8, + 0x00FC, 0x061C, 0x0710, 0x01E0, 0x3FF8, + 0x00F0, 0x060C, 0x071C, 0x01F0, 0x3FF8, + 0x00E4, 0x05F4, 0x072C, 0x0204, 0x3FF8, + 0x00D8, 0x05E4, 0x0738, 0x0218, 0x3FF4, + 0x00CC, 0x05D0, 0x0744, 0x022C, 0x3FF4, + 0x00C0, 0x05B8, 0x0754, 0x0240, 0x3FF4, + 0x00B4, 0x05A4, 0x0760, 0x0254, 0x3FF4, + 0x00A8, 0x0590, 0x076C, 0x0268, 0x3FF4, + 0x009C, 0x057C, 0x0778, 0x027C, 0x3FF4, + 0x0094, 0x0564, 0x0780, 0x0294, 0x3FF4, + 0x0088, 0x0550, 0x0788, 0x02A8, 0x3FF8, + 0x0080, 0x0538, 0x0794, 0x02BC, 0x3FF8, + 0x0074, 0x0524, 0x079C, 0x02D4, 0x3FF8, + 0x006C, 0x0510, 0x07A4, 0x02E8, 0x3FF8, + 0x0064, 0x04F4, 0x07AC, 0x0300, 0x3FFC, + 0x005C, 0x04E4, 0x07B0, 0x0314, 0x3FFC, + 0x0054, 0x04C8, 0x07B8, 0x032C, 0x0000, + 0x004C, 0x04B4, 0x07C0, 0x0340, 0x0000, + 0x0044, 0x04A0, 0x07C4, 0x0358, 0x0000, + 0x003C, 0x0488, 0x07C8, 0x0370, 0x0004, + 0x0038, 0x0470, 0x07CC, 0x0384, 0x0008, + 0x0030, 0x045C, 0x07D0, 0x039C, 0x0008, + 0x002C, 0x0444, 0x07D0, 0x03B4, 0x000C, + 0x0024, 0x042C, 0x07D4, 0x03CC, 0x0010, + 0x0020, 0x0414, 0x07D4, 0x03E0, 0x0018, + 0x001C, 0x03FC, 0x07D4, 0x03F8, 0x001C +}; +//========================================= +// <num_taps> = 6 +// <num_phases> = 64 +// <scale_ratio> = 0.83333 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_6tap_64p_upscale[198] = { - 0, 0, 4092, 0, 0, 0, - 12, 16332, 4092, 52, 16368, 0, - 24, 16280, 4088, 108, 16356, 0, - 36, 16236, 4080, 168, 16340, 0, - 44, 16188, 4064, 228, 16324, 0, - 56, 16148, 4052, 292, 16308, 0, - 64, 16108, 4032, 356, 16292, 4, - 72, 16072, 4008, 424, 16276, 4, - 80, 16036, 3980, 492, 16256, 4, - 88, 16004, 3952, 564, 16240, 8, - 96, 15972, 3920, 636, 16220, 8, - 100, 15944, 3884, 712, 16204, 12, - 108, 15916, 3844, 788, 16184, 16, - 112, 15896, 3800, 864, 16164, 20, - 116, 15872, 3756, 944, 16144, 20, - 120, 15852, 3708, 1024, 16124, 24, - 120, 15836, 3656, 1108, 16104, 28, - 124, 15824, 3600, 1192, 16084, 32, - 124, 15808, 3544, 1276, 16064, 36, - 124, 15800, 3484, 1360, 16044, 40, - 128, 15792, 3420, 1448, 16024, 44, - 128, 15784, 3352, 1536, 16004, 48, - 124, 15780, 3288, 1624, 15988, 52, - 124, 15776, 3216, 1712, 15968, 56, - 124, 15776, 3144, 1800, 15948, 64, - 120, 15776, 3068, 1888, 15932, 68, - 120, 15780, 2992, 1976, 15912, 72, - 116, 15784, 2916, 2064, 15896, 76, - 112, 15792, 2836, 2152, 15880, 80, - 108, 15796, 2752, 2244, 15868, 84, - 104, 15804, 2672, 2328, 15852, 88, - 104, 15816, 2588, 2416, 15840, 92, - 100, 15828, 2504, 2504, 15828, 100 }; + 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000, + 0x000C, 0x3FD0, 0x0FFC, 0x0034, 0x3FF4, 0x0000, + 0x0018, 0x3F9C, 0x0FF8, 0x006C, 0x3FE8, 0x0000, + 0x0024, 0x3F6C, 0x0FF0, 
0x00A8, 0x3FD8, 0x0000, + 0x002C, 0x3F44, 0x0FE4, 0x00E4, 0x3FC8, 0x0000, + 0x0038, 0x3F18, 0x0FD4, 0x0124, 0x3FB8, 0x0000, + 0x0040, 0x3EF0, 0x0FC0, 0x0164, 0x3FA8, 0x0004, + 0x0048, 0x3EC8, 0x0FAC, 0x01A8, 0x3F98, 0x0004, + 0x0050, 0x3EA8, 0x0F94, 0x01EC, 0x3F84, 0x0004, + 0x0058, 0x3E84, 0x0F74, 0x0234, 0x3F74, 0x0008, + 0x0060, 0x3E68, 0x0F54, 0x027C, 0x3F60, 0x0008, + 0x0064, 0x3E4C, 0x0F30, 0x02C8, 0x3F4C, 0x000C, + 0x006C, 0x3E30, 0x0F04, 0x0314, 0x3F3C, 0x0010, + 0x0070, 0x3E18, 0x0EDC, 0x0360, 0x3F28, 0x0014, + 0x0074, 0x3E04, 0x0EB0, 0x03B0, 0x3F14, 0x0014, + 0x0078, 0x3DF0, 0x0E80, 0x0400, 0x3F00, 0x0018, + 0x0078, 0x3DE0, 0x0E4C, 0x0454, 0x3EEC, 0x001C, + 0x007C, 0x3DD0, 0x0E14, 0x04A8, 0x3ED8, 0x0020, + 0x007C, 0x3DC4, 0x0DDC, 0x04FC, 0x3EC4, 0x0024, + 0x007C, 0x3DBC, 0x0DA0, 0x0550, 0x3EB0, 0x0028, + 0x0080, 0x3DB4, 0x0D5C, 0x05A8, 0x3E9C, 0x002C, + 0x0080, 0x3DAC, 0x0D1C, 0x0600, 0x3E88, 0x0030, + 0x007C, 0x3DA8, 0x0CDC, 0x0658, 0x3E74, 0x0034, + 0x007C, 0x3DA4, 0x0C94, 0x06B0, 0x3E64, 0x0038, + 0x007C, 0x3DA4, 0x0C48, 0x0708, 0x3E50, 0x0040, + 0x0078, 0x3DA4, 0x0C00, 0x0760, 0x3E40, 0x0044, + 0x0078, 0x3DA8, 0x0BB4, 0x07B8, 0x3E2C, 0x0048, + 0x0074, 0x3DAC, 0x0B68, 0x0810, 0x3E1C, 0x004C, + 0x0070, 0x3DB4, 0x0B18, 0x0868, 0x3E0C, 0x0050, + 0x006C, 0x3DBC, 0x0AC4, 0x08C4, 0x3DFC, 0x0054, + 0x0068, 0x3DC4, 0x0A74, 0x0918, 0x3DF0, 0x0058, + 0x0068, 0x3DCC, 0x0A20, 0x0970, 0x3DE0, 0x005C, + 0x0064, 0x3DD4, 0x09C8, 0x09C8, 0x3DD4, 0x0064 +}; -static const uint16_t filter_6tap_64p_117[198] = { - 16168, 476, 3568, 476, 16168, 0, - 16180, 428, 3564, 528, 16156, 0, - 16192, 376, 3556, 584, 16144, 4, - 16204, 328, 3548, 636, 16128, 4, - 16216, 280, 3540, 692, 16116, 8, - 16228, 232, 3524, 748, 16104, 12, - 16240, 188, 3512, 808, 16092, 12, - 16252, 148, 3492, 864, 16080, 16, - 16264, 104, 3472, 924, 16068, 16, - 16276, 64, 3452, 984, 16056, 20, - 16284, 28, 3428, 1044, 16048, 24, - 16296, 16376, 3400, 1108, 16036, 24, - 16304, 16340, 3372, 1168, 16024, 28, - 16316, 16304, 3340, 1232, 16016, 32, - 16324, 16272, 3308, 1296, 16004, 32, - 16332, 16244, 3272, 1360, 15996, 36, - 16344, 16212, 3236, 1424, 15988, 36, - 16352, 16188, 3200, 1488, 15980, 40, - 16360, 16160, 3160, 1552, 15972, 40, - 16368, 16136, 3116, 1616, 15964, 40, - 16372, 16112, 3072, 1680, 15956, 44, - 16380, 16092, 3028, 1744, 15952, 44, - 0, 16072, 2980, 1808, 15948, 44, - 8, 16052, 2932, 1872, 15944, 48, - 12, 16036, 2880, 1936, 15940, 48, - 16, 16020, 2828, 2000, 15936, 48, - 20, 16008, 2776, 2064, 15936, 48, - 24, 15996, 2724, 2128, 15936, 48, - 28, 15984, 2668, 2192, 15936, 48, - 32, 15972, 2612, 2252, 15940, 44, - 36, 15964, 2552, 2316, 15940, 44, - 40, 15956, 2496, 2376, 15944, 44, - 40, 15952, 2436, 2436, 15952, 40 }; +//========================================= +// <num_taps> = 6 +// <num_phases> = 64 +// <scale_ratio> = 1.16666 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= +static const uint16_t filter_6tap_64p_116[198] = { + 0x3F0C, 0x0240, 0x0D68, 0x0240, 0x3F0C, 0x0000, + 0x3F18, 0x0210, 0x0D64, 0x0274, 0x3F00, 0x0000, + 0x3F24, 0x01E0, 0x0D58, 0x02A8, 0x3EF8, 0x0004, + 0x3F2C, 0x01B0, 0x0D58, 0x02DC, 0x3EEC, 0x0004, + 0x3F38, 0x0180, 0x0D50, 0x0310, 0x3EE0, 0x0008, + 0x3F44, 0x0154, 0x0D40, 0x0348, 0x3ED8, 0x0008, + 0x3F50, 0x0128, 0x0D34, 0x037C, 0x3ECC, 0x000C, + 0x3F5C, 0x00FC, 0x0D20, 0x03B4, 0x3EC4, 0x0010, + 0x3F64, 0x00D4, 0x0D14, 0x03EC, 0x3EB8, 0x0010, + 0x3F70, 0x00AC, 0x0CFC, 0x0424, 
0x3EB0, 0x0014, + 0x3F78, 0x0084, 0x0CE8, 0x0460, 0x3EA8, 0x0014, + 0x3F84, 0x0060, 0x0CCC, 0x0498, 0x3EA0, 0x0018, + 0x3F90, 0x003C, 0x0CB4, 0x04D0, 0x3E98, 0x0018, + 0x3F98, 0x0018, 0x0C9C, 0x050C, 0x3E90, 0x0018, + 0x3FA0, 0x3FFC, 0x0C78, 0x0548, 0x3E88, 0x001C, + 0x3FAC, 0x3FDC, 0x0C54, 0x0584, 0x3E84, 0x001C, + 0x3FB4, 0x3FBC, 0x0C3C, 0x05BC, 0x3E7C, 0x001C, + 0x3FBC, 0x3FA0, 0x0C14, 0x05F8, 0x3E78, 0x0020, + 0x3FC4, 0x3F84, 0x0BF0, 0x0634, 0x3E74, 0x0020, + 0x3FCC, 0x3F68, 0x0BCC, 0x0670, 0x3E70, 0x0020, + 0x3FD4, 0x3F50, 0x0BA4, 0x06AC, 0x3E6C, 0x0020, + 0x3FDC, 0x3F38, 0x0B78, 0x06E8, 0x3E6C, 0x0020, + 0x3FE0, 0x3F24, 0x0B50, 0x0724, 0x3E68, 0x0020, + 0x3FE8, 0x3F0C, 0x0B24, 0x0760, 0x3E68, 0x0020, + 0x3FF0, 0x3EFC, 0x0AF4, 0x0798, 0x3E68, 0x0020, + 0x3FF4, 0x3EE8, 0x0AC8, 0x07D4, 0x3E68, 0x0020, + 0x3FFC, 0x3ED8, 0x0A94, 0x0810, 0x3E6C, 0x001C, + 0x0000, 0x3EC8, 0x0A64, 0x0848, 0x3E70, 0x001C, + 0x0000, 0x3EB8, 0x0A38, 0x0880, 0x3E74, 0x001C, + 0x0004, 0x3EAC, 0x0A04, 0x08BC, 0x3E78, 0x0018, + 0x0008, 0x3EA4, 0x09D0, 0x08F4, 0x3E7C, 0x0014, + 0x000C, 0x3E98, 0x0998, 0x092C, 0x3E84, 0x0014, + 0x0010, 0x3E90, 0x0964, 0x0960, 0x3E8C, 0x0010 +}; -static const uint16_t filter_6tap_64p_150[198] = { - 16148, 920, 2724, 920, 16148, 0, - 16152, 880, 2724, 956, 16148, 0, - 16152, 844, 2720, 996, 16144, 0, - 16156, 804, 2716, 1032, 16144, 0, - 16156, 768, 2712, 1072, 16144, 0, - 16160, 732, 2708, 1112, 16144, 16380, - 16164, 696, 2700, 1152, 16144, 16380, - 16168, 660, 2692, 1192, 16148, 16380, - 16172, 628, 2684, 1232, 16148, 16380, - 16176, 592, 2672, 1272, 16152, 16376, - 16180, 560, 2660, 1312, 16152, 16376, - 16184, 524, 2648, 1348, 16156, 16376, - 16192, 492, 2632, 1388, 16160, 16372, - 16196, 460, 2616, 1428, 16164, 16372, - 16200, 432, 2600, 1468, 16168, 16368, - 16204, 400, 2584, 1508, 16176, 16364, - 16212, 368, 2564, 1548, 16180, 16364, - 16216, 340, 2544, 1588, 16188, 16360, - 16220, 312, 2524, 1628, 16196, 16356, - 16228, 284, 2504, 1668, 16204, 16356, - 16232, 256, 2480, 1704, 16212, 16352, - 16240, 232, 2456, 1744, 16224, 16348, - 16244, 204, 2432, 1780, 16232, 16344, - 16248, 180, 2408, 1820, 16244, 16340, - 16256, 156, 2380, 1856, 16256, 16336, - 16260, 132, 2352, 1896, 16268, 16332, - 16268, 108, 2324, 1932, 16280, 16328, - 16272, 88, 2296, 1968, 16292, 16324, - 16276, 64, 2268, 2004, 16308, 16320, - 16284, 44, 2236, 2036, 16324, 16312, - 16288, 24, 2204, 2072, 16340, 16308, - 16292, 8, 2172, 2108, 16356, 16304, - 16300, 16372, 2140, 2140, 16372, 16300 }; +//========================================= +// <num_taps> = 6 +// <num_phases> = 64 +// <scale_ratio> = 1.49999 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= +static const uint16_t filter_6tap_64p_149[198] = { + 0x3F14, 0x0394, 0x0AB0, 0x0394, 0x3F14, 0x0000, + 0x3F18, 0x036C, 0x0AB0, 0x03B8, 0x3F14, 0x0000, + 0x3F18, 0x0348, 0x0AAC, 0x03E0, 0x3F14, 0x0000, + 0x3F1C, 0x0320, 0x0AAC, 0x0408, 0x3F10, 0x0000, + 0x3F20, 0x02FC, 0x0AA8, 0x042C, 0x3F10, 0x0000, + 0x3F24, 0x02D8, 0x0AA0, 0x0454, 0x3F10, 0x0000, + 0x3F28, 0x02B4, 0x0A98, 0x047C, 0x3F10, 0x0000, + 0x3F28, 0x0290, 0x0A90, 0x04A4, 0x3F14, 0x0000, + 0x3F30, 0x026C, 0x0A84, 0x04CC, 0x3F14, 0x0000, + 0x3F34, 0x024C, 0x0A7C, 0x04F4, 0x3F14, 0x3FFC, + 0x3F38, 0x0228, 0x0A70, 0x051C, 0x3F18, 0x3FFC, + 0x3F3C, 0x0208, 0x0A64, 0x0544, 0x3F1C, 0x3FF8, + 0x3F40, 0x01E8, 0x0A54, 0x056C, 0x3F20, 0x3FF8, + 0x3F44, 0x01C8, 0x0A48, 0x0594, 0x3F24, 0x3FF4, + 0x3F4C, 
0x01A8, 0x0A34, 0x05BC, 0x3F28, 0x3FF4, + 0x3F50, 0x0188, 0x0A28, 0x05E4, 0x3F2C, 0x3FF0, + 0x3F54, 0x016C, 0x0A10, 0x060C, 0x3F34, 0x3FF0, + 0x3F5C, 0x014C, 0x09FC, 0x0634, 0x3F3C, 0x3FEC, + 0x3F60, 0x0130, 0x09EC, 0x065C, 0x3F40, 0x3FE8, + 0x3F68, 0x0114, 0x09D0, 0x0684, 0x3F48, 0x3FE8, + 0x3F6C, 0x00F8, 0x09B8, 0x06AC, 0x3F54, 0x3FE4, + 0x3F74, 0x00E0, 0x09A0, 0x06D0, 0x3F5C, 0x3FE0, + 0x3F78, 0x00C4, 0x098C, 0x06F8, 0x3F64, 0x3FDC, + 0x3F7C, 0x00AC, 0x0970, 0x0720, 0x3F70, 0x3FD8, + 0x3F84, 0x0094, 0x0954, 0x0744, 0x3F7C, 0x3FD4, + 0x3F88, 0x007C, 0x093C, 0x0768, 0x3F88, 0x3FD0, + 0x3F90, 0x0064, 0x091C, 0x0790, 0x3F94, 0x3FCC, + 0x3F94, 0x0050, 0x08FC, 0x07B4, 0x3FA4, 0x3FC8, + 0x3F98, 0x003C, 0x08E0, 0x07D8, 0x3FB0, 0x3FC4, + 0x3FA0, 0x0024, 0x08C0, 0x07FC, 0x3FC0, 0x3FC0, + 0x3FA4, 0x0014, 0x08A4, 0x081C, 0x3FD0, 0x3FB8, + 0x3FAC, 0x0000, 0x0880, 0x0840, 0x3FE0, 0x3FB4, + 0x3FB0, 0x3FF0, 0x0860, 0x0860, 0x3FF0, 0x3FB0 +}; +//========================================= +// <num_taps> = 6 +// <num_phases> = 64 +// <scale_ratio> = 1.83332 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_6tap_64p_183[198] = { - 16296, 1032, 2196, 1032, 16296, 0, - 16292, 1004, 2200, 1060, 16304, 16380, - 16288, 976, 2200, 1088, 16308, 16380, - 16284, 952, 2196, 1116, 16312, 16376, - 16284, 924, 2196, 1144, 16320, 16376, - 16280, 900, 2192, 1172, 16324, 16372, - 16276, 872, 2192, 1200, 16332, 16368, - 16276, 848, 2188, 1228, 16340, 16368, - 16272, 820, 2180, 1256, 16348, 16364, - 16272, 796, 2176, 1280, 16356, 16360, - 16268, 768, 2168, 1308, 16364, 16360, - 16268, 744, 2164, 1336, 16372, 16356, - 16268, 716, 2156, 1364, 16380, 16352, - 16264, 692, 2148, 1392, 4, 16352, - 16264, 668, 2136, 1420, 16, 16348, - 16264, 644, 2128, 1448, 28, 16344, - 16264, 620, 2116, 1472, 36, 16340, - 16264, 596, 2108, 1500, 48, 16340, - 16268, 572, 2096, 1524, 60, 16336, - 16268, 548, 2080, 1552, 72, 16332, - 16268, 524, 2068, 1576, 88, 16328, - 16268, 504, 2056, 1604, 100, 16324, - 16272, 480, 2040, 1628, 112, 16324, - 16272, 456, 2024, 1652, 128, 16320, - 16272, 436, 2008, 1680, 144, 16316, - 16276, 416, 1992, 1704, 156, 16312, - 16276, 392, 1976, 1724, 172, 16308, - 16280, 372, 1956, 1748, 188, 16308, - 16280, 352, 1940, 1772, 204, 16304, - 16284, 332, 1920, 1796, 224, 16300, - 16288, 312, 1900, 1816, 240, 16296, - 16288, 296, 1880, 1840, 256, 16296, - 16292, 276, 1860, 1860, 276, 16292 }; + 0x002C, 0x0420, 0x076C, 0x041C, 0x002C, 0x0000, + 0x0028, 0x040C, 0x0768, 0x0430, 0x0034, 0x0000, + 0x0020, 0x03F8, 0x0768, 0x0448, 0x003C, 0x3FFC, + 0x0018, 0x03E4, 0x0768, 0x045C, 0x0044, 0x3FFC, + 0x0014, 0x03D0, 0x0768, 0x0470, 0x004C, 0x3FF8, + 0x000C, 0x03BC, 0x0764, 0x0484, 0x0058, 0x3FF8, + 0x0008, 0x03A4, 0x0764, 0x049C, 0x0060, 0x3FF4, + 0x0004, 0x0390, 0x0760, 0x04B0, 0x0068, 0x3FF4, + 0x0000, 0x037C, 0x0760, 0x04C4, 0x0070, 0x3FF0, + 0x3FFC, 0x0364, 0x075C, 0x04D8, 0x007C, 0x3FF0, + 0x3FF8, 0x0350, 0x0758, 0x04F0, 0x0084, 0x3FEC, + 0x3FF4, 0x033C, 0x0750, 0x0504, 0x0090, 0x3FEC, + 0x3FF0, 0x0328, 0x074C, 0x0518, 0x009C, 0x3FE8, + 0x3FEC, 0x0314, 0x0744, 0x052C, 0x00A8, 0x3FE8, + 0x3FE8, 0x0304, 0x0740, 0x0540, 0x00B0, 0x3FE4, + 0x3FE4, 0x02EC, 0x073C, 0x0554, 0x00BC, 0x3FE4, + 0x3FE0, 0x02DC, 0x0734, 0x0568, 0x00C8, 0x3FE0, + 0x3FE0, 0x02C4, 0x072C, 0x057C, 0x00D4, 0x3FE0, + 0x3FDC, 0x02B4, 0x0724, 0x058C, 0x00E4, 0x3FDC, + 0x3FDC, 0x02A0, 0x0718, 0x05A0, 0x00F0, 0x3FDC, + 0x3FD8, 0x028C, 
0x0714, 0x05B4, 0x00FC, 0x3FD8, + 0x3FD8, 0x0278, 0x0704, 0x05C8, 0x010C, 0x3FD8, + 0x3FD4, 0x0264, 0x0700, 0x05D8, 0x0118, 0x3FD8, + 0x3FD4, 0x0254, 0x06F0, 0x05EC, 0x0128, 0x3FD4, + 0x3FD0, 0x0244, 0x06E8, 0x05FC, 0x0134, 0x3FD4, + 0x3FD0, 0x0230, 0x06DC, 0x060C, 0x0144, 0x3FD4, + 0x3FD0, 0x021C, 0x06D0, 0x0620, 0x0154, 0x3FD0, + 0x3FD0, 0x0208, 0x06C4, 0x0630, 0x0164, 0x3FD0, + 0x3FD0, 0x01F8, 0x06B8, 0x0640, 0x0170, 0x3FD0, + 0x3FCC, 0x01E8, 0x06AC, 0x0650, 0x0180, 0x3FD0, + 0x3FCC, 0x01D8, 0x069C, 0x0660, 0x0190, 0x3FD0, + 0x3FCC, 0x01C4, 0x068C, 0x0670, 0x01A4, 0x3FD0, + 0x3FCC, 0x01B8, 0x0680, 0x067C, 0x01B4, 0x3FCC +}; +//========================================= +// <num_taps> = 7 +// <num_phases> = 64 +// <scale_ratio> = 0.83333 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_7tap_64p_upscale[231] = { - 176, 15760, 2488, 2488, 15760, 176, 0, - 172, 15772, 2404, 2572, 15752, 180, 16380, - 168, 15784, 2324, 2656, 15740, 184, 16380, - 164, 15800, 2240, 2736, 15732, 188, 16376, - 160, 15812, 2152, 2816, 15728, 192, 16376, - 152, 15828, 2068, 2896, 15724, 192, 16376, - 148, 15848, 1984, 2972, 15720, 196, 16372, - 140, 15864, 1896, 3048, 15720, 196, 16372, - 136, 15884, 1812, 3124, 15720, 196, 16368, - 128, 15900, 1724, 3196, 15720, 196, 16368, - 120, 15920, 1640, 3268, 15724, 196, 16368, - 116, 15940, 1552, 3336, 15732, 196, 16364, - 108, 15964, 1468, 3400, 15740, 196, 16364, - 104, 15984, 1384, 3464, 15748, 192, 16364, - 96, 16004, 1300, 3524, 15760, 188, 16364, - 88, 16028, 1216, 3584, 15776, 184, 16364, - 84, 16048, 1132, 3640, 15792, 180, 16360, - 76, 16072, 1048, 3692, 15812, 176, 16360, - 68, 16092, 968, 3744, 15832, 168, 16360, - 64, 16116, 888, 3788, 15856, 160, 16360, - 56, 16140, 812, 3832, 15884, 152, 16360, - 52, 16160, 732, 3876, 15912, 144, 16360, - 44, 16184, 656, 3912, 15944, 136, 16364, - 40, 16204, 584, 3944, 15976, 124, 16364, - 32, 16228, 512, 3976, 16012, 116, 16364, - 28, 16248, 440, 4004, 16048, 104, 16364, - 24, 16268, 372, 4028, 16092, 88, 16368, - 20, 16288, 304, 4048, 16132, 76, 16368, - 12, 16308, 240, 4064, 16180, 60, 16372, - 8, 16328, 176, 4076, 16228, 48, 16372, - 4, 16348, 112, 4088, 16276, 32, 16376, - 0, 16364, 56, 4092, 16328, 16, 16380, - 0, 0, 0, 4096, 0, 0, 0 }; + 0x00B0, 0x3D98, 0x09BC, 0x09B8, 0x3D94, 0x00B0, 0x0000, + 0x00AC, 0x3DA0, 0x0968, 0x0A10, 0x3D88, 0x00B4, 0x0000, + 0x00A8, 0x3DAC, 0x0914, 0x0A60, 0x3D80, 0x00B8, 0x0000, + 0x00A4, 0x3DB8, 0x08C0, 0x0AB4, 0x3D78, 0x00BC, 0x3FFC, + 0x00A0, 0x3DC8, 0x0868, 0x0B00, 0x3D74, 0x00C0, 0x3FFC, + 0x0098, 0x3DD8, 0x0818, 0x0B54, 0x3D6C, 0x00C0, 0x3FF8, + 0x0094, 0x3DE8, 0x07C0, 0x0B9C, 0x3D6C, 0x00C4, 0x3FF8, + 0x008C, 0x3DFC, 0x0768, 0x0BEC, 0x3D68, 0x00C4, 0x3FF8, + 0x0088, 0x3E0C, 0x0714, 0x0C38, 0x3D68, 0x00C4, 0x3FF4, + 0x0080, 0x3E20, 0x06BC, 0x0C80, 0x3D6C, 0x00C4, 0x3FF4, + 0x0078, 0x3E34, 0x0668, 0x0CC4, 0x3D70, 0x00C4, 0x3FF4, + 0x0074, 0x3E48, 0x0610, 0x0D08, 0x3D78, 0x00C4, 0x3FF0, + 0x006C, 0x3E5C, 0x05BC, 0x0D48, 0x3D80, 0x00C4, 0x3FF0, + 0x0068, 0x3E74, 0x0568, 0x0D84, 0x3D88, 0x00C0, 0x3FF0, + 0x0060, 0x3E88, 0x0514, 0x0DC8, 0x3D94, 0x00BC, 0x3FEC, + 0x0058, 0x3E9C, 0x04C0, 0x0E04, 0x3DA4, 0x00B8, 0x3FEC, + 0x0054, 0x3EB4, 0x046C, 0x0E38, 0x3DB4, 0x00B4, 0x3FEC, + 0x004C, 0x3ECC, 0x0418, 0x0E6C, 0x3DC8, 0x00B0, 0x3FEC, + 0x0044, 0x3EE0, 0x03C8, 0x0EA4, 0x3DDC, 0x00A8, 0x3FEC, + 0x0040, 0x3EF8, 0x0378, 0x0ED0, 0x3DF4, 0x00A0, 0x3FEC, + 0x0038, 
0x3F0C, 0x032C, 0x0EFC, 0x3E10, 0x0098, 0x3FEC, + 0x0034, 0x3F24, 0x02DC, 0x0F24, 0x3E2C, 0x0090, 0x3FEC, + 0x002C, 0x3F38, 0x0294, 0x0F4C, 0x3E48, 0x0088, 0x3FEC, + 0x0028, 0x3F50, 0x0248, 0x0F68, 0x3E6C, 0x007C, 0x3FF0, + 0x0020, 0x3F64, 0x0200, 0x0F88, 0x3E90, 0x0074, 0x3FF0, + 0x001C, 0x3F7C, 0x01B8, 0x0FA4, 0x3EB4, 0x0068, 0x3FF0, + 0x0018, 0x3F90, 0x0174, 0x0FBC, 0x3EDC, 0x0058, 0x3FF4, + 0x0014, 0x3FA4, 0x0130, 0x0FD0, 0x3F08, 0x004C, 0x3FF4, + 0x000C, 0x3FB8, 0x00F0, 0x0FE4, 0x3F34, 0x003C, 0x3FF8, + 0x0008, 0x3FCC, 0x00B0, 0x0FF0, 0x3F64, 0x0030, 0x3FF8, + 0x0004, 0x3FDC, 0x0070, 0x0FFC, 0x3F98, 0x0020, 0x3FFC, + 0x0000, 0x3FF0, 0x0038, 0x0FFC, 0x3FCC, 0x0010, 0x0000, + 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000 +}; -static const uint16_t filter_7tap_64p_117[231] = { - 92, 15868, 2464, 2464, 15868, 92, 0, - 96, 15864, 2404, 2528, 15876, 88, 0, - 100, 15860, 2344, 2584, 15884, 84, 0, - 104, 15856, 2280, 2644, 15892, 76, 0, - 108, 15852, 2216, 2700, 15904, 72, 0, - 108, 15852, 2152, 2756, 15916, 64, 0, - 112, 15852, 2088, 2812, 15932, 60, 0, - 112, 15852, 2024, 2864, 15948, 52, 0, - 112, 15856, 1960, 2916, 15964, 44, 0, - 116, 15860, 1892, 2964, 15984, 36, 0, - 116, 15864, 1828, 3016, 16004, 24, 4, - 116, 15868, 1760, 3060, 16024, 16, 4, - 116, 15876, 1696, 3108, 16048, 8, 8, - 116, 15884, 1628, 3152, 16072, 16380, 8, - 112, 15892, 1564, 3192, 16100, 16372, 8, - 112, 15900, 1496, 3232, 16124, 16360, 12, - 112, 15908, 1428, 3268, 16156, 16348, 12, - 108, 15920, 1364, 3304, 16188, 16336, 16, - 108, 15928, 1300, 3340, 16220, 16324, 20, - 104, 15940, 1232, 3372, 16252, 16312, 20, - 104, 15952, 1168, 3400, 16288, 16300, 24, - 100, 15964, 1104, 3428, 16328, 16284, 28, - 96, 15980, 1040, 3452, 16364, 16272, 28, - 96, 15992, 976, 3476, 20, 16256, 32, - 92, 16004, 916, 3496, 64, 16244, 36, - 88, 16020, 856, 3516, 108, 16228, 40, - 84, 16032, 792, 3532, 152, 16216, 44, - 80, 16048, 732, 3544, 200, 16200, 48, - 80, 16064, 676, 3556, 248, 16184, 48, - 76, 16080, 616, 3564, 296, 16168, 52, - 72, 16092, 560, 3568, 344, 16156, 56, - 68, 16108, 504, 3572, 396, 16140, 60, - 64, 16124, 452, 3576, 452, 16124, 64 }; +//========================================= +// <num_taps> = 7 +// <num_phases> = 64 +// <scale_ratio> = 1.16666 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= +static const uint16_t filter_7tap_64p_116[231] = { + 0x0020, 0x3E58, 0x0988, 0x0988, 0x3E58, 0x0020, 0x0000, + 0x0024, 0x3E4C, 0x0954, 0x09C0, 0x3E64, 0x0018, 0x0000, + 0x002C, 0x3E44, 0x091C, 0x09F4, 0x3E70, 0x0010, 0x0000, + 0x0030, 0x3E3C, 0x08E8, 0x0A24, 0x3E80, 0x0008, 0x0000, + 0x0034, 0x3E34, 0x08AC, 0x0A5C, 0x3E90, 0x0000, 0x0000, + 0x003C, 0x3E30, 0x0870, 0x0A84, 0x3EA0, 0x3FFC, 0x0004, + 0x0040, 0x3E28, 0x0838, 0x0AB4, 0x3EB4, 0x3FF4, 0x0004, + 0x0044, 0x3E24, 0x07FC, 0x0AE4, 0x3EC8, 0x3FEC, 0x0004, + 0x0048, 0x3E24, 0x07C4, 0x0B08, 0x3EDC, 0x3FE4, 0x0008, + 0x0048, 0x3E20, 0x0788, 0x0B3C, 0x3EF4, 0x3FD8, 0x0008, + 0x004C, 0x3E20, 0x074C, 0x0B60, 0x3F0C, 0x3FD0, 0x000C, + 0x0050, 0x3E20, 0x0710, 0x0B8C, 0x3F24, 0x3FC4, 0x000C, + 0x0050, 0x3E20, 0x06D4, 0x0BB0, 0x3F40, 0x3FBC, 0x0010, + 0x0054, 0x3E24, 0x0698, 0x0BD4, 0x3F5C, 0x3FB0, 0x0010, + 0x0054, 0x3E24, 0x065C, 0x0BFC, 0x3F78, 0x3FA4, 0x0014, + 0x0054, 0x3E28, 0x0624, 0x0C1C, 0x3F98, 0x3F98, 0x0014, + 0x0058, 0x3E2C, 0x05E4, 0x0C3C, 0x3FB8, 0x3F8C, 0x0018, + 0x0058, 0x3E34, 0x05A8, 0x0C58, 0x3FD8, 0x3F80, 0x001C, + 0x0058, 0x3E38, 0x0570, 0x0C78, 
0x3FF8, 0x3F74, 0x001C, + 0x0058, 0x3E40, 0x0534, 0x0C94, 0x0018, 0x3F68, 0x0020, + 0x0058, 0x3E48, 0x04F4, 0x0CAC, 0x0040, 0x3F5C, 0x0024, + 0x0058, 0x3E50, 0x04BC, 0x0CC4, 0x0064, 0x3F50, 0x0024, + 0x0054, 0x3E58, 0x0484, 0x0CD8, 0x008C, 0x3F44, 0x0028, + 0x0054, 0x3E60, 0x0448, 0x0CEC, 0x00B4, 0x3F38, 0x002C, + 0x0054, 0x3E68, 0x0410, 0x0CFC, 0x00E0, 0x3F28, 0x0030, + 0x0054, 0x3E74, 0x03D4, 0x0D0C, 0x010C, 0x3F1C, 0x0030, + 0x0050, 0x3E7C, 0x03A0, 0x0D18, 0x0138, 0x3F10, 0x0034, + 0x0050, 0x3E88, 0x0364, 0x0D24, 0x0164, 0x3F04, 0x0038, + 0x004C, 0x3E94, 0x0330, 0x0D30, 0x0194, 0x3EF4, 0x0038, + 0x004C, 0x3EA0, 0x02F8, 0x0D34, 0x01C4, 0x3EE8, 0x003C, + 0x0048, 0x3EAC, 0x02C0, 0x0D3C, 0x01F4, 0x3EDC, 0x0040, + 0x0048, 0x3EB8, 0x0290, 0x0D3C, 0x0224, 0x3ED0, 0x0040, + 0x0044, 0x3EC4, 0x0258, 0x0D40, 0x0258, 0x3EC4, 0x0044 +}; -static const uint16_t filter_7tap_64p_150[231] = { - 16224, 16380, 2208, 2208, 16380, 16224, 0, - 16232, 16360, 2172, 2236, 16, 16216, 0, - 16236, 16340, 2140, 2268, 40, 16212, 0, - 16244, 16324, 2104, 2296, 60, 16204, 4, - 16252, 16304, 2072, 2324, 84, 16196, 4, - 16256, 16288, 2036, 2352, 108, 16192, 4, - 16264, 16268, 2000, 2380, 132, 16184, 8, - 16272, 16252, 1960, 2408, 160, 16176, 8, - 16276, 16240, 1924, 2432, 184, 16172, 8, - 16284, 16224, 1888, 2456, 212, 16164, 8, - 16288, 16212, 1848, 2480, 240, 16160, 12, - 16296, 16196, 1812, 2500, 268, 16152, 12, - 16300, 16184, 1772, 2524, 296, 16144, 12, - 16308, 16172, 1736, 2544, 324, 16140, 12, - 16312, 16164, 1696, 2564, 356, 16136, 12, - 16320, 16152, 1656, 2584, 388, 16128, 12, - 16324, 16144, 1616, 2600, 416, 16124, 12, - 16328, 16136, 1576, 2616, 448, 16116, 12, - 16332, 16128, 1536, 2632, 480, 16112, 12, - 16340, 16120, 1496, 2648, 516, 16108, 12, - 16344, 16112, 1456, 2660, 548, 16104, 12, - 16348, 16104, 1416, 2672, 580, 16100, 12, - 16352, 16100, 1376, 2684, 616, 16096, 12, - 16356, 16096, 1336, 2696, 652, 16092, 12, - 16360, 16092, 1296, 2704, 688, 16088, 12, - 16364, 16088, 1256, 2712, 720, 16084, 12, - 16368, 16084, 1220, 2720, 760, 16084, 8, - 16368, 16080, 1180, 2724, 796, 16080, 8, - 16372, 16080, 1140, 2732, 832, 16080, 8, - 16376, 16076, 1100, 2732, 868, 16076, 4, - 16380, 16076, 1060, 2736, 908, 16076, 4, - 16380, 16076, 1020, 2740, 944, 16076, 0, - 0, 16076, 984, 2740, 984, 16076, 0 }; +//========================================= +// <num_taps> = 7 +// <num_phases> = 64 +// <scale_ratio> = 1.49999 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= +static const uint16_t filter_7tap_64p_149[231] = { + 0x3F68, 0x3FEC, 0x08A8, 0x08AC, 0x3FF0, 0x3F68, 0x0000, + 0x3F70, 0x3FDC, 0x0888, 0x08CC, 0x0000, 0x3F60, 0x0000, + 0x3F74, 0x3FC8, 0x0868, 0x08F0, 0x0014, 0x3F58, 0x0000, + 0x3F7C, 0x3FB4, 0x0844, 0x0908, 0x002C, 0x3F54, 0x0004, + 0x3F84, 0x3FA4, 0x0820, 0x0924, 0x0044, 0x3F4C, 0x0004, + 0x3F88, 0x3F90, 0x0800, 0x0944, 0x005C, 0x3F44, 0x0004, + 0x3F90, 0x3F80, 0x07D8, 0x095C, 0x0074, 0x3F40, 0x0008, + 0x3F98, 0x3F70, 0x07B0, 0x097C, 0x008C, 0x3F38, 0x0008, + 0x3F9C, 0x3F60, 0x0790, 0x0994, 0x00A8, 0x3F30, 0x0008, + 0x3FA4, 0x3F54, 0x0764, 0x09B0, 0x00C4, 0x3F28, 0x0008, + 0x3FA8, 0x3F48, 0x0740, 0x09C4, 0x00DC, 0x3F24, 0x000C, + 0x3FB0, 0x3F38, 0x0718, 0x09DC, 0x00FC, 0x3F1C, 0x000C, + 0x3FB4, 0x3F2C, 0x06F0, 0x09F4, 0x0118, 0x3F18, 0x000C, + 0x3FBC, 0x3F24, 0x06C8, 0x0A08, 0x0134, 0x3F10, 0x000C, + 0x3FC0, 0x3F18, 0x06A0, 0x0A1C, 0x0154, 0x3F08, 0x0010, + 0x3FC8, 0x3F10, 0x0678, 0x0A2C, 
0x0170, 0x3F04, 0x0010, + 0x3FCC, 0x3F04, 0x0650, 0x0A40, 0x0190, 0x3F00, 0x0010, + 0x3FD0, 0x3EFC, 0x0628, 0x0A54, 0x01B0, 0x3EF8, 0x0010, + 0x3FD4, 0x3EF4, 0x0600, 0x0A64, 0x01D0, 0x3EF4, 0x0010, + 0x3FDC, 0x3EEC, 0x05D8, 0x0A6C, 0x01F4, 0x3EF0, 0x0010, + 0x3FE0, 0x3EE8, 0x05B0, 0x0A7C, 0x0214, 0x3EE8, 0x0010, + 0x3FE4, 0x3EE0, 0x0588, 0x0A88, 0x0238, 0x3EE4, 0x0010, + 0x3FE8, 0x3EDC, 0x055C, 0x0A98, 0x0258, 0x3EE0, 0x0010, + 0x3FEC, 0x3ED8, 0x0534, 0x0AA0, 0x027C, 0x3EDC, 0x0010, + 0x3FF0, 0x3ED4, 0x050C, 0x0AAC, 0x02A0, 0x3ED8, 0x000C, + 0x3FF4, 0x3ED0, 0x04E4, 0x0AB4, 0x02C4, 0x3ED4, 0x000C, + 0x3FF4, 0x3ECC, 0x04C0, 0x0ABC, 0x02E8, 0x3ED0, 0x000C, + 0x3FF8, 0x3ECC, 0x0494, 0x0AC0, 0x030C, 0x3ED0, 0x000C, + 0x3FFC, 0x3EC8, 0x046C, 0x0AC8, 0x0334, 0x3ECC, 0x0008, + 0x0000, 0x3EC8, 0x0444, 0x0AC8, 0x0358, 0x3ECC, 0x0008, + 0x0000, 0x3EC8, 0x041C, 0x0ACC, 0x0380, 0x3EC8, 0x0008, + 0x0000, 0x3EC8, 0x03F4, 0x0AD0, 0x03A8, 0x3EC8, 0x0004, + 0x0004, 0x3EC8, 0x03CC, 0x0AD0, 0x03CC, 0x3EC8, 0x0004 +}; +//========================================= +// <num_taps> = 7 +// <num_phases> = 64 +// <scale_ratio> = 1.83332 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_7tap_64p_183[231] = { - 16216, 324, 1884, 1884, 324, 16216, 0, - 16220, 304, 1864, 1904, 344, 16216, 0, - 16224, 284, 1844, 1924, 364, 16216, 0, - 16224, 264, 1824, 1944, 384, 16212, 16380, - 16228, 248, 1804, 1960, 408, 16212, 16380, - 16228, 228, 1784, 1976, 428, 16208, 16380, - 16232, 212, 1760, 1996, 452, 16208, 16380, - 16236, 192, 1740, 2012, 472, 16208, 16376, - 16240, 176, 1716, 2028, 496, 16208, 16376, - 16240, 160, 1696, 2040, 516, 16208, 16376, - 16244, 144, 1672, 2056, 540, 16208, 16376, - 16248, 128, 1648, 2068, 564, 16208, 16372, - 16252, 112, 1624, 2084, 588, 16208, 16372, - 16256, 96, 1600, 2096, 612, 16208, 16368, - 16256, 84, 1576, 2108, 636, 16208, 16368, - 16260, 68, 1552, 2120, 660, 16208, 16368, - 16264, 56, 1524, 2132, 684, 16212, 16364, - 16268, 40, 1500, 2140, 712, 16212, 16364, - 16272, 28, 1476, 2152, 736, 16216, 16360, - 16276, 16, 1448, 2160, 760, 16216, 16356, - 16280, 4, 1424, 2168, 788, 16220, 16356, - 16284, 16376, 1396, 2176, 812, 16224, 16352, - 16288, 16368, 1372, 2184, 840, 16224, 16352, - 16292, 16356, 1344, 2188, 864, 16228, 16348, - 16292, 16344, 1320, 2196, 892, 16232, 16344, - 16296, 16336, 1292, 2200, 916, 16236, 16344, - 16300, 16324, 1264, 2204, 944, 16240, 16340, - 16304, 16316, 1240, 2208, 972, 16248, 16336, - 16308, 16308, 1212, 2212, 996, 16252, 16332, - 16312, 16300, 1184, 2216, 1024, 16256, 16332, - 16316, 16292, 1160, 2216, 1052, 16264, 16328, - 16316, 16284, 1132, 2216, 1076, 16268, 16324, - 16320, 16276, 1104, 2216, 1104, 16276, 16320 }; + 0x3FA4, 0x01E8, 0x0674, 0x0674, 0x01E8, 0x3FA4, 0x0000, + 0x3FA4, 0x01D4, 0x0668, 0x0684, 0x01F8, 0x3FA4, 0x0000, + 0x3FA4, 0x01C4, 0x0658, 0x0690, 0x0208, 0x3FA8, 0x0000, + 0x3FA0, 0x01B4, 0x064C, 0x06A0, 0x021C, 0x3FA8, 0x3FFC, + 0x3FA0, 0x01A4, 0x063C, 0x06AC, 0x022C, 0x3FAC, 0x3FFC, + 0x3FA0, 0x0194, 0x0630, 0x06B4, 0x0240, 0x3FAC, 0x3FFC, + 0x3FA0, 0x0184, 0x0620, 0x06C4, 0x0250, 0x3FB0, 0x3FF8, + 0x3FA0, 0x0174, 0x0614, 0x06CC, 0x0264, 0x3FB0, 0x3FF8, + 0x3FA0, 0x0164, 0x0604, 0x06D8, 0x0278, 0x3FB4, 0x3FF4, + 0x3FA0, 0x0154, 0x05F4, 0x06E4, 0x0288, 0x3FB8, 0x3FF4, + 0x3FA0, 0x0148, 0x05E4, 0x06EC, 0x029C, 0x3FBC, 0x3FF0, + 0x3FA0, 0x0138, 0x05D4, 0x06F4, 0x02B0, 0x3FC0, 0x3FF0, + 0x3FA0, 0x0128, 0x05C4, 
0x0704, 0x02C4, 0x3FC0, 0x3FEC, + 0x3FA0, 0x011C, 0x05B4, 0x0708, 0x02D8, 0x3FC4, 0x3FEC, + 0x3FA4, 0x010C, 0x05A4, 0x0714, 0x02E8, 0x3FC8, 0x3FE8, + 0x3FA4, 0x0100, 0x0590, 0x0718, 0x02FC, 0x3FD0, 0x3FE8, + 0x3FA4, 0x00F0, 0x0580, 0x0724, 0x0310, 0x3FD4, 0x3FE4, + 0x3FA4, 0x00E4, 0x056C, 0x072C, 0x0324, 0x3FD8, 0x3FE4, + 0x3FA8, 0x00D8, 0x055C, 0x0730, 0x0338, 0x3FDC, 0x3FE0, + 0x3FA8, 0x00CC, 0x0548, 0x0738, 0x034C, 0x3FE4, 0x3FDC, + 0x3FA8, 0x00BC, 0x0538, 0x0740, 0x0360, 0x3FE8, 0x3FDC, + 0x3FAC, 0x00B0, 0x0528, 0x0744, 0x0374, 0x3FEC, 0x3FD8, + 0x3FAC, 0x00A4, 0x0514, 0x0748, 0x0388, 0x3FF4, 0x3FD8, + 0x3FB0, 0x0098, 0x0500, 0x074C, 0x039C, 0x3FFC, 0x3FD4, + 0x3FB0, 0x0090, 0x04EC, 0x0750, 0x03B0, 0x0000, 0x3FD4, + 0x3FB0, 0x0084, 0x04DC, 0x0758, 0x03C4, 0x0004, 0x3FD0, + 0x3FB4, 0x0078, 0x04CC, 0x0758, 0x03D8, 0x000C, 0x3FCC, + 0x3FB4, 0x006C, 0x04B8, 0x075C, 0x03EC, 0x0014, 0x3FCC, + 0x3FB8, 0x0064, 0x04A0, 0x0760, 0x0400, 0x001C, 0x3FC8, + 0x3FB8, 0x0058, 0x0490, 0x0760, 0x0414, 0x0024, 0x3FC8, + 0x3FBC, 0x0050, 0x047C, 0x0760, 0x0428, 0x002C, 0x3FC4, + 0x3FBC, 0x0048, 0x0464, 0x0764, 0x043C, 0x0034, 0x3FC4, + 0x3FC0, 0x003C, 0x0454, 0x0764, 0x0450, 0x003C, 0x3FC0 +}; +//========================================= +// <num_taps> = 8 +// <num_phases> = 64 +// <scale_ratio> = 0.83333 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_8tap_64p_upscale[264] = { - 0, 0, 0, 4096, 0, 0, 0, 0, - 16376, 20, 16328, 4092, 56, 16364, 4, 0, - 16372, 36, 16272, 4088, 116, 16340, 12, 0, - 16364, 56, 16220, 4080, 180, 16320, 20, 0, - 16360, 76, 16172, 4064, 244, 16296, 24, 16380, - 16356, 92, 16124, 4048, 312, 16276, 32, 16380, - 16352, 108, 16080, 4032, 380, 16252, 40, 16380, - 16344, 124, 16036, 4008, 452, 16228, 48, 16380, - 16340, 136, 15996, 3980, 524, 16204, 56, 16380, - 16340, 152, 15956, 3952, 600, 16180, 64, 16376, - 16336, 164, 15920, 3920, 672, 16156, 76, 16376, - 16332, 176, 15888, 3884, 752, 16132, 84, 16376, - 16328, 188, 15860, 3844, 828, 16104, 92, 16372, - 16328, 200, 15828, 3800, 908, 16080, 100, 16372, - 16324, 208, 15804, 3756, 992, 16056, 108, 16372, - 16324, 216, 15780, 3708, 1072, 16032, 120, 16368, - 16320, 224, 15760, 3656, 1156, 16008, 128, 16368, - 16320, 232, 15740, 3604, 1240, 15984, 136, 16364, - 16320, 240, 15724, 3548, 1324, 15960, 144, 16364, - 16320, 244, 15708, 3488, 1412, 15936, 152, 16360, - 16320, 248, 15696, 3428, 1496, 15912, 160, 16360, - 16320, 252, 15688, 3364, 1584, 15892, 172, 16356, - 16320, 256, 15680, 3296, 1672, 15868, 180, 16352, - 16320, 256, 15672, 3228, 1756, 15848, 188, 16352, - 16320, 256, 15668, 3156, 1844, 15828, 192, 16348, - 16320, 260, 15668, 3084, 1932, 15808, 200, 16348, - 16320, 256, 15668, 3012, 2020, 15792, 208, 16344, - 16324, 256, 15668, 2936, 2108, 15772, 216, 16344, - 16324, 256, 15672, 2856, 2192, 15756, 220, 16340, - 16324, 252, 15676, 2776, 2280, 15740, 228, 16336, - 16328, 252, 15684, 2696, 2364, 15728, 232, 16336, - 16328, 248, 15692, 2616, 2448, 15716, 240, 16332, - 16332, 244, 15704, 2532, 2532, 15704, 244, 16332 }; + 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x3FFC, 0x0014, 0x3FC8, 0x1000, 0x0038, 0x3FEC, 0x0004, 0x0000, + 0x3FF4, 0x0024, 0x3F94, 0x0FFC, 0x0074, 0x3FD8, 0x000C, 0x0000, + 0x3FF0, 0x0038, 0x3F60, 0x0FEC, 0x00B4, 0x3FC4, 0x0014, 0x0000, + 0x3FEC, 0x004C, 0x3F2C, 0x0FE4, 0x00F4, 0x3FAC, 0x0018, 0x0000, + 0x3FE4, 0x005C, 0x3F00, 0x0FD4, 0x0138, 0x3F94, 
0x0020, 0x0000, + 0x3FE0, 0x006C, 0x3ED0, 0x0FC4, 0x017C, 0x3F7C, 0x0028, 0x0000, + 0x3FDC, 0x007C, 0x3EA8, 0x0FA4, 0x01C4, 0x3F68, 0x0030, 0x0000, + 0x3FD8, 0x0088, 0x3E80, 0x0F90, 0x020C, 0x3F50, 0x0038, 0x3FFC, + 0x3FD4, 0x0098, 0x3E58, 0x0F70, 0x0258, 0x3F38, 0x0040, 0x3FFC, + 0x3FD0, 0x00A4, 0x3E34, 0x0F54, 0x02A0, 0x3F1C, 0x004C, 0x3FFC, + 0x3FD0, 0x00B0, 0x3E14, 0x0F28, 0x02F0, 0x3F04, 0x0054, 0x3FFC, + 0x3FCC, 0x00BC, 0x3DF4, 0x0F08, 0x033C, 0x3EEC, 0x005C, 0x3FF8, + 0x3FC8, 0x00C8, 0x3DD8, 0x0EDC, 0x038C, 0x3ED4, 0x0064, 0x3FF8, + 0x3FC8, 0x00D0, 0x3DC0, 0x0EAC, 0x03E0, 0x3EBC, 0x006C, 0x3FF4, + 0x3FC4, 0x00D8, 0x3DA8, 0x0E7C, 0x0430, 0x3EA4, 0x0078, 0x3FF4, + 0x3FC4, 0x00E0, 0x3D94, 0x0E48, 0x0484, 0x3E8C, 0x0080, 0x3FF0, + 0x3FC4, 0x00E8, 0x3D80, 0x0E10, 0x04D8, 0x3E74, 0x0088, 0x3FF0, + 0x3FC4, 0x00F0, 0x3D70, 0x0DD8, 0x052C, 0x3E5C, 0x0090, 0x3FEC, + 0x3FC0, 0x00F4, 0x3D60, 0x0DA0, 0x0584, 0x3E44, 0x0098, 0x3FEC, + 0x3FC0, 0x00F8, 0x3D54, 0x0D68, 0x05D8, 0x3E2C, 0x00A0, 0x3FE8, + 0x3FC0, 0x00FC, 0x3D48, 0x0D20, 0x0630, 0x3E18, 0x00AC, 0x3FE8, + 0x3FC0, 0x0100, 0x3D40, 0x0CE0, 0x0688, 0x3E00, 0x00B4, 0x3FE4, + 0x3FC4, 0x0100, 0x3D3C, 0x0C98, 0x06DC, 0x3DEC, 0x00BC, 0x3FE4, + 0x3FC4, 0x0100, 0x3D38, 0x0C58, 0x0734, 0x3DD8, 0x00C0, 0x3FE0, + 0x3FC4, 0x0104, 0x3D38, 0x0C0C, 0x078C, 0x3DC4, 0x00C8, 0x3FDC, + 0x3FC4, 0x0100, 0x3D38, 0x0BC4, 0x07E4, 0x3DB0, 0x00D0, 0x3FDC, + 0x3FC4, 0x0100, 0x3D38, 0x0B78, 0x083C, 0x3DA0, 0x00D8, 0x3FD8, + 0x3FC8, 0x0100, 0x3D3C, 0x0B28, 0x0890, 0x3D90, 0x00DC, 0x3FD8, + 0x3FC8, 0x00FC, 0x3D40, 0x0ADC, 0x08E8, 0x3D80, 0x00E4, 0x3FD4, + 0x3FCC, 0x00FC, 0x3D48, 0x0A84, 0x093C, 0x3D74, 0x00E8, 0x3FD4, + 0x3FCC, 0x00F8, 0x3D50, 0x0A38, 0x0990, 0x3D64, 0x00F0, 0x3FD0, + 0x3FD0, 0x00F4, 0x3D58, 0x09E0, 0x09E4, 0x3D5C, 0x00F4, 0x3FD0 +}; -static const uint16_t filter_8tap_64p_117[264] = { - 116, 16100, 428, 3564, 428, 16100, 116, 0, - 112, 16116, 376, 3564, 484, 16084, 120, 16380, - 104, 16136, 324, 3560, 540, 16064, 124, 16380, - 100, 16152, 272, 3556, 600, 16048, 128, 16380, - 96, 16168, 220, 3548, 656, 16032, 136, 16376, - 88, 16188, 172, 3540, 716, 16016, 140, 16376, - 84, 16204, 124, 3528, 780, 16000, 144, 16376, - 80, 16220, 76, 3512, 840, 15984, 148, 16372, - 76, 16236, 32, 3496, 904, 15968, 152, 16372, - 68, 16252, 16376, 3480, 968, 15952, 156, 16372, - 64, 16268, 16332, 3456, 1032, 15936, 160, 16372, - 60, 16284, 16292, 3432, 1096, 15920, 164, 16368, - 56, 16300, 16252, 3408, 1164, 15908, 164, 16368, - 48, 16316, 16216, 3380, 1228, 15892, 168, 16368, - 44, 16332, 16180, 3348, 1296, 15880, 168, 16368, - 40, 16348, 16148, 3316, 1364, 15868, 172, 16364, - 36, 16360, 16116, 3284, 1428, 15856, 172, 16364, - 32, 16376, 16084, 3248, 1496, 15848, 176, 16364, - 28, 4, 16052, 3208, 1564, 15836, 176, 16364, - 24, 16, 16028, 3168, 1632, 15828, 176, 16364, - 20, 28, 16000, 3124, 1700, 15820, 176, 16364, - 16, 40, 15976, 3080, 1768, 15812, 176, 16364, - 12, 52, 15952, 3036, 1836, 15808, 176, 16364, - 8, 64, 15932, 2988, 1904, 15800, 176, 16364, - 4, 76, 15912, 2940, 1972, 15800, 172, 16364, - 4, 84, 15892, 2888, 2040, 15796, 172, 16364, - 0, 96, 15876, 2836, 2104, 15792, 168, 16364, - 16380, 104, 15864, 2780, 2172, 15792, 164, 16364, - 16380, 112, 15848, 2724, 2236, 15792, 160, 16364, - 16376, 120, 15836, 2668, 2300, 15796, 156, 16368, - 16376, 128, 15828, 2608, 2364, 15800, 152, 16368, - 16372, 136, 15816, 2548, 2428, 15804, 148, 16368, - 16372, 140, 15812, 2488, 2488, 15812, 140, 16372 }; +//========================================= +// <num_taps> = 8 +// 
<num_phases> = 64 +// <scale_ratio> = 1.16666 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= +static const uint16_t filter_8tap_64p_116[264] = { + 0x0080, 0x3E90, 0x0268, 0x0D14, 0x0264, 0x3E90, 0x0080, 0x0000, + 0x007C, 0x3E9C, 0x0238, 0x0D14, 0x0298, 0x3E84, 0x0080, 0x0000, + 0x0078, 0x3EAC, 0x0200, 0x0D10, 0x02D0, 0x3E78, 0x0084, 0x0000, + 0x0078, 0x3EB8, 0x01D0, 0x0D0C, 0x0304, 0x3E6C, 0x0084, 0x0000, + 0x0074, 0x3EC8, 0x01A0, 0x0D00, 0x033C, 0x3E60, 0x0088, 0x0000, + 0x0070, 0x3ED4, 0x0170, 0x0D00, 0x0374, 0x3E54, 0x0088, 0x3FFC, + 0x006C, 0x3EE4, 0x0140, 0x0CF8, 0x03AC, 0x3E48, 0x0088, 0x3FFC, + 0x006C, 0x3EF0, 0x0114, 0x0CE8, 0x03E4, 0x3E3C, 0x008C, 0x3FFC, + 0x0068, 0x3F00, 0x00E8, 0x0CD8, 0x041C, 0x3E34, 0x008C, 0x3FFC, + 0x0064, 0x3F10, 0x00BC, 0x0CCC, 0x0454, 0x3E28, 0x008C, 0x3FFC, + 0x0060, 0x3F1C, 0x0090, 0x0CBC, 0x0490, 0x3E20, 0x008C, 0x3FFC, + 0x005C, 0x3F2C, 0x0068, 0x0CA4, 0x04CC, 0x3E18, 0x008C, 0x3FFC, + 0x0058, 0x3F38, 0x0040, 0x0C94, 0x0504, 0x3E10, 0x008C, 0x3FFC, + 0x0054, 0x3F48, 0x001C, 0x0C7C, 0x0540, 0x3E08, 0x0088, 0x3FFC, + 0x0050, 0x3F54, 0x3FF8, 0x0C60, 0x057C, 0x3E04, 0x0088, 0x3FFC, + 0x004C, 0x3F64, 0x3FD4, 0x0C44, 0x05B8, 0x3DFC, 0x0088, 0x3FFC, + 0x0048, 0x3F70, 0x3FB4, 0x0C28, 0x05F4, 0x3DF8, 0x0084, 0x3FFC, + 0x0044, 0x3F80, 0x3F90, 0x0C0C, 0x0630, 0x3DF4, 0x0080, 0x3FFC, + 0x0040, 0x3F8C, 0x3F70, 0x0BE8, 0x066C, 0x3DF4, 0x0080, 0x3FFC, + 0x003C, 0x3F9C, 0x3F50, 0x0BC8, 0x06A8, 0x3DF0, 0x007C, 0x3FFC, + 0x0038, 0x3FA8, 0x3F34, 0x0BA0, 0x06E4, 0x3DF0, 0x0078, 0x0000, + 0x0034, 0x3FB4, 0x3F18, 0x0B80, 0x071C, 0x3DF0, 0x0074, 0x0000, + 0x0030, 0x3FC0, 0x3EFC, 0x0B5C, 0x0758, 0x3DF0, 0x0070, 0x0000, + 0x002C, 0x3FCC, 0x3EE4, 0x0B34, 0x0794, 0x3DF4, 0x0068, 0x0000, + 0x002C, 0x3FDC, 0x3ECC, 0x0B08, 0x07CC, 0x3DF4, 0x0064, 0x0000, + 0x0028, 0x3FE4, 0x3EB4, 0x0AE0, 0x0808, 0x3DF8, 0x0060, 0x0000, + 0x0024, 0x3FF0, 0x3EA0, 0x0AB0, 0x0840, 0x3E00, 0x0058, 0x0004, + 0x0020, 0x3FFC, 0x3E90, 0x0A84, 0x0878, 0x3E04, 0x0050, 0x0004, + 0x001C, 0x0004, 0x3E7C, 0x0A54, 0x08B0, 0x3E0C, 0x004C, 0x0008, + 0x0018, 0x000C, 0x3E68, 0x0A28, 0x08E8, 0x3E18, 0x0044, 0x0008, + 0x0018, 0x0018, 0x3E54, 0x09F4, 0x0920, 0x3E20, 0x003C, 0x000C, + 0x0014, 0x0020, 0x3E48, 0x09C0, 0x0954, 0x3E2C, 0x0034, 0x0010, + 0x0010, 0x002C, 0x3E3C, 0x098C, 0x0988, 0x3E38, 0x002C, 0x0010 +}; -static const uint16_t filter_8tap_64p_150[264] = { - 16380, 16020, 1032, 2756, 1032, 16020, 16380, 0, - 0, 16020, 992, 2756, 1068, 16024, 16376, 0, - 4, 16020, 952, 2752, 1108, 16024, 16372, 0, - 8, 16020, 916, 2748, 1148, 16028, 16368, 0, - 12, 16020, 876, 2744, 1184, 16032, 16364, 4, - 16, 16020, 840, 2740, 1224, 16036, 16356, 4, - 20, 16024, 800, 2732, 1264, 16040, 16352, 4, - 20, 16024, 764, 2724, 1304, 16044, 16348, 8, - 24, 16028, 728, 2716, 1344, 16052, 16340, 8, - 28, 16028, 692, 2704, 1380, 16056, 16336, 12, - 28, 16032, 656, 2696, 1420, 16064, 16328, 12, - 32, 16036, 620, 2684, 1460, 16072, 16324, 12, - 36, 16040, 584, 2668, 1500, 16080, 16316, 16, - 36, 16044, 548, 2656, 1536, 16088, 16308, 16, - 36, 16048, 516, 2640, 1576, 16096, 16304, 20, - 40, 16052, 480, 2624, 1612, 16108, 16296, 20, - 40, 16060, 448, 2608, 1652, 16120, 16288, 20, - 44, 16064, 416, 2588, 1692, 16132, 16280, 24, - 44, 16068, 384, 2568, 1728, 16144, 16276, 24, - 44, 16076, 352, 2548, 1764, 16156, 16268, 28, - 44, 16080, 320, 2528, 1804, 16168, 16260, 28, - 44, 16088, 292, 2508, 1840, 16184, 16252, 28, - 44, 16096, 264, 
2484, 1876, 16200, 16244, 32, - 48, 16100, 232, 2460, 1912, 16216, 16236, 32, - 48, 16108, 204, 2436, 1948, 16232, 16228, 32, - 48, 16116, 176, 2412, 1980, 16248, 16220, 36, - 48, 16124, 152, 2384, 2016, 16264, 16216, 36, - 44, 16128, 124, 2356, 2052, 16284, 16208, 36, - 44, 16136, 100, 2328, 2084, 16304, 16200, 40, - 44, 16144, 72, 2300, 2116, 16324, 16192, 40, - 44, 16152, 48, 2272, 2148, 16344, 16184, 40, - 44, 16160, 24, 2244, 2180, 16364, 16176, 40, - 44, 16168, 4, 2212, 2212, 4, 16168, 44 }; +//========================================= +// <num_taps> = 8 +// <num_phases> = 64 +// <scale_ratio> = 1.49999 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= +static const uint16_t filter_8tap_64p_149[264] = { + 0x0008, 0x3E8C, 0x03F8, 0x0AE8, 0x03F8, 0x3E8C, 0x0008, 0x0000, + 0x000C, 0x3E8C, 0x03D0, 0x0AE8, 0x0420, 0x3E90, 0x0000, 0x0000, + 0x000C, 0x3E8C, 0x03AC, 0x0AE8, 0x0444, 0x3E90, 0x0000, 0x0000, + 0x0010, 0x3E90, 0x0384, 0x0AE0, 0x046C, 0x3E94, 0x3FFC, 0x0000, + 0x0014, 0x3E90, 0x035C, 0x0ADC, 0x0494, 0x3E94, 0x3FF8, 0x0004, + 0x0018, 0x3E90, 0x0334, 0x0AD8, 0x04BC, 0x3E98, 0x3FF4, 0x0004, + 0x001C, 0x3E94, 0x0310, 0x0AD0, 0x04E4, 0x3E9C, 0x3FEC, 0x0004, + 0x0020, 0x3E98, 0x02E8, 0x0AC4, 0x050C, 0x3EA0, 0x3FE8, 0x0008, + 0x0020, 0x3E98, 0x02C4, 0x0AC0, 0x0534, 0x3EA4, 0x3FE4, 0x0008, + 0x0024, 0x3E9C, 0x02A0, 0x0AB4, 0x055C, 0x3EAC, 0x3FDC, 0x0008, + 0x0024, 0x3EA0, 0x027C, 0x0AA8, 0x0584, 0x3EB0, 0x3FD8, 0x000C, + 0x0028, 0x3EA4, 0x0258, 0x0A9C, 0x05AC, 0x3EB8, 0x3FD0, 0x000C, + 0x0028, 0x3EA8, 0x0234, 0x0A90, 0x05D4, 0x3EC0, 0x3FC8, 0x0010, + 0x002C, 0x3EAC, 0x0210, 0x0A80, 0x05FC, 0x3EC8, 0x3FC4, 0x0010, + 0x002C, 0x3EB4, 0x01F0, 0x0A70, 0x0624, 0x3ED0, 0x3FBC, 0x0010, + 0x002C, 0x3EB8, 0x01CC, 0x0A60, 0x064C, 0x3EDC, 0x3FB4, 0x0014, + 0x0030, 0x3EBC, 0x01A8, 0x0A50, 0x0674, 0x3EE4, 0x3FB0, 0x0014, + 0x0030, 0x3EC4, 0x0188, 0x0A38, 0x069C, 0x3EF0, 0x3FA8, 0x0018, + 0x0030, 0x3ECC, 0x0168, 0x0A28, 0x06C0, 0x3EFC, 0x3FA0, 0x0018, + 0x0030, 0x3ED0, 0x0148, 0x0A14, 0x06E8, 0x3F08, 0x3F98, 0x001C, + 0x0030, 0x3ED8, 0x012C, 0x0A00, 0x070C, 0x3F14, 0x3F90, 0x001C, + 0x0034, 0x3EE0, 0x0108, 0x09E4, 0x0734, 0x3F24, 0x3F8C, 0x001C, + 0x0034, 0x3EE4, 0x00EC, 0x09CC, 0x0758, 0x3F34, 0x3F84, 0x0020, + 0x0034, 0x3EEC, 0x00D0, 0x09B8, 0x077C, 0x3F40, 0x3F7C, 0x0020, + 0x0034, 0x3EF4, 0x00B4, 0x0998, 0x07A4, 0x3F50, 0x3F74, 0x0024, + 0x0030, 0x3EFC, 0x0098, 0x0980, 0x07C8, 0x3F64, 0x3F6C, 0x0024, + 0x0030, 0x3F04, 0x0080, 0x0968, 0x07E8, 0x3F74, 0x3F64, 0x0024, + 0x0030, 0x3F0C, 0x0060, 0x094C, 0x080C, 0x3F88, 0x3F5C, 0x0028, + 0x0030, 0x3F14, 0x0048, 0x0930, 0x0830, 0x3F98, 0x3F54, 0x0028, + 0x0030, 0x3F1C, 0x0030, 0x0914, 0x0850, 0x3FAC, 0x3F4C, 0x0028, + 0x0030, 0x3F24, 0x0018, 0x08F0, 0x0874, 0x3FC0, 0x3F44, 0x002C, + 0x002C, 0x3F2C, 0x0000, 0x08D4, 0x0894, 0x3FD8, 0x3F3C, 0x002C, + 0x002C, 0x3F34, 0x3FEC, 0x08B4, 0x08B4, 0x3FEC, 0x3F34, 0x002C +}; +//========================================= +// <num_taps> = 8 +// <num_phases> = 64 +// <scale_ratio> = 1.83332 (input/output) +// <sharpness> = 0 +// <CoefType> = ModifiedLanczos +// <CoefQuant> = 1.10 +// <CoefOut> = 1.12 +//========================================= static const uint16_t filter_8tap_64p_183[264] = { - 16264, 16264, 1164, 2244, 1164, 16264, 16264, 0, - 16268, 16256, 1136, 2240, 1188, 16272, 16260, 0, - 16272, 16248, 1108, 2240, 1216, 16280, 16256, 0, - 16276, 16240, 1080, 2236, 1240, 16292, 16252, 0, - 16280, 16232, 1056, 
2236, 1268, 16300, 16248, 0, - 16284, 16224, 1028, 2232, 1292, 16312, 16244, 0, - 16288, 16216, 1000, 2228, 1320, 16324, 16240, 0, - 16292, 16212, 976, 2224, 1344, 16336, 16236, 0, - 16296, 16204, 948, 2220, 1372, 16348, 16232, 0, - 16300, 16200, 920, 2212, 1396, 16360, 16228, 4, - 16304, 16196, 896, 2204, 1424, 16372, 16224, 4, - 16308, 16188, 868, 2200, 1448, 0, 16220, 4, - 16312, 16184, 844, 2192, 1472, 12, 16216, 4, - 16316, 16180, 816, 2184, 1500, 28, 16212, 4, - 16320, 16176, 792, 2172, 1524, 40, 16208, 4, - 16324, 16172, 764, 2164, 1548, 56, 16204, 0, - 16328, 16172, 740, 2156, 1572, 72, 16200, 0, - 16328, 16168, 712, 2144, 1596, 88, 16196, 0, - 16332, 16164, 688, 2132, 1620, 100, 16192, 0, - 16336, 16164, 664, 2120, 1644, 120, 16192, 0, - 16340, 16160, 640, 2108, 1668, 136, 16188, 0, - 16344, 16160, 616, 2096, 1688, 152, 16184, 0, - 16344, 16160, 592, 2080, 1712, 168, 16180, 0, - 16348, 16156, 568, 2068, 1736, 188, 16176, 16380, - 16352, 16156, 544, 2052, 1756, 204, 16176, 16380, - 16352, 16156, 520, 2036, 1780, 224, 16172, 16380, - 16356, 16156, 496, 2024, 1800, 244, 16172, 16380, - 16360, 16156, 472, 2008, 1820, 260, 16168, 16376, - 16360, 16156, 452, 1988, 1840, 280, 16164, 16376, - 16364, 16156, 428, 1972, 1860, 300, 16164, 16376, - 16364, 16156, 408, 1956, 1880, 320, 16164, 16372, - 16368, 16160, 384, 1936, 1900, 344, 16160, 16372, - 16368, 16160, 364, 1920, 1920, 364, 16160, 16368 }; + 0x3F88, 0x0048, 0x047C, 0x0768, 0x047C, 0x0048, 0x3F88, 0x0000, + 0x3F88, 0x003C, 0x0468, 0x076C, 0x0490, 0x0054, 0x3F84, 0x0000, + 0x3F8C, 0x0034, 0x0454, 0x0768, 0x04A4, 0x005C, 0x3F84, 0x0000, + 0x3F8C, 0x0028, 0x0444, 0x076C, 0x04B4, 0x0068, 0x3F80, 0x0000, + 0x3F90, 0x0020, 0x042C, 0x0768, 0x04C8, 0x0074, 0x3F80, 0x0000, + 0x3F90, 0x0018, 0x041C, 0x0764, 0x04DC, 0x0080, 0x3F7C, 0x0000, + 0x3F94, 0x0010, 0x0408, 0x075C, 0x04F0, 0x008C, 0x3F7C, 0x0000, + 0x3F94, 0x0004, 0x03F8, 0x0760, 0x0500, 0x0098, 0x3F7C, 0x3FFC, + 0x3F98, 0x0000, 0x03E0, 0x075C, 0x0514, 0x00A4, 0x3F78, 0x3FFC, + 0x3F9C, 0x3FF8, 0x03CC, 0x0754, 0x0528, 0x00B0, 0x3F78, 0x3FFC, + 0x3F9C, 0x3FF0, 0x03B8, 0x0754, 0x0538, 0x00BC, 0x3F78, 0x3FFC, + 0x3FA0, 0x3FE8, 0x03A4, 0x0750, 0x054C, 0x00CC, 0x3F74, 0x3FF8, + 0x3FA4, 0x3FE0, 0x0390, 0x074C, 0x055C, 0x00D8, 0x3F74, 0x3FF8, + 0x3FA4, 0x3FDC, 0x037C, 0x0744, 0x0570, 0x00E4, 0x3F74, 0x3FF8, + 0x3FA8, 0x3FD4, 0x0368, 0x0740, 0x0580, 0x00F4, 0x3F74, 0x3FF4, + 0x3FA8, 0x3FCC, 0x0354, 0x073C, 0x0590, 0x0104, 0x3F74, 0x3FF4, + 0x3FAC, 0x3FC8, 0x0340, 0x0730, 0x05A4, 0x0110, 0x3F74, 0x3FF4, + 0x3FB0, 0x3FC0, 0x0330, 0x0728, 0x05B4, 0x0120, 0x3F74, 0x3FF0, + 0x3FB0, 0x3FBC, 0x031C, 0x0724, 0x05C4, 0x0130, 0x3F70, 0x3FF0, + 0x3FB4, 0x3FB4, 0x0308, 0x0720, 0x05D4, 0x013C, 0x3F70, 0x3FF0, + 0x3FB8, 0x3FB0, 0x02F4, 0x0714, 0x05E4, 0x014C, 0x3F74, 0x3FEC, + 0x3FB8, 0x3FAC, 0x02E0, 0x0708, 0x05F8, 0x015C, 0x3F74, 0x3FEC, + 0x3FBC, 0x3FA8, 0x02CC, 0x0704, 0x0604, 0x016C, 0x3F74, 0x3FE8, + 0x3FC0, 0x3FA0, 0x02BC, 0x06F8, 0x0614, 0x017C, 0x3F74, 0x3FE8, + 0x3FC0, 0x3F9C, 0x02A8, 0x06F4, 0x0624, 0x018C, 0x3F74, 0x3FE4, + 0x3FC4, 0x3F98, 0x0294, 0x06E8, 0x0634, 0x019C, 0x3F74, 0x3FE4, + 0x3FC8, 0x3F94, 0x0284, 0x06D8, 0x0644, 0x01AC, 0x3F78, 0x3FE0, + 0x3FC8, 0x3F90, 0x0270, 0x06D4, 0x0650, 0x01BC, 0x3F78, 0x3FE0, + 0x3FCC, 0x3F8C, 0x025C, 0x06C8, 0x0660, 0x01D0, 0x3F78, 0x3FDC, + 0x3FCC, 0x3F8C, 0x024C, 0x06B8, 0x066C, 0x01E0, 0x3F7C, 0x3FDC, + 0x3FD0, 0x3F88, 0x0238, 0x06B0, 0x067C, 0x01F0, 0x3F7C, 0x3FD8, + 0x3FD4, 0x3F84, 0x0228, 0x069C, 0x0688, 0x0204, 0x3F80, 0x3FD8, + 0x3FD4, 0x3F84, 
0x0214, 0x0694, 0x0694, 0x0214, 0x3F84, 0x3FD4 +}; const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio) { if (ratio.value < dc_fixpt_one.value) return filter_3tap_16p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) - return filter_3tap_16p_117; + return filter_3tap_16p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) - return filter_3tap_16p_150; + return filter_3tap_16p_149; else return filter_3tap_16p_183; } @@ -1029,9 +1355,9 @@ const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio) if (ratio.value < dc_fixpt_one.value) return filter_3tap_64p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) - return filter_3tap_64p_117; + return filter_3tap_64p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) - return filter_3tap_64p_150; + return filter_3tap_64p_149; else return filter_3tap_64p_183; } @@ -1041,9 +1367,9 @@ const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio) if (ratio.value < dc_fixpt_one.value) return filter_4tap_16p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) - return filter_4tap_16p_117; + return filter_4tap_16p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) - return filter_4tap_16p_150; + return filter_4tap_16p_149; else return filter_4tap_16p_183; } @@ -1053,9 +1379,9 @@ const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio) if (ratio.value < dc_fixpt_one.value) return filter_4tap_64p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) - return filter_4tap_64p_117; + return filter_4tap_64p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) - return filter_4tap_64p_150; + return filter_4tap_64p_149; else return filter_4tap_64p_183; } @@ -1065,9 +1391,9 @@ const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio) if (ratio.value < dc_fixpt_one.value) return filter_5tap_64p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) - return filter_5tap_64p_117; + return filter_5tap_64p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) - return filter_5tap_64p_150; + return filter_5tap_64p_149; else return filter_5tap_64p_183; } @@ -1077,9 +1403,9 @@ const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio) if (ratio.value < dc_fixpt_one.value) return filter_6tap_64p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) - return filter_6tap_64p_117; + return filter_6tap_64p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) - return filter_6tap_64p_150; + return filter_6tap_64p_149; else return filter_6tap_64p_183; } @@ -1089,9 +1415,9 @@ const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio) if (ratio.value < dc_fixpt_one.value) return filter_7tap_64p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) - return filter_7tap_64p_117; + return filter_7tap_64p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) - return filter_7tap_64p_150; + return filter_7tap_64p_149; else return filter_7tap_64p_183; } @@ -1101,9 +1427,9 @@ const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio) if (ratio.value < dc_fixpt_one.value) return filter_8tap_64p_upscale; else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) - return filter_8tap_64p_117; + return filter_8tap_64p_116; else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) - return filter_8tap_64p_150; + return filter_8tap_64p_149; else return filter_8tap_64p_183; } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h 
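The selector functions above make the renames concrete: a table's suffix now names the downscale bucket it actually serves, matching the <scale_ratio> values quoted in the regenerated headers (1.16666, 1.49999, 1.83332) rather than the old 117/150 labels. As a rough standalone illustration of the threshold logic, with plain doubles standing in for fixed31_32 and a hypothetical helper name:

/*
 * Sketch of the selection thresholds above, using doubles instead of
 * fixed31_32; pick_6tap_filter() is an illustrative name, the string
 * values are the patch's table names.
 */
#include <stdio.h>

static const char *pick_6tap_filter(double ratio)
{
	if (ratio < 1.0)
		return "filter_6tap_64p_upscale";	/* scaling up */
	else if (ratio < 4.0 / 3.0)
		return "filter_6tap_64p_116";		/* mild downscale */
	else if (ratio < 5.0 / 3.0)
		return "filter_6tap_64p_149";
	else
		return "filter_6tap_64p_183";		/* strongest decimation */
}

int main(void)
{
	/* a 4:3 downscale sits exactly on the first boundary -> _149 */
	printf("%s\n", pick_6tap_filter(1920.0 / 1440.0));
	return 0;
}

Note the boundaries are half-open: a ratio exactly equal to 4/3 falls into the next-stronger table, mirroring the strict less-than comparisons against dc_fixpt_from_fraction(4, 3) and (5, 3).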
b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c index f78cbae9db88..bb0e1b80ec3c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c @@ -1,5 +1,5 @@ /* - * Copyright 2017 Advanced Micro Devices, Inc. + * Copyright 2012-16 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,15 +23,3 @@ * */ -#ifndef __DC_COMMON_DEFS_H__ -#define __DC_COMMON_DEFS_H__ - -#include "dm_services.h" -#include "dc_features.h" -#include "display_mode_structs.h" -#include "display_mode_enums.h" - - -double dml_round(double a); - -#endif /* __DC_COMMON_DEFS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 225955ec6d39..bc109d4fc6e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -27,25 +27,55 @@ #include "dc.h" #include "dc_dmub_srv.h" #include "../../dmub/inc/dmub_srv.h" -#include "dmub_fw_state.h" +#include "../../dmub/inc/dmub_gpint_cmd.h" #include "core_types.h" -#include "ipp.h" #define MAX_PIPES 6 /** * Get PSR state from firmware. */ -static void dmub_get_psr_state(uint32_t *psr_state) +static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state) { - // Not yet implemented - // Trigger GPINT interrupt from firmware + struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub; + + // Send gpint command and wait for ack + dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30); + + dmub_srv_get_gpint_response(srv, psr_state); +} + +/** + * Set PSR version. + */ +static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *stream) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + + cmd.psr_set_version.header.type = DMUB_CMD__PSR; + cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION; + + if (stream->psr_version == 0x0) // Unsupported + return false; + else if (stream->psr_version == 0x1) + cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1; + else if (stream->psr_version == 0x2) + cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_2; + + cmd.psr_enable.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data); + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); + + return true; } /** * Enable/Disable PSR. */ -static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable) +static void dmub_psr_enable(struct dmub_psr *dmub, bool enable) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; @@ -67,13 +97,13 @@ static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable) /** * Set PSR level. */ -static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level) +static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level) { union dmub_rb_cmd cmd; uint32_t psr_state = 0; struct dc_context *dc = dmub->ctx; - dmub_get_psr_state(&psr_state); + dmub_psr_get_state(dmub, &psr_state); if (psr_state == 0) return; @@ -91,7 +121,7 @@ static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level) /** * Setup PSR by programming phy registers and sending psr hw context values to firmware. 
*/ -static bool dmub_setup_psr(struct dmub_psr *dmub, +static bool dmub_psr_copy_settings(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context) { @@ -101,21 +131,22 @@ static bool dmub_setup_psr(struct dmub_psr *dmub, = &cmd.psr_copy_settings.psr_copy_settings_data; struct pipe_ctx *pipe_ctx = NULL; struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx; + int i = 0; - for (int i = 0; i < MAX_PIPES; i++) { - if (res_ctx && - res_ctx->pipe_ctx[i].stream && - res_ctx->pipe_ctx[i].stream->link && - res_ctx->pipe_ctx[i].stream->link == link && - res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { + for (i = 0; i < MAX_PIPES; i++) { + if (res_ctx->pipe_ctx[i].stream && + res_ctx->pipe_ctx[i].stream->link == link && + res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { pipe_ctx = &res_ctx->pipe_ctx[i]; break; } } - if (!pipe_ctx || - !&pipe_ctx->plane_res || - !&pipe_ctx->stream_res) + if (!pipe_ctx) + return false; + + // First, set the psr version + if (!dmub_psr_set_version(dmub, pipe_ctx->stream)) return false; // Program DP DPHY fast training registers @@ -138,10 +169,6 @@ static bool dmub_setup_psr(struct dmub_psr *dmub, copy_settings_data->mpcc_inst = pipe_ctx->plane_res.mpcc_inst; - if (pipe_ctx->plane_res.hubp) - copy_settings_data->hubp_inst = pipe_ctx->plane_res.hubp->inst; - else - copy_settings_data->hubp_inst = 0; if (pipe_ctx->plane_res.dpp) copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst; else @@ -157,18 +184,9 @@ static bool dmub_setup_psr(struct dmub_psr *dmub, // Misc copy_settings_data->psr_level = psr_context->psr_level.u32all; - copy_settings_data->hyst_frames = psr_context->timehyst_frames; - copy_settings_data->hyst_lines = psr_context->hyst_lines; - copy_settings_data->phy_type = psr_context->phyType; - copy_settings_data->aux_repeat = psr_context->aux_repeats; - copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations; - copy_settings_data->skip_wait_for_pll_lock = psr_context->skipPsrWaitForPllLock; + copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations; copy_settings_data->frame_delay = psr_context->frame_delay; - copy_settings_data->smu_phy_id = psr_context->smuPhyId; - copy_settings_data->num_of_controllers = psr_context->numberOfControllers; copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq; - copy_settings_data->phy_num = psr_context->frame_delay & 0x7; - copy_settings_data->link_rate = psr_context->frame_delay & 0xF; dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header); dc_dmub_srv_cmd_execute(dc->dmub_srv); @@ -178,10 +196,10 @@ static bool dmub_setup_psr(struct dmub_psr *dmub, } static const struct dmub_psr_funcs psr_funcs = { - .set_psr_enable = dmub_set_psr_enable, - .setup_psr = dmub_setup_psr, - .get_psr_state = dmub_get_psr_state, - .set_psr_level = dmub_set_psr_level, + .psr_copy_settings = dmub_psr_copy_settings, + .psr_enable = dmub_psr_enable, + .psr_get_state = dmub_psr_get_state, + .psr_set_level = dmub_psr_set_level, }; /** @@ -215,6 +233,6 @@ struct dmub_psr *dmub_psr_create(struct dc_context *ctx) */ void dmub_psr_destroy(struct dmub_psr **dmub) { - kfree(dmub); + kfree(*dmub); *dmub = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h index 229958de3035..f404fecd6410 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h @@ -27,6 +27,7 @@ 
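Among the dmub_psr.c changes above, the dmub_psr_destroy() hunk is a genuine leak fix rather than part of the renaming: the function receives the address of the caller's pointer, so kfree() must be applied to *dmub, not to dmub itself, which is a stack address. A userspace-flavoured sketch of the same pattern, with free() standing in for kfree() and a stub struct for illustration:

/*
 * Sketch of the dmub_psr_destroy() fix above.  The old code freed the
 * pointer-to-pointer argument itself and leaked the object it refers to.
 */
#include <stdlib.h>

struct dmub_psr { int ctx; };	/* stub, real struct holds ctx + funcs */

static void dmub_psr_destroy(struct dmub_psr **dmub)
{
	free(*dmub);	/* release the object from dmub_psr_create() */
	*dmub = NULL;	/* clear the caller's handle against reuse */
}

Clearing *dmub afterwards keeps the existing NULL-poisoning behaviour, so a stale caller pointer faults cleanly instead of touching freed memory.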
#define _DMUB_PSR_H_ #include "os_types.h" +#include "dc_link.h" struct dmub_psr { struct dc_context *ctx; @@ -34,14 +35,14 @@ struct dmub_psr { }; struct dmub_psr_funcs { - void (*set_psr_enable)(struct dmub_psr *dmub, bool enable); - bool (*setup_psr)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context); - void (*get_psr_state)(uint32_t *psr_state); - void (*set_psr_level)(struct dmub_psr *dmub, uint16_t psr_level); + bool (*psr_copy_settings)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context); + void (*psr_enable)(struct dmub_psr *dmub, bool enable); + void (*psr_get_state)(struct dmub_psr *dmub, uint32_t *psr_state); + void (*psr_set_level)(struct dmub_psr *dmub, uint16_t psr_level); }; struct dmub_psr *dmub_psr_create(struct dc_context *ctx); void dmub_psr_destroy(struct dmub_psr **dmub); -#endif /* _DCE_DMUB_H_ */ +#endif /* _DMUB_PSR_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 5b689273ff44..10527593868c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -71,6 +71,8 @@ #define PANEL_POWER_UP_TIMEOUT 300 #define PANEL_POWER_DOWN_TIMEOUT 500 #define HPD_CHECK_INTERVAL 10 +#define OLED_POST_T7_DELAY 100 +#define OLED_PRE_T11_DELAY 150 #define CTX \ hws->ctx @@ -696,8 +698,10 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) } /*todo: cloned in stream enc, fix*/ -static bool is_panel_backlight_on(struct dce_hwseq *hws) +bool dce110_is_panel_backlight_on(struct dc_link *link) { + struct dc_context *ctx = link->ctx; + struct dce_hwseq *hws = ctx->dc->hwseq; uint32_t value; REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value); @@ -705,11 +709,12 @@ static bool is_panel_backlight_on(struct dce_hwseq *hws) return value; } -static bool is_panel_powered_on(struct dce_hwseq *hws) +bool dce110_is_panel_powered_on(struct dc_link *link) { + struct dc_context *ctx = link->ctx; + struct dce_hwseq *hws = ctx->dc->hwseq; uint32_t pwr_seq_state, dig_on, dig_on_ovrd; - REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state); REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd); @@ -816,7 +821,7 @@ void dce110_edp_power_control( return; } - if (power_up != is_panel_powered_on(hwseq)) { + if (power_up != hwseq->funcs.is_panel_powered_on(link)) { /* Send VBIOS command to prompt eDP panel power */ if (power_up) { unsigned long long current_ts = dm_get_timestamp(ctx); @@ -896,7 +901,7 @@ void dce110_edp_backlight_control( return; } - if (enable && is_panel_backlight_on(hws)) { + if (enable && hws->funcs.is_panel_backlight_on(link)) { DC_LOG_HW_RESUME_S3( "%s: panel already powered up. 
Do nothing.\n", __func__); @@ -936,9 +941,21 @@ void dce110_edp_backlight_control( if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) edp_receiver_ready_T7(link); link_transmitter_control(ctx->dc_bios, &cntl); + + if (enable && link->dpcd_sink_ext_caps.bits.oled) + msleep(OLED_POST_T7_DELAY); + + if (link->dpcd_sink_ext_caps.bits.oled || + link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 || + link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1) + dc_link_backlight_enable_aux(link, enable); + /*edp 1.2*/ if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF) edp_receiver_ready_T9(link); + + if (!enable && link->dpcd_sink_ext_caps.bits.oled) + msleep(OLED_PRE_T11_DELAY); } void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) @@ -2576,17 +2593,6 @@ static void dce110_apply_ctx_for_surface( for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; - - if (stream == pipe_ctx->stream) { - if (!pipe_ctx->top_pipe && - (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) - dc->hwss.pipe_control_lock(dc, pipe_ctx, true); - } - } - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream != stream) continue; @@ -2607,20 +2613,16 @@ static void dce110_apply_ctx_for_surface( } - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; - - if ((stream == pipe_ctx->stream) && - (!pipe_ctx->top_pipe) && - (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) - dc->hwss.pipe_control_lock(dc, pipe_ctx, false); - } - if (dc->fbc_compressor) enable_fbc(dc, context); } +static void dce110_post_unlock_program_front_end( + struct dc *dc, + struct dc_state *context) +{ +} + static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; @@ -2683,6 +2685,23 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx) .mirror = pipe_ctx->plane_state->horizontal_mirror }; + /** + * If the cursor's source viewport is clipped then we need to + * translate the cursor to appear in the correct position on + * the screen. + * + * This translation isn't affected by scaling so it needs to be + * done *after* we adjust the position for the scale factor. + * + * This is only done by opt-in for now since there are still + * some usecases like tiled display that might enable the + * cursor on both streams while expecting dc to clip it. 
+ */ + if (pos_cpy.translate_by_source) { + pos_cpy.x += pipe_ctx->plane_state->src_rect.x; + pos_cpy.y += pipe_ctx->plane_state->src_rect.y; + } + if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) pos_cpy.enable = false; @@ -2722,6 +2741,7 @@ static const struct hw_sequencer_funcs dce110_funcs = { .init_hw = init_hw, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = dce110_apply_ctx_for_surface, + .post_unlock_program_front_end = dce110_post_unlock_program_front_end, .update_plane_addr = update_plane_addr, .update_pending_status = dce110_update_pending_status, .enable_accelerated_mode = dce110_enable_accelerated_mode, @@ -2736,6 +2756,8 @@ static const struct hw_sequencer_funcs dce110_funcs = { .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dce110_power_down_fe, .pipe_control_lock = dce_pipe_control_lock, + .interdependent_update_lock = NULL, + .cursor_lock = dce_pipe_control_lock, .prepare_bandwidth = dce110_prepare_bandwidth, .optimize_bandwidth = dce110_optimize_bandwidth, .set_drr = set_drr, @@ -2763,6 +2785,8 @@ static const struct hwseq_private_funcs dce110_private_funcs = { .disable_stream_gating = NULL, .enable_stream_gating = NULL, .edp_backlight_control = dce110_edp_backlight_control, + .is_panel_backlight_on = dce110_is_panel_backlight_on, + .is_panel_powered_on = dce110_is_panel_powered_on, }; void dce110_hw_sequencer_construct(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h index 26a9c14a58b1..34be166e8ff0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h @@ -85,5 +85,9 @@ void dce110_edp_wait_for_hpd_ready( struct dc_link *link, bool power_up); +bool dce110_is_panel_backlight_on(struct dc_link *link); + +bool dce110_is_panel_powered_on(struct dc_link *link); + #endif /* __DC_HWSS_DCE110_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index bbd6e01b3eca..47a39eb9400b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -316,6 +316,7 @@ bool cm_helper_translate_curve_to_hw_format( struct pwl_result_data *rgb_resulted; struct pwl_result_data *rgb; struct pwl_result_data *rgb_plus_1; + struct pwl_result_data *rgb_minus_1; int32_t region_start, region_end; int32_t i; @@ -465,9 +466,20 @@ bool cm_helper_translate_curve_to_hw_format( rgb = rgb_resulted; rgb_plus_1 = rgb_resulted + 1; + rgb_minus_1 = rgb; i = 1; while (i != hw_points + 1) { + + if (i >= hw_points - 1) { + if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) + rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red); + if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) + rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green); + if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) + rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue); + } + rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); @@ -482,6 +494,7 @@ bool cm_helper_translate_curve_to_hw_format( } ++rgb_plus_1; + rgb_minus_1 = rgb; ++rgb; ++i; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index 446ba0a7a4b3..deccab0228d2 
100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -128,8 +128,8 @@ bool hubbub1_verify_allow_pstate_change_high( * pstate takes around ~100us on linux. Unknown currently as to * why it takes that long on linux */ - static unsigned int pstate_wait_timeout_us = 200; - static unsigned int pstate_wait_expected_timeout_us = 40; + const unsigned int pstate_wait_timeout_us = 200; + const unsigned int pstate_wait_expected_timeout_us = 40; static unsigned int max_sampled_pstate_wait_us; /* data collection */ static bool forced_pstate_allow; /* help with revert wa */ @@ -147,8 +147,9 @@ bool hubbub1_verify_allow_pstate_change_high( forced_pstate_allow = false; } - /* RV2: - * dchubbubdebugind, at: 0xB + /* The following table only applies to DCN1 and DCN2, + * for newer DCNs, need to consult with HW IP folks to read RTL + * HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB * description * 0: Pipe0 Plane0 Allow Pstate Change * 1: Pipe0 Plane1 Allow Pstate Change @@ -181,64 +182,6 @@ bool hubbub1_verify_allow_pstate_change_high( * 28: WB0 Allow Pstate Change * 29: WB1 Allow Pstate Change * 30: Arbiter's allow_pstate_change - * 31: SOC pstate change request" - */ - /*DCN2.x: - HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB - 0: Pipe0 Plane0 Allow P-state Change - 1: Pipe0 Plane1 Allow P-state Change - 2: Pipe0 Cursor0 Allow P-state Change - 3: Pipe0 Cursor1 Allow P-state Change - 4: Pipe1 Plane0 Allow P-state Change - 5: Pipe1 Plane1 Allow P-state Change - 6: Pipe1 Cursor0 Allow P-state Change - 7: Pipe1 Cursor1 Allow P-state Change - 8: Pipe2 Plane0 Allow P-state Change - 9: Pipe2 Plane1 Allow P-state Change - 10: Pipe2 Cursor0 Allow P-state Change - 11: Pipe2 Cursor1 Allow P-state Change - 12: Pipe3 Plane0 Allow P-state Change - 13: Pipe3 Plane1 Allow P-state Change - 14: Pipe3 Cursor0 Allow P-state Change - 15: Pipe3 Cursor1 Allow P-state Change - 16: Pipe4 Plane0 Allow P-state Change - 17: Pipe4 Plane1 Allow P-state Change - 18: Pipe4 Cursor0 Allow P-state Change - 19: Pipe4 Cursor1 Allow P-state Change - 20: Pipe5 Plane0 Allow P-state Change - 21: Pipe5 Plane1 Allow P-state Change - 22: Pipe5 Cursor0 Allow P-state Change - 23: Pipe5 Cursor1 Allow P-state Change - 24: Pipe6 Plane0 Allow P-state Change - 25: Pipe6 Plane1 Allow P-state Change - 26: Pipe6 Cursor0 Allow P-state Change - 27: Pipe6 Cursor1 Allow P-state Change - 28: WB0 Allow P-state Change - 29: WB1 Allow P-state Change - 30: Arbiter`s Allow P-state Change - 31: SOC P-state Change request - */ - /* RV1: - * dchubbubdebugind, at: 0x7 - * description "3-0: Pipe0 cursor0 QOS - * 7-4: Pipe1 cursor0 QOS - * 11-8: Pipe2 cursor0 QOS - * 15-12: Pipe3 cursor0 QOS - * 16: Pipe0 Plane0 Allow Pstate Change - * 17: Pipe1 Plane0 Allow Pstate Change - * 18: Pipe2 Plane0 Allow Pstate Change - * 19: Pipe3 Plane0 Allow Pstate Change - * 20: Pipe0 Plane1 Allow Pstate Change - * 21: Pipe1 Plane1 Allow Pstate Change - * 22: Pipe2 Plane1 Allow Pstate Change - * 23: Pipe3 Plane1 Allow Pstate Change - * 24: Pipe0 cursor0 Allow Pstate Change - * 25: Pipe1 cursor0 Allow Pstate Change - * 26: Pipe2 cursor0 Allow Pstate Change - * 27: Pipe3 cursor0 Allow Pstate Change - * 28: WB0 Allow Pstate Change - * 29: WB1 Allow Pstate Change - * 30: Arbiter's allow_pstate_change * 31: SOC pstate change request */ @@ -300,7 +243,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub) DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1); } -void hubbub1_program_urgent_watermarks( +bool 
hubbub1_program_urgent_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -308,6 +251,7 @@ void hubbub1_program_urgent_watermarks( { struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); uint32_t prog_wm_value; + bool wm_pending = false; /* Repeat for water mark set A, B, C and D. */ /* clock state A */ @@ -321,7 +265,8 @@ void hubbub1_program_urgent_watermarks( DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.urgent_ns, prog_wm_value); - } + } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns) + wm_pending = true; if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) { hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns; @@ -331,7 +276,8 @@ void hubbub1_program_urgent_watermarks( DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.pte_meta_urgent_ns, prog_wm_value); - } + } else if (watermarks->a.pte_meta_urgent_ns < hubbub1->watermarks.a.pte_meta_urgent_ns) + wm_pending = true; /* clock state B */ if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) { @@ -344,7 +290,8 @@ void hubbub1_program_urgent_watermarks( DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.urgent_ns, prog_wm_value); - } + } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns) + wm_pending = true; if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) { hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns; @@ -354,7 +301,8 @@ void hubbub1_program_urgent_watermarks( DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.pte_meta_urgent_ns, prog_wm_value); - } + } else if (watermarks->b.pte_meta_urgent_ns < hubbub1->watermarks.b.pte_meta_urgent_ns) + wm_pending = true; /* clock state C */ if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) { @@ -367,7 +315,8 @@ void hubbub1_program_urgent_watermarks( DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.urgent_ns, prog_wm_value); - } + } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns) + wm_pending = true; if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) { hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns; @@ -377,7 +326,8 @@ void hubbub1_program_urgent_watermarks( DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.pte_meta_urgent_ns, prog_wm_value); - } + } else if (watermarks->c.pte_meta_urgent_ns < hubbub1->watermarks.c.pte_meta_urgent_ns) + wm_pending = true; /* clock state D */ if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) { @@ -390,7 +340,8 @@ void hubbub1_program_urgent_watermarks( DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.urgent_ns, prog_wm_value); - } + } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns) + wm_pending = true; if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) { hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns; @@ -400,10 +351,13 @@ void 
hubbub1_program_urgent_watermarks( DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.pte_meta_urgent_ns, prog_wm_value); - } + } else if (watermarks->d.pte_meta_urgent_ns < hubbub1->watermarks.d.pte_meta_urgent_ns) + wm_pending = true; + + return wm_pending; } -void hubbub1_program_stutter_watermarks( +bool hubbub1_program_stutter_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -411,6 +365,7 @@ void hubbub1_program_stutter_watermarks( { struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); uint32_t prog_wm_value; + bool wm_pending = false; /* clock state A */ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns @@ -425,7 +380,9 @@ void hubbub1_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } + } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) { @@ -439,7 +396,9 @@ void hubbub1_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); - } + } else if (watermarks->a.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) + wm_pending = true; /* clock state B */ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns @@ -454,7 +413,9 @@ void hubbub1_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } + } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) { @@ -468,7 +429,9 @@ void hubbub1_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); - } + } else if (watermarks->b.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) + wm_pending = true; /* clock state C */ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns @@ -483,7 +446,9 @@ void hubbub1_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } + } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) { @@ -497,7 +462,9 @@ void hubbub1_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); - } + } else if (watermarks->c.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) + wm_pending = true; /* clock state D */ if 
(safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns @@ -512,7 +479,9 @@ void hubbub1_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } + } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) { @@ -526,11 +495,14 @@ void hubbub1_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); - } + } else if (watermarks->d.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) + wm_pending = true; + return wm_pending; } -void hubbub1_program_pstate_watermarks( +bool hubbub1_program_pstate_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -538,6 +510,7 @@ void hubbub1_program_pstate_watermarks( { struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); uint32_t prog_wm_value; + bool wm_pending = false; /* clock state A */ if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns @@ -552,7 +525,9 @@ void hubbub1_program_pstate_watermarks( DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); - } + } else if (watermarks->a.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) + wm_pending = true; /* clock state B */ if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns @@ -567,7 +542,9 @@ void hubbub1_program_pstate_watermarks( DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); - } + } else if (watermarks->b.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) + wm_pending = true; /* clock state C */ if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns @@ -582,7 +559,9 @@ void hubbub1_program_pstate_watermarks( DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); - } + } else if (watermarks->c.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) + wm_pending = true; /* clock state D */ if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns @@ -597,23 +576,33 @@ void hubbub1_program_pstate_watermarks( DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); - } + } else if (watermarks->d.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) + wm_pending = true; + + return wm_pending; } -void hubbub1_program_watermarks( +bool hubbub1_program_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + bool wm_pending = false; /* * Need to clamp to max of the register values (i.e. 
no wrap) * for dcn1, all wm registers are 21-bit wide */ - hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); - hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); - hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); + if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); @@ -627,6 +616,7 @@ void hubbub1_program_watermarks( DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1); #endif + return wm_pending; } void hubbub1_update_dchub( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index af57751253de..343a537172c7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -308,7 +308,7 @@ bool hubbub1_verify_allow_pstate_change_high( void hubbub1_wm_change_req_wa(struct hubbub *hubbub); -void hubbub1_program_watermarks( +bool hubbub1_program_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -331,17 +331,17 @@ void hubbub1_construct(struct hubbub *hubbub, const struct dcn_hubbub_shift *hubbub_shift, const struct dcn_hubbub_mask *hubbub_mask); -void hubbub1_program_urgent_watermarks( +bool hubbub1_program_urgent_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); -void hubbub1_program_stutter_watermarks( +bool hubbub1_program_stutter_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); -void hubbub1_program_pstate_watermarks( +bool hubbub1_program_pstate_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 1008ac8a0f2a..416afb99529d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -48,8 +48,8 @@ #include "dc_link_dp.h" #include "dccg.h" #include "clk_mgr.h" - - +#include "link_hwss.h" +#include "dpcd_defs.h" #include "dsc.h" #define DC_LOGGER_INIT(logger) @@ -82,7 +82,7 @@ void print_microsec(struct dc_context *dc_ctx, us_x10 % frac); } -static void dcn10_lock_all_pipes(struct dc *dc, +void dcn10_lock_all_pipes(struct dc *dc, struct dc_state *context, bool lock) { @@ -93,6 +93,7 @@ static void dcn10_lock_all_pipes(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe_ctx = &context->res_ctx.pipe_ctx[i]; tg = pipe_ctx->stream_res.tg; + /* * Only lock the top pipe's tg to prevent redundant * (un)locking. Also skip if pipe is disabled. @@ -103,9 +104,9 @@ static void dcn10_lock_all_pipes(struct dc *dc, continue; if (lock) - tg->funcs->lock(tg); + dc->hwss.pipe_control_lock(dc, pipe_ctx, true); else - tg->funcs->unlock(tg); + dc->hwss.pipe_control_lock(dc, pipe_ctx, false); } } @@ -900,6 +901,10 @@ static void dcn10_reset_back_end_for_pipe( * parent pipe. 
*/ if (pipe_ctx->top_pipe == NULL) { + + if (pipe_ctx->stream_res.abm) + pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm); + pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); @@ -1263,7 +1268,8 @@ void dcn10_init_hw(struct dc *dc) } //Enable ability to power gate / don't force power on permanently - hws->funcs.enable_power_gating_plane(hws, true); + if (hws->funcs.enable_power_gating_plane) + hws->funcs.enable_power_gating_plane(hws, true); return; } @@ -1317,6 +1323,31 @@ void dcn10_init_hw(struct dc *dc) if (hws->funcs.dsc_pg_control != NULL) hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); + /* we want to turn off all dp displays before doing detection */ + if (dc->config.power_down_display_on_boot) { + uint8_t dpcd_power_state = '\0'; + enum dc_status status = DC_ERROR_UNEXPECTED; + + for (i = 0; i < dc->link_count; i++) { + if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) + continue; + + /* + * core_link_read_dpcd() will invoke dm_helpers_dp_read_dpcd(), + * which needs to read dpcd info with the help of aconnector. + * If aconnector (dc->links[i]->priv) is NULL, then dpcd status + * cannot be read. + */ + if (dc->links[i]->priv) { + /* if any of the displays are lit up turn them off */ + status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, + &dpcd_power_state, sizeof(dpcd_power_state)); + if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) + dp_receiver_power_ctrl(dc->links[i], false); + } + } + } + /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which * pipes we want to use. @@ -1325,6 +1356,9 @@ */ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { hws->funcs.init_pipes(dc, dc->current_state); + if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) + dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, + !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); } for (i = 0; i < res_pool->audio_count; i++) { @@ -1355,8 +1389,8 @@ void dcn10_init_hw(struct dc *dc) REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } - - hws->funcs.enable_power_gating_plane(dc->hwseq, true); + if (hws->funcs.enable_power_gating_plane) + hws->funcs.enable_power_gating_plane(dc->hwseq, true); if (dc->clk_mgr->funcs->notify_wm_ranges) dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); @@ -1576,7 +1610,7 @@ void dcn10_pipe_control_lock( /* use TG master update lock to lock everything on the TG * therefore only top pipe need to lock */ - if (pipe->top_pipe) + if (!pipe || pipe->top_pipe) return; if (dc->debug.sanity_checks) @@ -1591,6 +1625,85 @@ hws->funcs.verify_allow_pstate_change_high(dc); }
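/*
 * Editor's note (not part of the patch): with this series, pipe locking is
 * hoisted out of apply_ctx_for_surface(), and pipe disables plus bandwidth
 * optimization move into the new post-unlock stage. A minimal sketch of the
 * resulting commit flow, assuming the hwss hooks wired up in dcn10_init.c
 * below; example_commit_front_end() is a hypothetical caller, simplified
 * from how dc.c drives these hooks.
 */
static void example_commit_front_end(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *top_pipe, bool interdependent)
{
	/* Lock either the whole context or just the top pipe's TG. */
	if (interdependent)
		dc->hwss.interdependent_update_lock(dc, context, true);
	else
		dc->hwss.pipe_control_lock(dc, top_pipe, true);

	/* Program the front end while locked (DCN20+ path shown). */
	if (dc->hwss.program_front_end_for_ctx)
		dc->hwss.program_front_end_for_ctx(dc, context);

	if (interdependent)
		dc->hwss.interdependent_update_lock(dc, context, false);
	else
		dc->hwss.pipe_control_lock(dc, top_pipe, false);

	/* Disables and bandwidth optimization now run after unlock. */
	dc->hwss.post_unlock_program_front_end(dc, context);
}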
+/** + * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE. + * + * Software keepout workaround to prevent cursor update locking from stalling + * out cursor updates indefinitely, or old values from being retained in + * the case where the viewport changes in the same frame as the cursor. + * + * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's + * too close to VUPDATE, then stall out until VUPDATE finishes. + * + * TODO: Optimize cursor programming to be once per frame before VUPDATE + * to avoid the need for this workaround. + */ +static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct crtc_position position; + uint32_t vupdate_start, vupdate_end; + unsigned int lines_to_vupdate, us_to_vupdate, vpos; + unsigned int us_per_line, us_vupdate; + + if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position) + return; + + if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg) + return; + + dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start, + &vupdate_end); + + dc->hwss.get_position(&pipe_ctx, 1, &position); + vpos = position.vertical_count; + + /* Avoid wraparound calculation issues */ + vupdate_start += stream->timing.v_total; + vupdate_end += stream->timing.v_total; + vpos += stream->timing.v_total; + + if (vpos <= vupdate_start) { + /* VPOS is in VACTIVE or back porch. */ + lines_to_vupdate = vupdate_start - vpos; + } else if (vpos > vupdate_end) { + /* VPOS is in the front porch. */ + return; + } else { + /* VPOS is in VUPDATE. */ + lines_to_vupdate = 0; + } + + /* Calculate time until VUPDATE in microseconds. */ + us_per_line = + stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz; + us_to_vupdate = lines_to_vupdate * us_per_line; + + /* 70 us is a conservative estimate of cursor update time */ + if (us_to_vupdate > 70) + return; + + /* Stall out until the cursor update completes. */ + if (vupdate_end < vupdate_start) + vupdate_end += stream->timing.v_total; + us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line; + udelay(us_to_vupdate + us_vupdate); +} + +void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock) +{ + /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */ + if (!pipe || pipe->top_pipe) + return; + + /* Prevent cursor lock from stalling out cursor updates. */ + if (lock) + delay_cursor_until_vupdate(dc, pipe); + + dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc, + pipe->stream_res.opp->inst, lock); +}
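/*
 * Editor's note (not part of the patch): a minimal usage sketch of the new
 * cursor_lock hook, assuming the MPC funcs wiring added in dcn10_mpc.c later
 * in this diff (.cursor_lock = mpc1_cursor_lock). example_update_cursor() is
 * a hypothetical caller; in the series the bracketing is performed by
 * dcn10_cursor_lock() above, via hwss.cursor_lock.
 */
static void example_update_cursor(struct dc *dc, struct pipe_ctx *pipe)
{
	struct mpc *mpc = dc->res_pool->mpc;
	int opp_id = pipe->stream_res.opp->inst;

	/* Set CUR_VUPDATE_LOCK_SET so pending cursor state is not latched. */
	mpc->funcs->cursor_lock(mpc, opp_id, true);

	/* Program cursor registers for the pipes in this MPCC tree. */
	dc->hwss.set_cursor_position(pipe);

	/* Release the lock; the update latches atomically at VUPDATE. */
	mpc->funcs->cursor_lock(mpc, opp_id, false);
}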
+ static bool wait_for_reset_trigger_to_occur( struct dc_context *dc_ctx, struct timing_generator *tg) @@ -1970,6 +2083,12 @@ void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx) for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) adjust.temperature_matrix[i] = pipe_ctx->stream->gamut_remap_matrix.matrix[i]; + } else if (pipe_ctx->plane_state && + pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) { + adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) + adjust.temperature_matrix[i] = + pipe_ctx->plane_state->gamut_remap_matrix.matrix[i]; } pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust); @@ -2090,6 +2209,10 @@ void dcn10_get_hdr_visual_confirm_color( if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) { /* HDR10, ARGB2101010 - set border color to red */ color->color_r_cr = color_value; + } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { + /* FreeSync 2 ARGB2101010 - set border color to pink */ + color->color_r_cr = color_value; + color->color_b_cb = color_value; } break; case PIXEL_FORMAT_FP16: @@ -2512,12 +2635,17 @@ void dcn10_apply_ctx_for_surface( int i; struct timing_generator *tg; uint32_t underflow_check_delay_us; - bool removed_pipe[4] = { false }; bool interdependent_update = false; struct pipe_ctx *top_pipe_to_program = dcn10_find_top_pipe_for_stream(dc, context, stream); DC_LOGGER_INIT(dc->ctx->logger); + // Clear pipe_ctx flag + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + pipe_ctx->update_flags.raw = 0; + } + if (!top_pipe_to_program) return; @@ -2531,11 +2659,6 @@ if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur) ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program)); - if (interdependent_update) - dcn10_lock_all_pipes(dc, context, true); - else - dcn10_pipe_control_lock(dc, top_pipe_to_program, true); - if (underflow_check_delay_us != 0xFFFFFFFF) udelay(underflow_check_delay_us); @@ -2552,18 +2675,6 @@ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; - /* - * Powergate reused pipes that are not powergated - * fairly hacky right now, using opp_id as indicator - * TODO: After move dc_post to dc_update, this will - * be removed.
- */ - if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) { - if (old_pipe_ctx->stream_res.tg == tg && - old_pipe_ctx->plane_res.hubp && - old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID) - dc->hwss.disable_plane(dc, old_pipe_ctx); - } if ((!pipe_ctx->plane_state || pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) && @@ -2571,7 +2682,7 @@ void dcn10_apply_ctx_for_surface( old_pipe_ctx->stream_res.tg == tg) { hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx); - removed_pipe[i] = true; + pipe_ctx->update_flags.bits.disable = 1; DC_LOG_DC("Reset mpcc for pipe %d\n", old_pipe_ctx->pipe_idx); @@ -2597,21 +2708,35 @@ void dcn10_apply_ctx_for_surface( &pipe_ctx->dlg_regs, &pipe_ctx->ttu_regs); } +} - if (interdependent_update) - dcn10_lock_all_pipes(dc, context, false); - else - dcn10_pipe_control_lock(dc, top_pipe_to_program, false); +void dcn10_post_unlock_program_front_end( + struct dc *dc, + struct dc_state *context) +{ + int i; + + DC_LOGGER_INIT(dc->ctx->logger); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx->top_pipe && + !pipe_ctx->prev_odm_pipe && + pipe_ctx->stream) { + struct timing_generator *tg = pipe_ctx->stream_res.tg; - if (num_planes == 0) - false_optc_underflow_wa(dc, stream, tg); + if (context->stream_status[i].plane_count == 0) + false_optc_underflow_wa(dc, pipe_ctx->stream, tg); + } + } for (i = 0; i < dc->res_pool->pipe_count; i++) - if (removed_pipe[i]) + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); for (i = 0; i < dc->res_pool->pipe_count; i++) - if (removed_pipe[i]) { + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) { dc->hwss.optimize_bandwidth(dc, context); break; } @@ -2656,7 +2781,7 @@ void dcn10_prepare_bandwidth( false); } - hubbub->funcs->program_watermarks(hubbub, + dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, true); @@ -2693,6 +2818,7 @@ void dcn10_optimize_bandwidth( &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, true); + dcn10_stereo_hw_frame_pack_wa(dc, context); if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) @@ -2884,6 +3010,7 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct timing_generator *tg = pipe_ctx->stream_res.tg; bool flip_pending; + struct dc *dc = plane_state->ctx->dc; if (plane_state == NULL) return; @@ -2901,6 +3028,19 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) plane_state->status.is_right_eye = !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg); } + + if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) { + struct dce_hwseq *hwseq = dc->hwseq; + struct timing_generator *tg = dc->res_pool->timing_generators[0]; + unsigned int cur_frame = tg->funcs->get_frame_count(tg); + + if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) { + struct hubbub *hubbub = dc->res_pool->hubbub; + + hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter); + hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false; + } + } } void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) @@ -2960,12 +3100,50 @@ void dcn10_set_cursor_position(struct 
pipe_ctx *pipe_ctx) int x_pos = pos_cpy.x; int y_pos = pos_cpy.y; - // translate cursor from stream space to plane space + /** + * DC cursor is stream space, HW cursor is plane space and drawn + * as part of the framebuffer. + * + * Cursor position can't be negative, but hotspot can be used to + * shift cursor out of the plane bounds. Hotspot must be smaller + * than the cursor size. + */ + + /** + * Translate cursor from stream space to plane space. + * + * If the cursor is scaled then we need to scale the position + * to be in the approximately correct place. We can't do anything + * about the actual size being incorrect, that's a limitation of + * the hardware. + */ x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width / pipe_ctx->plane_state->dst_rect.width; y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height / pipe_ctx->plane_state->dst_rect.height; + /** + * If the cursor's source viewport is clipped then we need to + * translate the cursor to appear in the correct position on + * the screen. + * + * This translation isn't affected by scaling so it needs to be + * done *after* we adjust the position for the scale factor. + * + * This is only done by opt-in for now since there are still + * some usecases like tiled display that might enable the + * cursor on both streams while expecting dc to clip it. + */ + if (pos_cpy.translate_by_source) { + x_pos += pipe_ctx->plane_state->src_rect.x; + y_pos += pipe_ctx->plane_state->src_rect.y; + } + + /** + * If the position is negative then we need to add to the hotspot + * to shift the cursor outside the plane. + */ + if (x_pos < 0) { pos_cpy.x_hotspot -= x_pos; x_pos = 0; @@ -3127,7 +3305,7 @@ int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx) return vertical_line_start; } -static void dcn10_calc_vupdate_position( +void dcn10_calc_vupdate_position( struct dc *dc, struct pipe_ctx *pipe_ctx, uint32_t *start_line, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 4d20f6586bb5..42b6e016d71e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -34,6 +34,11 @@ struct dc; void dcn10_hw_sequencer_construct(struct dc *dc); int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx); +void dcn10_calc_vupdate_position( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + uint32_t *start_line, + uint32_t *end_line); void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); enum dc_status dcn10_enable_stream_timing( struct pipe_ctx *pipe_ctx, @@ -49,6 +54,7 @@ void dcn10_pipe_control_lock( struct dc *dc, struct pipe_ctx *pipe, bool lock); +void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock); void dcn10_blank_pixel_data( struct dc *dc, struct pipe_ctx *pipe_ctx, @@ -70,11 +76,18 @@ void dcn10_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context); void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_lock_all_pipes( + struct dc *dc, + struct dc_state *context, + bool lock); void dcn10_apply_ctx_for_surface( struct dc *dc, const struct dc_stream_state *stream, int num_planes, struct dc_state *context); +void dcn10_post_unlock_program_front_end( + struct dc *dc, + struct dc_state *context); void dcn10_hubp_pg_control( struct dce_hwseq *hws, unsigned int hubp_inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c index e7e5352ec424..9e8e32629e47 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -32,6 +32,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .init_hw = dcn10_init_hw, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, + .post_unlock_program_front_end = dcn10_post_unlock_program_front_end, .update_plane_addr = dcn10_update_plane_addr, .update_dchub = dcn10_update_dchub, .update_pending_status = dcn10_update_pending_status, @@ -49,6 +50,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn10_disable_plane, .pipe_control_lock = dcn10_pipe_control_lock, + .cursor_lock = dcn10_cursor_lock, + .interdependent_update_lock = dcn10_lock_all_pipes, .prepare_bandwidth = dcn10_prepare_bandwidth, .optimize_bandwidth = dcn10_optimize_bandwidth, .set_drr = dcn10_set_drr, @@ -69,6 +72,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .set_clock = dcn10_set_clock, .get_clock = dcn10_get_clock, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, }; static const struct hwseq_private_funcs dcn10_private_funcs = { @@ -85,6 +89,8 @@ static const struct hwseq_private_funcs dcn10_private_funcs = { .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, .enable_stream_timing = dcn10_enable_stream_timing, .edp_backlight_control = dce110_edp_backlight_control, + .is_panel_backlight_on = dce110_is_panel_backlight_on, + .is_panel_powered_on = dce110_is_panel_powered_on, .disable_stream_gating = NULL, .enable_stream_gating = NULL, .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index 1a37c90e9d43..d3617d6785a7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -782,6 +782,11 @@ bool dcn10_link_encoder_validate_output_with_stream( struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); bool is_valid; + //if SCDC (340-600MHz) is disabled, set to HDMI 1.4 timing limit + if (stream->sink->edid_caps.panel_patch.skip_scdc_overwrite && + enc10->base.features.max_hdmi_pixel_clock > 300000) + enc10->base.features.max_hdmi_pixel_clock = 300000; + switch (stream->signal) { case SIGNAL_TYPE_DVI_SINGLE_LINK: case SIGNAL_TYPE_DVI_DUAL_LINK: diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index eb13589b9a81..762109174fb8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -62,11 +62,11 @@ SRI(DP_DPHY_FAST_TRAINING, DP, id), \ SRI(DP_SEC_CNTL1, DP, id), \ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \ - SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id) #define LE_DCN10_REG_LIST(id)\ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \ LE_DCN_COMMON_REG_LIST(id) struct dcn10_link_enc_aux_registers { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 04f863499cfb..3fcd408e9103 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -223,6 +223,9 @@ struct mpcc 
*mpc1_insert_plane( REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id); + /* Configure VUPDATE lock set for this MPCC to map to the OPP */ + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, tree->opp_id); + /* update mpc tree mux setting */ if (tree->opp_list == insert_above_mpcc) { /* insert the topmost mpcc */ @@ -318,6 +321,7 @@ void mpc1_remove_mpcc( REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); /* mark this mpcc as not in use */ mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id); @@ -328,6 +332,7 @@ void mpc1_remove_mpcc( REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); } } @@ -361,6 +366,7 @@ void mpc1_mpc_init(struct mpc *mpc) REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id); } @@ -381,6 +387,7 @@ void mpc1_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id) REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id); @@ -453,6 +460,13 @@ void mpc1_read_mpcc_state( MPCC_BUSY, &s->busy); } +void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + + REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 
1 : 0); +} + static const struct mpc_funcs dcn10_mpc_funcs = { .read_mpcc_state = mpc1_read_mpcc_state, .insert_plane = mpc1_insert_plane, @@ -464,6 +478,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = { .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect, .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw, .update_blending = mpc1_update_blending, + .cursor_lock = mpc1_cursor_lock, .set_denorm = NULL, .set_denorm_clamp = NULL, .set_output_csc = NULL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h index 962a68e322ee..66a4719c22a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h @@ -39,11 +39,12 @@ SRII(MPCC_BG_G_Y, MPCC, inst),\ SRII(MPCC_BG_R_CR, MPCC, inst),\ SRII(MPCC_BG_B_CB, MPCC, inst),\ - SRII(MPCC_BG_B_CB, MPCC, inst),\ - SRII(MPCC_SM_CONTROL, MPCC, inst) + SRII(MPCC_SM_CONTROL, MPCC, inst),\ + SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst) #define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \ - SRII(MUX, MPC_OUT, inst) + SRII(MUX, MPC_OUT, inst),\ + VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst) #define MPC_COMMON_REG_VARIABLE_LIST \ uint32_t MPCC_TOP_SEL[MAX_MPCC]; \ @@ -55,7 +56,9 @@ uint32_t MPCC_BG_R_CR[MAX_MPCC]; \ uint32_t MPCC_BG_B_CB[MAX_MPCC]; \ uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \ - uint32_t MUX[MAX_OPP]; + uint32_t MUX[MAX_OPP]; \ + uint32_t MPCC_UPDATE_LOCK_SEL[MAX_MPCC]; \ + uint32_t CUR[MAX_OPP]; #define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\ SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\ @@ -78,7 +81,8 @@ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\ - SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh) + SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh),\ + SF(MPCC0_MPCC_UPDATE_LOCK_SEL, MPCC_UPDATE_LOCK_SEL, mask_sh) #define MPC_REG_FIELD_LIST(type) \ type MPCC_TOP_SEL;\ @@ -101,7 +105,9 @@ type MPCC_SM_FIELD_ALT;\ type MPCC_SM_FORCE_NEXT_FRAME_POL;\ type MPCC_SM_FORCE_NEXT_TOP_POL;\ - type MPC_OUT_MUX; + type MPC_OUT_MUX;\ + type MPCC_UPDATE_LOCK_SEL;\ + type CUR_VUPDATE_LOCK_SET; struct dcn_mpc_registers { MPC_COMMON_REG_VARIABLE_LIST @@ -192,4 +198,6 @@ void mpc1_read_mpcc_state( int mpcc_inst, struct mpcc_state *s); +void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index a9a43b397db9..17d96ec6acd8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -299,7 +299,6 @@ void optc1_set_vtg_params(struct timing_generator *optc, uint32_t asic_blank_end; uint32_t v_init; uint32_t v_fp2 = 0; - int32_t vertical_line_start; struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -316,9 +315,8 @@ void optc1_set_vtg_params(struct timing_generator *optc, patched_crtc_timing.v_border_top; /* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */ - vertical_line_start = asic_blank_end - optc1->vstartup_start + 1; - if (vertical_line_start < 0) - v_fp2 = -vertical_line_start; + if (optc1->vstartup_start > asic_blank_end) + v_fp2 = optc1->vstartup_start - asic_blank_end; /* Interlace */ if (REG(OTG_INTERLACE_CONTROL)) { @@ -345,6 +343,23 @@ void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enab } /** + * optc1_set_timing_double_buffer() - DRR double buffering control + * + * Sets double 
buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN, + * VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers. + * + * Options: any time, start of frame, dp start of frame (range timing) + */ +void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t mode = enable ? 2 : 0; + + REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL, + OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mode); +} + +/** * unblank_crtc * Call ASIC Control Object to UnBlank CRTC. */ @@ -1195,7 +1210,7 @@ static void optc1_enable_stereo(struct timing_generator *optc, REG_UPDATE_3(OTG_STEREO_CONTROL, OTG_STEREO_EN, stereo_en, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0, - OTG_STEREO_SYNC_OUTPUT_POLARITY, 0); + OTG_STEREO_SYNC_OUTPUT_POLARITY, flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1); if (flags->PROGRAM_POLARITY) REG_UPDATE(OTG_STEREO_CONTROL, @@ -1355,6 +1370,7 @@ void optc1_clear_optc_underflow(struct timing_generator *optc) void optc1_tg_init(struct timing_generator *optc) { optc1_set_blank_data_double_buffer(optc, true); + optc1_set_timing_double_buffer(optc, true); optc1_clear_optc_underflow(optc); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index f277656d5464..9a459a8fe8a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -185,6 +185,7 @@ struct dcn_optc_registers { SF(OTG0_OTG_GLOBAL_CONTROL0, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\ SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\ @@ -643,6 +644,8 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc); void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable); +void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable); + bool optc1_get_otg_active_size(struct timing_generator *optc, uint32_t *otg_active_width, uint32_t *otg_active_height); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 3b71898e859e..ba849aa31e6e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -181,6 +181,14 @@ enum dcn10_clk_src_array_id { .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \ + mm ## reg_name ## 0 ## _ ## block ## id + +/* set field/register/bitfield name */ +#define SFRB(field_name, reg_name, bitfield, post_fix)\ + .field_name = reg_name ## __ ## bitfield ## post_fix + /* NBIO */ #define NBIO_BASE_INNER(seg) \ NBIF_BASE__INST0_SEG ## seg @@ -419,11 +427,13 @@ static const struct dcn_mpc_registers mpc_regs = { }; static const struct dcn_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) + MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),\ + SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT) }; static const struct dcn_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK), + MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),\ + 
SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK) }; #define tg_regs(id)\ @@ -552,7 +562,8 @@ static const struct dc_plane_cap plane_cap = { .pixel_format_support = { .argb8888 = true, .nv12 = true, - .fp16 = true + .fp16 = true, + .p010 = true }, .max_upscale_factor = { @@ -570,7 +581,7 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = true, - .disable_dmcu = true, + .disable_dmcu = false, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, @@ -584,7 +595,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_pplib_clock_request = false, .disable_pplib_wm_range = false, .pplib_wm_report_mode = WM_REPORT_DEFAULT, - .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, .force_single_disp_pipe_split = true, .disable_dcc = DCC_ENABLE, .voltage_align_fclk = true, @@ -598,7 +609,7 @@ static const struct dc_debug_options debug_defaults_drv = { }; static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, + .disable_dmcu = false, .force_abm_enable = false, .timing_trace = true, .clock_trace = true, @@ -1233,7 +1244,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont return DC_OK; } -static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state) +static enum dc_status dcn10_patch_unknown_plane_state(struct dc_plane_state *plane_state) { enum dc_status result = DC_OK; @@ -1295,7 +1306,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = { .validate_plane = dcn10_validate_plane, .validate_global = dcn10_validate_global, .add_stream_to_ctx = dcn10_add_stream_to_ctx, - .get_default_swizzle_mode = dcn10_get_default_swizzle_mode, + .patch_unknown_plane_state = dcn10_patch_unknown_plane_state, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 376c4264d295..7eba9333c328 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -1667,5 +1667,6 @@ void dcn10_stream_encoder_construct( enc1->regs = regs; enc1->se_shift = se_shift; enc1->se_mask = se_mask; + enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c index 50bffbfdd394..62cc2651e00c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c @@ -70,6 +70,8 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 0); } + + dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk; } void dccg2_get_dccg_ref_freq(struct dccg *dccg, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index 13e057d7ee93..42bba7c9548b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -369,84 +369,6 @@ void dpp2_set_cursor_attributes( } } -#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19)) - -bool dpp2_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps) -{ - /* Some ASICs does not support 
FP16 scaling, so we reject modes require this*/ - if (scl_data->viewport.width != scl_data->h_active && - scl_data->viewport.height != scl_data->v_active && - dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && - scl_data->format == PIXEL_FORMAT_FP16) - return false; - - if (scl_data->viewport.width > scl_data->h_active && - dpp->ctx->dc->debug.max_downscale_src_width != 0 && - scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) - return false; - - /* TODO: add lb check */ - - /* No support for programming ratio of 8, drop to 7.99999.. */ - if (scl_data->ratios.horz.value == (8ll << 32)) - scl_data->ratios.horz.value--; - if (scl_data->ratios.vert.value == (8ll << 32)) - scl_data->ratios.vert.value--; - if (scl_data->ratios.horz_c.value == (8ll << 32)) - scl_data->ratios.horz_c.value--; - if (scl_data->ratios.vert_c.value == (8ll << 32)) - scl_data->ratios.vert_c.value--; - - /* Set default taps if none are provided */ - if (in_taps->h_taps == 0) { - if (dc_fixpt_ceil(scl_data->ratios.horz) > 4) - scl_data->taps.h_taps = 8; - else - scl_data->taps.h_taps = 4; - } else - scl_data->taps.h_taps = in_taps->h_taps; - if (in_taps->v_taps == 0) { - if (dc_fixpt_ceil(scl_data->ratios.vert) > 4) - scl_data->taps.v_taps = 8; - else - scl_data->taps.v_taps = 4; - } else - scl_data->taps.v_taps = in_taps->v_taps; - if (in_taps->v_taps_c == 0) { - if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4) - scl_data->taps.v_taps_c = 4; - else - scl_data->taps.v_taps_c = 2; - } else - scl_data->taps.v_taps_c = in_taps->v_taps_c; - if (in_taps->h_taps_c == 0) { - if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4) - scl_data->taps.h_taps_c = 4; - else - scl_data->taps.h_taps_c = 2; - } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) - /* Only 1 and even h_taps_c are supported by hw */ - scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; - else - scl_data->taps.h_taps_c = in_taps->h_taps_c; - - if (!dpp->ctx->dc->debug.always_scale) { - if (IDENTITY_RATIO(scl_data->ratios.horz)) - scl_data->taps.h_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert)) - scl_data->taps.v_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.horz_c)) - scl_data->taps.h_taps_c = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert_c)) - scl_data->taps.v_taps_c = 1; - } - - return true; -} - void oppn20_dummy_program_regamma_pwl( struct dpp *dpp, const struct pwl_params *params, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 6bdfee20b6a7..1b1ae9ce2799 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -369,6 +369,7 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_ dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable; dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth; dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1; + dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0; // TODO: in addition to validating slice height (pic height must be divisible by slice height), // see what happens when the same condition doesn't apply for slice_width/pic_width. 
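/*
 * Editor's note (not part of the patch): the two dcn20_dsc.c hunks here move
 * the ICH-reset decision from dsc_update_from_dsc_parameters() into
 * dsc_prepare_config() so it can account for ODM. With ODM combine, each DSC
 * engine may see only a single slice horizontally even though the full line
 * contains several, so keying off num_slices_h alone (the code removed in
 * the next hunk) disabled the per-line ICH reset in exactly the case that
 * needs it. A minimal restatement of the new rule:
 */
static inline uint32_t dsc_ich_reset_at_eol(bool is_odm, int num_slices_h)
{
	/* 0xF requests an ICH reset at the end of every line */
	return (is_odm || num_slices_h > 1) ? 0xF : 0;
}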
@@ -531,7 +532,6 @@ static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, cons reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6; reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size; - reg_vals->ich_reset_at_eol = reg_vals->num_slices_h == 1 ? 0 : 0xf; } static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index 9235f7d29454..c0b21d7450d4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -562,19 +562,23 @@ void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub, } } -static void hubbub2_program_watermarks( +static bool hubbub2_program_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + bool wm_pending = false; /* * Need to clamp to max of the register values (i.e. no wrap) * for dcn1, all wm registers are 21-bit wide */ - hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); - hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); + if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; /* * There's a special case when going from p-state support to p-state unsupported @@ -592,6 +596,7 @@ static void hubbub2_program_watermarks( REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180); hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + return wm_pending; } static const struct hubbub_funcs hubbub2_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index a444fed94184..a023a4d59f41 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -307,7 +307,8 @@ void dcn20_init_blank( COLOR_DEPTH_UNDEFINED, &black_color, otg_active_width, - otg_active_height); + otg_active_height, + 0); if (num_opps == 2) { bottom_opp->funcs->opp_set_disp_pattern_generator( @@ -317,7 +318,8 @@ void dcn20_init_blank( COLOR_DEPTH_UNDEFINED, &black_color, otg_active_width, - otg_active_height); + otg_active_height, + 0); } hws->funcs.wait_for_blank_complete(opp); @@ -645,6 +647,9 @@ enum dc_status dcn20_enable_stream_timing( return DC_ERROR_UNEXPECTED; } + if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal))) + dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx); + pipe_ctx->stream_res.tg->funcs->program_timing( pipe_ctx->stream_res.tg, &stream->timing, @@ -974,7 +979,8 @@ void dcn20_blank_pixel_data( stream->timing.display_color_depth, &black_color, width, - height); + height, + 0); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator( @@ -985,7 +991,8 @@ void dcn20_blank_pixel_data( stream->timing.display_color_depth, &black_color, width, - height); + height, + 0); } if (!blank) @@ -1088,29 +1095,6 @@ void dcn20_enable_plane( // } } - -void dcn20_pipe_control_lock_global( - struct dc *dc, - struct pipe_ctx *pipe, - bool lock) -{ - if (lock) { - 
pipe->stream_res.tg->funcs->lock_doublebuffer_enable( - pipe->stream_res.tg); - pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg); - } else { - pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); - pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, - CRTC_STATE_VACTIVE); - pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, - CRTC_STATE_VBLANK); - pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, - CRTC_STATE_VACTIVE); - pipe->stream_res.tg->funcs->lock_doublebuffer_disable( - pipe->stream_res.tg); - } -} - void dcn20_pipe_control_lock( struct dc *dc, struct pipe_ctx *pipe, @@ -1121,7 +1105,7 @@ void dcn20_pipe_control_lock( /* use TG master update lock to lock everything on the TG * therefore only top pipe need to lock */ - if (pipe->top_pipe) + if (!pipe || pipe->top_pipe) return; if (pipe->plane_state != NULL) @@ -1389,6 +1373,7 @@ static void dcn20_update_dchubp_dpp( } if (pipe_ctx->update_flags.bits.viewport || + (context == dc->current_state && plane_state->update_flags.bits.position_change) || (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) { @@ -1536,48 +1521,32 @@ static void dcn20_program_pipe( } } -static bool does_pipe_need_lock(struct pipe_ctx *pipe) -{ - if ((pipe->plane_state && pipe->plane_state->update_flags.raw) - || pipe->update_flags.raw) - return true; - if (pipe->bottom_pipe) - return does_pipe_need_lock(pipe->bottom_pipe); - - return false; -} - void dcn20_program_front_end_for_ctx( struct dc *dc, struct dc_state *context) { - const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; int i; struct dce_hwseq *hws = dc->hwseq; - bool pipe_locked[MAX_PIPES] = {false}; DC_LOGGER_INIT(dc->ctx->logger); - /* Carry over GSL groups in case the context is changing. 
*/ - for (i = 0; i < dc->res_pool->pipe_count; i++) - if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream) - context->res_ctx.pipe_ctx[i].stream_res.gsl_group = - dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) { + ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); + if (dc->hwss.program_triplebuffer != NULL && + !dc->debug.disable_tri_buf) { + /*turn off triple buffer for full update*/ + dc->hwss.program_triplebuffer( + dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); + } + } + } /* Set pipe update flags and lock pipes */ for (i = 0; i < dc->res_pool->pipe_count; i++) dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i], &context->res_ctx.pipe_ctx[i]); - for (i = 0; i < dc->res_pool->pipe_count; i++) - if (!context->res_ctx.pipe_ctx[i].top_pipe && - does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - - if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable) - dc->hwss.pipe_control_lock(dc, pipe_ctx, true); - if (!pipe_ctx->update_flags.bits.enable) - dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true); - pipe_locked[i] = true; - } /* OTG blank before disabling all front ends */ for (i = 0; i < dc->res_pool->pipe_count; i++) @@ -1615,17 +1584,17 @@ void dcn20_program_front_end_for_ctx( hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); } } +} - /* Unlock all locked pipes */ - for (i = 0; i < dc->res_pool->pipe_count; i++) - if (pipe_locked[i]) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; +void dcn20_post_unlock_program_front_end( + struct dc *dc, + struct dc_state *context) +{ + int i; + const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; + struct dce_hwseq *hwseq = dc->hwseq; - if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable) - dc->hwss.pipe_control_lock(dc, pipe_ctx, false); - if (!pipe_ctx->update_flags.bits.enable) - dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false); - } + DC_LOGGER_INIT(dc->ctx->logger); for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) @@ -1651,11 +1620,26 @@ void dcn20_program_front_end_for_ctx( } /* WA to apply WM setting*/ - if (dc->hwseq->wa.DEGVIDCN21) + if (hwseq->wa.DEGVIDCN21) dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub); -} + /* WA for stutter underflow during MPO transitions when adding 2nd plane */ + if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) { + + if (dc->current_state->stream_status[0].plane_count == 1 && + context->stream_status[0].plane_count > 1) { + + struct timing_generator *tg = dc->res_pool->timing_generators[0]; + + dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false); + + hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true; + hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame = tg->funcs->get_frame_count(tg); + } + } +} + void dcn20_prepare_bandwidth( struct dc *dc, struct dc_state *context) @@ -1668,7 +1652,7 @@ void dcn20_prepare_bandwidth( false); /* program dchubbub watermarks */ - hubbub->funcs->program_watermarks(hubbub, + dc->wm_optimized_required = 
hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, false); @@ -2052,6 +2036,10 @@ static void dcn20_reset_back_end_for_pipe( * parent pipe. */ if (pipe_ctx->top_pipe == NULL) { + + if (pipe_ctx->stream_res.abm) + pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm); + pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); @@ -2306,7 +2294,8 @@ void dcn20_fpga_init_hw(struct dc *dc) REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(REFCLK_CNTL, 0); + if (REG(REFCLK_CNTL)) + REG_WRITE(REFCLK_CNTL, 0); // diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 02c9be5ebd47..63ce763f148e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -35,6 +35,9 @@ bool dcn20_set_shaper_3dlut( void dcn20_program_front_end_for_ctx( struct dc *dc, struct dc_state *context); +void dcn20_post_unlock_program_front_end( + struct dc *dc, + struct dc_state *context); void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, @@ -58,10 +61,6 @@ void dcn20_pipe_control_lock( struct dc *dc, struct pipe_ctx *pipe, bool lock); -void dcn20_pipe_control_lock_global( - struct dc *dc, - struct pipe_ctx *pipe, - bool lock); void dcn20_prepare_bandwidth( struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 5e640f17d3d4..8334bbd6eabb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -33,6 +33,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = NULL, .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, .update_plane_addr = dcn20_update_plane_addr, .update_dchub = dcn10_update_dchub, .update_pending_status = dcn10_update_pending_status, @@ -50,7 +51,8 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, .pipe_control_lock = dcn20_pipe_control_lock, - .pipe_control_lock_global = dcn20_pipe_control_lock_global, + .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, .prepare_bandwidth = dcn20_prepare_bandwidth, .optimize_bandwidth = dcn20_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, @@ -81,6 +83,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .init_vm_ctx = dcn20_init_vm_ctx, .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, }; static const struct hwseq_private_funcs dcn20_private_funcs = { @@ -96,6 +99,8 @@ static const struct hwseq_private_funcs dcn20_private_funcs = { .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, .enable_stream_timing = dcn20_enable_stream_timing, .edp_backlight_control = 
dce110_edp_backlight_control, + .is_panel_backlight_on = dce110_is_panel_backlight_on, + .is_panel_powered_on = dce110_is_panel_powered_on, .disable_stream_gating = dcn20_disable_stream_gating, .enable_stream_gating = dcn20_enable_stream_gating, .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h index 3fccd5eeecbb..7bcee5894d2e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h @@ -36,26 +36,6 @@ #define BASE(seg) \ BASE_INNER(seg) -#define SR(reg_name)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRI2(reg_name, block, id)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -#define SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SF(reg_name, field_name, post_fix)\ - .field_name = reg_name ## __ ## field_name ## post_fix - - #define MCIF_WB_COMMON_REG_LIST_DCN2_0(inst) \ SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\ SRI(MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB, inst),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index de9c857ab3e9..570dfd9a243f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -545,6 +545,7 @@ const struct mpc_funcs dcn20_mpc_funcs = { .mpc_init = mpc1_mpc_init, .mpc_init_single_inst = mpc1_mpc_init_single_inst, .update_blending = mpc2_update_blending, + .cursor_lock = mpc1_cursor_lock, .get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp, .wait_for_idle = mpc2_assert_idle_mpcc, .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h index c78fd5123497..496658f420db 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h @@ -179,7 +179,8 @@ SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\ SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\ - SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh) + SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\ + SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh) /* * DCN2 MPC_OCSC debug status register: diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c index 023cc71fad0f..138321e151eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c @@ -45,7 +45,8 @@ void opp2_set_disp_pattern_generator( enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, - int height) + int height, + int offset) { struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); enum test_pattern_color_format bit_depth; @@ -92,6 +93,11 @@ void opp2_set_disp_pattern_generator( DPG_ACTIVE_WIDTH, width, DPG_ACTIVE_HEIGHT, height); + /* set DPG offset */ + REG_SET_2(DPG_OFFSET_SEGMENT, 0, + DPG_X_OFFSET, offset, + DPG_SEGMENT_WIDTH, 0); + switch (test_pattern) { case 
CONTROLLER_DP_TEST_PATTERN_COLORSQUARES: case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA: diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h index 4093bec172c1..64c5b429c79a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h @@ -36,6 +36,7 @@ #define OPP_DPG_REG_LIST(id) \ SRI(DPG_CONTROL, DPG, id), \ SRI(DPG_DIMENSIONS, DPG, id), \ + SRI(DPG_OFFSET_SEGMENT, DPG, id), \ SRI(DPG_COLOUR_B_CB, DPG, id), \ SRI(DPG_COLOUR_G_Y, DPG, id), \ SRI(DPG_COLOUR_R_CR, DPG, id), \ @@ -53,6 +54,7 @@ uint32_t FMT_422_CONTROL; \ uint32_t DPG_CONTROL; \ uint32_t DPG_DIMENSIONS; \ + uint32_t DPG_OFFSET_SEGMENT; \ uint32_t DPG_COLOUR_B_CB; \ uint32_t DPG_COLOUR_G_Y; \ uint32_t DPG_COLOUR_R_CR; \ @@ -68,6 +70,8 @@ OPP_SF(DPG0_DPG_CONTROL, DPG_HRES, mask_sh), \ OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_WIDTH, mask_sh), \ OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_HEIGHT, mask_sh), \ + OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_X_OFFSET, mask_sh), \ + OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_SEGMENT_WIDTH, mask_sh), \ OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR0_R_CR, mask_sh), \ OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR1_R_CR, mask_sh), \ OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR0_B_CB, mask_sh), \ @@ -97,6 +101,8 @@ type DPG_HRES; \ type DPG_ACTIVE_WIDTH; \ type DPG_ACTIVE_HEIGHT; \ + type DPG_X_OFFSET; \ + type DPG_SEGMENT_WIDTH; \ type DPG_COLOUR0_R_CR; \ type DPG_COLOUR1_R_CR; \ type DPG_COLOUR0_B_CB; \ @@ -144,7 +150,8 @@ void opp2_set_disp_pattern_generator( enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, - int height); + int height, + int offset); bool opp2_dpg_is_blanked(struct output_pixel_processor *opp); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index e310d67c399a..e4348e3b6389 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -153,6 +153,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = { .xfc_supported = true, .xfc_fill_bw_overhead_percent = 10.0, .xfc_fill_constant_bytes = 0, + .number_of_cursors = 1, }; struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = { @@ -220,7 +221,8 @@ struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = { .xfc_supported = true, .xfc_fill_bw_overhead_percent = 10.0, .xfc_fill_constant_bytes = 0, - .ptoi_supported = 0 + .ptoi_supported = 0, + .number_of_cursors = 1, }; struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { @@ -506,6 +508,10 @@ enum dcn20_clk_src_array_id { .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + mm ## reg_name ## _ ## block ## id + /* NBIO */ #define NBIO_BASE_INNER(seg) \ NBIO_BASE__INST0_SEG ## seg @@ -1010,7 +1016,8 @@ static const struct dc_plane_cap plane_cap = { .pixel_format_support = { .argb8888 = true, .nv12 = true, - .fp16 = true + .fp16 = true, + .p010 = true }, .max_upscale_factor = { @@ -1039,7 +1046,7 @@ static const struct resource_caps res_cap_nv14 = { }; static const struct dc_debug_options debug_defaults_drv = { - .disable_dmcu = true, + .disable_dmcu = false, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, @@ -1058,7 +1065,7 @@ static const struct dc_debug_options debug_defaults_drv = { }; static const struct dc_debug_options debug_defaults_diags 
= { - .disable_dmcu = true, + .disable_dmcu = false, .force_abm_enable = false, .timing_trace = true, .clock_trace = true, @@ -1254,6 +1261,7 @@ static const struct encoder_feature_support link_enc_feature = { .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = true, + .fec_supported = true, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, @@ -1668,7 +1676,7 @@ static void acquire_dsc(struct resource_context *res_ctx, } } -static void release_dsc(struct resource_context *res_ctx, +void dcn20_release_dsc(struct resource_context *res_ctx, const struct resource_pool *pool, struct display_stream_compressor **dsc) { @@ -1728,7 +1736,7 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc, pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream_res.dsc) - release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc); + dcn20_release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc); } } @@ -1972,22 +1980,6 @@ void dcn20_populate_dml_writeback_from_context( } -static int get_num_odm_heads(struct pipe_ctx *pipe) -{ - int odm_head_count = 0; - struct pipe_ctx *next_pipe = pipe->next_odm_pipe; - while (next_pipe) { - odm_head_count++; - next_pipe = next_pipe->next_odm_pipe; - } - pipe = pipe->prev_odm_pipe; - while (pipe) { - odm_head_count++; - pipe = pipe->prev_odm_pipe; - } - return odm_head_count ? odm_head_count + 1 : 0; -} - int dcn20_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes) { @@ -2067,8 +2059,8 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].dout.dp_lanes = 4; pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min; pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max; - switch (get_num_odm_heads(&res_ctx->pipe_ctx[i])) { - case 2: + switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) { + case 1: pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1; break; default: @@ -2076,9 +2068,14 @@ int dcn20_populate_dml_pipes_from_context( } pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx; if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state - == res_ctx->pipe_ctx[i].plane_state) - pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx; - else if (res_ctx->pipe_ctx[i].prev_odm_pipe) { + == res_ctx->pipe_ctx[i].plane_state) { + struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].top_pipe; + + while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state + == res_ctx->pipe_ctx[i].plane_state) + first_pipe = first_pipe->top_pipe; + pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx; + } else if (res_ctx->pipe_ctx[i].prev_odm_pipe) { struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].prev_odm_pipe; while (first_pipe->prev_odm_pipe) @@ -2163,16 +2160,20 @@ int dcn20_populate_dml_pipes_from_context( /* todo: default max for now, until there is logic reflecting this in dc*/ pipes[pipe_cnt].dout.output_bpc = 12; /* - * Use max cursor settings for calculations to minimize + * For graphic plane, cursor number is 1, nv12 is 0 * bw calculations due to cursor on/off */ - pipes[pipe_cnt].pipe.src.num_cursors = 2; + if (res_ctx->pipe_ctx[i].plane_state && + res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) + pipes[pipe_cnt].pipe.src.num_cursors = 0; + else + pipes[pipe_cnt].pipe.src.num_cursors = 
dc->dml.ip.number_of_cursors; + pipes[pipe_cnt].pipe.src.cur0_src_width = 256; pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit; - pipes[pipe_cnt].pipe.src.cur1_src_width = 256; - pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit; if (!res_ctx->pipe_ctx[i].plane_state) { + pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled; pipes[pipe_cnt].pipe.src.source_scan = dm_horz; pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear; pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile; @@ -2198,19 +2199,21 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/ pipes[pipe_cnt].pipe.scale_taps.htaps = 1; pipes[pipe_cnt].pipe.scale_taps.vtaps = 1; - pipes[pipe_cnt].pipe.src.is_hsplit = 0; - pipes[pipe_cnt].pipe.dest.odm_combine = 0; pipes[pipe_cnt].pipe.dest.vtotal_min = v_total; pipes[pipe_cnt].pipe.dest.vtotal_max = v_total; + + if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1) { + pipes[pipe_cnt].pipe.src.viewport_width /= 2; + pipes[pipe_cnt].pipe.dest.recout_width /= 2; + } } else { struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state; struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data; pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate; - pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe - && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) - || (res_ctx->pipe_ctx[i].top_pipe - && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln); + pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) + || (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) + || pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled; pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90 || pln->rotation == ROTATION_ANGLE_270 ? 
dm_vert : dm_horz; pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y; @@ -2235,18 +2238,22 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable; pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width; pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height; - pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width; pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height; - if (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) { - pipes[pipe_cnt].pipe.dest.full_recout_width += - res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.width; - pipes[pipe_cnt].pipe.dest.full_recout_height += - res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.height; - } else if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) { - pipes[pipe_cnt].pipe.dest.full_recout_width += - res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.width; - pipes[pipe_cnt].pipe.dest.full_recout_height += - res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.height; + pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width; + if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1) + pipes[pipe_cnt].pipe.dest.full_recout_width *= 2; + else { + struct pipe_ctx *split_pipe = res_ctx->pipe_ctx[i].bottom_pipe; + + while (split_pipe && split_pipe->plane_state == pln) { + pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width; + split_pipe = split_pipe->bottom_pipe; + } + split_pipe = res_ctx->pipe_ctx[i].top_pipe; + while (split_pipe && split_pipe->plane_state == pln) { + pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width; + split_pipe = split_pipe->top_pipe; + } } pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16; @@ -2413,6 +2420,7 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; @@ -2499,7 +2507,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, return secondary_pipe; } -void dcn20_merge_pipes_for_validate( +static void dcn20_merge_pipes_for_validate( struct dc *dc, struct dc_state *context) { @@ -2524,7 +2532,7 @@ void dcn20_merge_pipes_for_validate( odm_pipe->prev_odm_pipe = NULL; odm_pipe->next_odm_pipe = NULL; if (odm_pipe->stream_res.dsc) - release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc); + dcn20_release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc); /* Clear plane_res and stream_res */ memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res)); memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res)); @@ -2562,41 +2570,29 @@ int dcn20_validate_apply_pipe_split_flags( struct dc *dc, struct dc_state *context, int vlevel, - bool *split) + bool *split, + bool *merge) { int i, pipe_idx, vlevel_split; + int plane_count = 0; bool force_split = false; - bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC; + bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID; - /* Single display loop, exits if there is more than one display */ + if (context->stream_count > 1) { + if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) + avoid_split = true; + } else if (dc->debug.force_single_disp_pipe_split) + force_split = true; + + /* TODO: fix dc bugs and remove this split threshold thing */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - bool exit_loop = false; - if (!pipe->stream || pipe->top_pipe) - continue; - - if (dc->debug.force_single_disp_pipe_split) { - if (!force_split) - force_split = true; - else { - force_split = false; - exit_loop = true; - } - } - if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) { - if (avoid_split) - avoid_split = false; - else { - avoid_split = true; - exit_loop = true; - } - } - if (exit_loop) - break; + if (pipe->stream && !pipe->prev_odm_pipe && + (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state)) + ++plane_count; } - /* TODO: fix dc bugs and remove this split threshold thing */ - if (context->stream_count > dc->res_pool->pipe_count / 2) + if (plane_count > dc->res_pool->pipe_count / 2) avoid_split = true; /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */ @@ -2619,11 +2615,12 @@ int dcn20_validate_apply_pipe_split_flags( /* Split loop sets which pipe should be split based on dml outputs and dc flags */ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + int pipe_plane = context->bw_ctx.dml.vba.pipe_plane[pipe_idx]; if (!context->res_ctx.pipe_ctx[i].stream) continue; - if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1) + if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1) split[i] = true; if ((pipe->stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE || @@ -2636,10 +2633,44 @@ int dcn20_validate_apply_pipe_split_flags( split[i] = true; if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { split[i] = true; - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1; + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1; } - 
context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; + context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] = + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane]; + + if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) { + /*Already split odm pipe tree, don't try to split again*/ + split[i] = false; + split[pipe->prev_odm_pipe->pipe_idx] = false; + } else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state + && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) { + /*Already split mpc tree, don't try to split again, assumes only 2x mpc combine*/ + split[i] = false; + split[pipe->top_pipe->pipe_idx] = false; + } else if (pipe->prev_odm_pipe || (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)) { + if (split[i] == false) { + /*Exiting mpc/odm combine*/ + merge[i] = true; + if (pipe->prev_odm_pipe) { + ASSERT(0); /*should not actually happen yet*/ + merge[pipe->prev_odm_pipe->pipe_idx] = true; + } else + merge[pipe->top_pipe->pipe_idx] = true; + } else { + /*Transition from mpc combine to odm combine or vice versa*/ + ASSERT(0); /*should not actually happen yet*/ + split[i] = true; + merge[i] = true; + if (pipe->prev_odm_pipe) { + split[pipe->prev_odm_pipe->pipe_idx] = true; + merge[pipe->prev_odm_pipe->pipe_idx] = true; + } else { + split[pipe->top_pipe->pipe_idx] = true; + merge[pipe->top_pipe->pipe_idx] = true; + } + } + } + /* Adjust dppclk when split is forced, do not bother with dispclk */ if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1) context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2; @@ -2681,7 +2712,7 @@ bool dcn20_fast_validate_bw( if (vlevel > context->bw_ctx.dml.soc.num_states) goto validate_fail; - vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split); + vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL); /*initialize pipe_just_split_from to invalid idx*/ for (i = 0; i < MAX_PIPES; i++) @@ -2901,6 +2932,9 @@ void dcn20_calculate_dlg_params( != dm_dram_clock_change_unsupported; context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; + if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz) + context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz; + /* * An artifact of dml pipe split/odm is that pipes get merged back together for * calculation. 
Therefore we need to only extract for first pipe in ascending index order @@ -3034,25 +3068,32 @@ validate_out: return out; } - -bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) +/* + * This must be noinline to ensure anything that deals with FP registers + * is contained within this call; previously our compiling with hard-float + * would result in fp instructions being emitted outside of the boundaries + * of the DC_FP_START/END macros, which makes sense as the compiler has no + * idea about what is wrapped and what is not + * + * This is largely just a workaround to avoid breakage introduced with 5.6, + * ideally all fp-using code should be moved into its own file, only that + * should be compiled with hard-float, and all code exported from there + * should be strictly wrapped with DC_FP_START/END + */ +static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc, + struct dc_state *context, bool fast_validate) { bool voltage_supported = false; bool full_pstate_supported = false; bool dummy_pstate_supported = false; double p_state_latency_us; - DC_FP_START(); p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support = dc->debug.disable_dram_clock_change_vactive_support; if (fast_validate) { - voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true); - - DC_FP_END(); - return voltage_supported; + return dcn20_validate_bandwidth_internal(dc, context, true); } // Best case, we support full UCLK switch latency @@ -3081,7 +3122,15 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, restore_dml_state: context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; + return voltage_supported; +} +bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, + bool fast_validate) +{ + bool voltage_supported = false; + DC_FP_START(); + voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate); DC_FP_END(); return voltage_supported; } @@ -3138,7 +3187,7 @@ static struct dc_cap_funcs cap_funcs = { }; -enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state) +enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state) { enum dc_status result = DC_OK; @@ -3164,7 +3213,7 @@ static struct resource_funcs dcn20_res_pool_funcs = { .add_stream_to_ctx = dcn20_add_stream_to_ctx, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, - .get_default_swizzle_mode = dcn20_get_default_swizzle_mode, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .set_mcif_arb_params = dcn20_set_mcif_arb_params, .populate_dml_pipes = dcn20_populate_dml_pipes_from_context, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link @@ -3313,7 +3362,7 @@ void dcn20_cap_soc_clocks( void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states) { - struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES]; + struct _vcs_dpi_voltage_scaling_st calculated_states[DC__VOLTAGE_STATES]; int i; int num_calculated_states = 0; int min_dcfclk = 0; @@ -3886,6 +3935,15 @@ static bool dcn20_resource_construct( dcn20_hw_sequencer_construct(dc); + // IF NV12, set PG function pointer to NULL. 
It's not that + // PG isn't supported for NV12, it's that we don't want to + // program the registers because that will cause more power + // to be consumed. We could have created dcn20_init_hw to get + // the same effect by checking ASIC rev, but there was a + // request at some point to not check ASIC rev on hw sequencer. + if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) + dc->hwseq->funcs.enable_power_gating_plane = NULL; + dc->caps.max_planes = pool->base.pipe_count; for (i = 0; i < dc->caps.max_planes; ++i) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index f5893840b79b..9d5bff9455fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -119,14 +119,15 @@ void dcn20_set_mcif_arb_params( display_e2e_pipe_params_st *pipes, int pipe_cnt); bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); -void dcn20_merge_pipes_for_validate( - struct dc *dc, - struct dc_state *context); int dcn20_validate_apply_pipe_split_flags( struct dc *dc, struct dc_state *context, int vlevel, - bool *split); + bool *split, + bool *merge); +void dcn20_release_dsc(struct resource_context *res_ctx, + const struct resource_pool *pool, + struct display_stream_compressor **dsc); bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx); void dcn20_split_stream_for_mpc( struct resource_context *res_ctx, @@ -159,7 +160,7 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream); enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); -enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state); +enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state); void dcn20_patch_bounding_box( struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index 9b70a1e7b962..99a7ef6ab878 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -616,5 +616,6 @@ void dcn20_stream_encoder_construct( enc1->regs = regs; enc1->se_shift = se_shift; enc1->se_mask = se_mask; + enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h index 02fafb013fc6..f1ef46e8da5b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h @@ -34,13 +34,6 @@ #define BASE(seg) \ BASE_INNER(seg) -#define SRI(reg_name, block, id)\ - .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SF(reg_name, field_name, post_fix)\ - .field_name = reg_name ## __ ## field_name ## post_fix - #define DCN20_VMID_REG_LIST(id)\ SRI(CNTL, DCN_VM_CONTEXT, id),\ SRI(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c index f546260c15b7..5e2d14b897af 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c @@ -141,7 +141,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub, return NUM_VMID; } -void hubbub21_program_urgent_watermarks( +bool hubbub21_program_urgent_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -149,6 +149,7 @@ void hubbub21_program_urgent_watermarks( { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); uint32_t prog_wm_value; + bool wm_pending = false; /* Repeat for water mark set A, B, C and D. */ /* clock state A */ @@ -163,7 +164,8 @@ void hubbub21_program_urgent_watermarks( DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.urgent_ns, prog_wm_value); - } + } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns) + wm_pending = true; /* determine the transfer time for a quantity of data for a particular requestor.*/ if (safe_to_lower || watermarks->a.frac_urg_bw_flip @@ -172,7 +174,9 @@ void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip); - } + } else if (watermarks->a.frac_urg_bw_flip + < hubbub1->watermarks.a.frac_urg_bw_flip) + wm_pending = true; if (safe_to_lower || watermarks->a.frac_urg_bw_nom > hubbub1->watermarks.a.frac_urg_bw_nom) { @@ -180,14 +184,18 @@ void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom); - } + } else if (watermarks->a.frac_urg_bw_nom + < hubbub1->watermarks.a.frac_urg_bw_nom) + wm_pending = true; + if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) { hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, refclk_mhz, 0x1fffff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); - } + } else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns) + wm_pending = true; /* clock state B */ if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) { @@ -201,7 +209,8 @@ void hubbub21_program_urgent_watermarks( DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.urgent_ns, prog_wm_value); - } + } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns) + wm_pending = true; /* determine the transfer time for a quantity of data for a particular requestor.*/ if (safe_to_lower || watermarks->a.frac_urg_bw_flip @@ -210,7 +219,9 @@ void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip); - } + } else if (watermarks->a.frac_urg_bw_flip + < hubbub1->watermarks.a.frac_urg_bw_flip) + wm_pending = true; if (safe_to_lower || watermarks->a.frac_urg_bw_nom > hubbub1->watermarks.a.frac_urg_bw_nom) { @@ -218,7 +229,9 @@ void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom); - } + } else if (watermarks->a.frac_urg_bw_nom + < hubbub1->watermarks.a.frac_urg_bw_nom) + wm_pending = true; if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) { hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; @@ -226,7 
+239,8 @@ void hubbub21_program_urgent_watermarks( refclk_mhz, 0x1fffff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); - } + } else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns) + wm_pending = true; /* clock state C */ if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) { @@ -240,7 +254,8 @@ void hubbub21_program_urgent_watermarks( DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.urgent_ns, prog_wm_value); - } + } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns) + wm_pending = true; /* determine the transfer time for a quantity of data for a particular requestor.*/ if (safe_to_lower || watermarks->a.frac_urg_bw_flip @@ -249,7 +264,9 @@ void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip); - } + } else if (watermarks->a.frac_urg_bw_flip + < hubbub1->watermarks.a.frac_urg_bw_flip) + wm_pending = true; if (safe_to_lower || watermarks->a.frac_urg_bw_nom > hubbub1->watermarks.a.frac_urg_bw_nom) { @@ -257,7 +274,9 @@ void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom); - } + } else if (watermarks->a.frac_urg_bw_nom + < hubbub1->watermarks.a.frac_urg_bw_nom) + wm_pending = true; if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) { hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; @@ -265,7 +284,8 @@ void hubbub21_program_urgent_watermarks( refclk_mhz, 0x1fffff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); - } + } else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns) + wm_pending = true; /* clock state D */ if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) { @@ -279,7 +299,8 @@ void hubbub21_program_urgent_watermarks( DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.urgent_ns, prog_wm_value); - } + } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns) + wm_pending = true; /* determine the transfer time for a quantity of data for a particular requestor.*/ if (safe_to_lower || watermarks->a.frac_urg_bw_flip @@ -288,7 +309,9 @@ void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip); - } + } else if (watermarks->a.frac_urg_bw_flip + < hubbub1->watermarks.a.frac_urg_bw_flip) + wm_pending = true; if (safe_to_lower || watermarks->a.frac_urg_bw_nom > hubbub1->watermarks.a.frac_urg_bw_nom) { @@ -296,7 +319,9 @@ void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom); - } + } else if (watermarks->a.frac_urg_bw_nom + < hubbub1->watermarks.a.frac_urg_bw_nom) + wm_pending = true; if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) { hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; @@ -304,10 +329,13 @@ void hubbub21_program_urgent_watermarks( refclk_mhz, 0x1fffff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); - } + } else if 
(watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns) + wm_pending = true; + + return wm_pending; } -void hubbub21_program_stutter_watermarks( +bool hubbub21_program_stutter_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -315,6 +343,7 @@ void hubbub21_program_stutter_watermarks( { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); uint32_t prog_wm_value; + bool wm_pending = false; /* clock state A */ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns @@ -330,7 +359,9 @@ void hubbub21_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } + } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) { @@ -345,7 +376,9 @@ void hubbub21_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); - } + } else if (watermarks->a.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) + wm_pending = true; /* clock state B */ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns @@ -361,7 +394,9 @@ void hubbub21_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } + } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) { @@ -376,7 +411,9 @@ void hubbub21_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); - } + } else if (watermarks->b.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) + wm_pending = true; /* clock state C */ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns @@ -392,7 +429,9 @@ void hubbub21_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } + } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) { @@ -407,7 +446,9 @@ void hubbub21_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); - } + } else if (watermarks->c.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) + wm_pending = true; /* clock state D */ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns @@ -423,7 +464,9 @@ void hubbub21_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated 
=%d\n" "HW register value = 0x%x\n", watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } + } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) { @@ -438,10 +481,14 @@ void hubbub21_program_stutter_watermarks( DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); - } + } else if (watermarks->d.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + return wm_pending; } -void hubbub21_program_pstate_watermarks( +bool hubbub21_program_pstate_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -450,6 +497,8 @@ void hubbub21_program_pstate_watermarks( struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); uint32_t prog_wm_value; + bool wm_pending = false; + /* clock state A */ if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns > hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) { @@ -464,7 +513,9 @@ void hubbub21_program_pstate_watermarks( DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); - } + } else if (watermarks->a.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) + wm_pending = true; /* clock state B */ if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns @@ -480,7 +531,9 @@ void hubbub21_program_pstate_watermarks( DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); - } + } else if (watermarks->b.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) + wm_pending = true; /* clock state C */ if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns @@ -496,7 +549,9 @@ void hubbub21_program_pstate_watermarks( DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); - } + } else if (watermarks->c.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) + wm_pending = true; /* clock state D */ if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns @@ -512,20 +567,30 @@ void hubbub21_program_pstate_watermarks( DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); - } + } else if (watermarks->d.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) + wm_pending = true; + + return wm_pending; } -void hubbub21_program_watermarks( +bool hubbub21_program_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + bool wm_pending = false; + + if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; - hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); - hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); -
hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); + if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; /* * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric. @@ -549,6 +614,8 @@ void hubbub21_program_watermarks( DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF); hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + + return wm_pending; } void hubbub21_wm_read_state(struct hubbub *hubbub, @@ -635,6 +702,7 @@ static const struct hubbub_funcs hubbub21_funcs = { .wm_read_state = hubbub21_wm_read_state, .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, .program_watermarks = hubbub21_program_watermarks, + .allow_self_refresh_control = hubbub1_allow_self_refresh_control, .apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h index c4840dfb1fa5..ef3ef28509ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h @@ -113,22 +113,22 @@ void dcn21_dchvm_init(struct hubbub *hubbub); int hubbub21_init_dchub(struct hubbub *hubbub, struct dcn_hubbub_phys_addr_config *pa_config); -void hubbub21_program_watermarks( +bool hubbub21_program_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); -void hubbub21_program_urgent_watermarks( +bool hubbub21_program_urgent_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); -void hubbub21_program_stutter_watermarks( +bool hubbub21_program_stutter_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); -void hubbub21_program_pstate_watermarks( +bool hubbub21_program_pstate_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index cf09b9335728..d285ba622d61 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -79,32 +79,47 @@ void apply_DEDCN21_142_wa_for_hostvm_deadline( struct _vcs_dpi_display_dlg_regs_st *dlg_attr) { struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); - uint32_t cur_value; + uint32_t refcyc_per_vm_group_vblank; + uint32_t refcyc_per_vm_req_vblank; + uint32_t refcyc_per_vm_group_flip; + uint32_t refcyc_per_vm_req_flip; + const uint32_t uninitialized_hw_default = 0; - REG_GET(VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, &cur_value); - if (cur_value > dlg_attr->refcyc_per_vm_group_vblank) + REG_GET(VBLANK_PARAMETERS_5, + REFCYC_PER_VM_GROUP_VBLANK, &refcyc_per_vm_group_vblank); + + if (refcyc_per_vm_group_vblank == uninitialized_hw_default || + refcyc_per_vm_group_vblank > dlg_attr->refcyc_per_vm_group_vblank) REG_SET(VBLANK_PARAMETERS_5, 0, REFCYC_PER_VM_GROUP_VBLANK, dlg_attr->refcyc_per_vm_group_vblank); REG_GET(VBLANK_PARAMETERS_6, - REFCYC_PER_VM_REQ_VBLANK, - &cur_value); - if (cur_value > dlg_attr->refcyc_per_vm_req_vblank) + REFCYC_PER_VM_REQ_VBLANK, &refcyc_per_vm_req_vblank); + + if (refcyc_per_vm_req_vblank == uninitialized_hw_default || + refcyc_per_vm_req_vblank 
> dlg_attr->refcyc_per_vm_req_vblank) REG_SET(VBLANK_PARAMETERS_6, 0, REFCYC_PER_VM_REQ_VBLANK, dlg_attr->refcyc_per_vm_req_vblank); - REG_GET(FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, &cur_value); - if (cur_value > dlg_attr->refcyc_per_vm_group_flip) + REG_GET(FLIP_PARAMETERS_3, + REFCYC_PER_VM_GROUP_FLIP, &refcyc_per_vm_group_flip); + + if (refcyc_per_vm_group_flip == uninitialized_hw_default || + refcyc_per_vm_group_flip > dlg_attr->refcyc_per_vm_group_flip) REG_SET(FLIP_PARAMETERS_3, 0, REFCYC_PER_VM_GROUP_FLIP, dlg_attr->refcyc_per_vm_group_flip); - REG_GET(FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, &cur_value); - if (cur_value > dlg_attr->refcyc_per_vm_req_flip) + REG_GET(FLIP_PARAMETERS_4, + REFCYC_PER_VM_REQ_FLIP, &refcyc_per_vm_req_flip); + + if (refcyc_per_vm_req_flip == uninitialized_hw_default || + refcyc_per_vm_req_flip > dlg_attr->refcyc_per_vm_req_flip) REG_SET(FLIP_PARAMETERS_4, 0, REFCYC_PER_VM_REQ_FLIP, dlg_attr->refcyc_per_vm_req_flip); REG_SET(FLIP_PARAMETERS_5, 0, REFCYC_PER_PTE_GROUP_FLIP_C, dlg_attr->refcyc_per_pte_group_flip_c); + REG_SET(FLIP_PARAMETERS_6, 0, REFCYC_PER_META_CHUNK_FLIP_C, dlg_attr->refcyc_per_meta_chunk_flip_c); } @@ -325,13 +340,9 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, { struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); - PHYSICAL_ADDRESS_LOC mc_vm_apt_default; PHYSICAL_ADDRESS_LOC mc_vm_apt_low; PHYSICAL_ADDRESS_LOC mc_vm_apt_high; - // The format of default addr is 48:12 of the 48 bit addr - mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12; - // The format of high/low are 48:18 of the 48 bit addr mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18; mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c index 081ad8e43d58..ada65b1a7eb1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -112,3 +112,25 @@ void dcn21_optimize_pwr_state( true); } +/* If the user hotplugs an HDMI monitor while in monitor off, + * the OS will do a mode set (with output timing) but keep the output off. + * In this case DAL will ask the vbios to power up the PLL in the PHY. + * If the user then unplugs the monitor (while we are still in monitor off), or + * the system attempts to enter modern standby (in which we disable the PLL), + * the PHY will hang on the next mode set attempt. + * If a PLL enable is followed by a PLL disable (without executing a lane + * enable/disable in between), RDPCS_PHY_DP_MPLLB_STATE remains 1, + * which indicates that the PLL disable attempt did not actually go through. + * As a workaround, insert a PHY lane enable/disable before the PLL disable.
+ */ +void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx) +{ + if (!pipe_ctx->stream->dpms_off) + return; + + pipe_ctx->stream->dpms_off = false; + core_link_enable_stream(context, pipe_ctx); + core_link_disable_stream(pipe_ctx); + pipe_ctx->stream->dpms_off = true; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h index 182736096123..26bf24d3b59f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h @@ -44,4 +44,7 @@ void dcn21_optimize_pwr_state( const struct dc *dc, struct dc_state *context); +void dcn21_PLAT_58856_wa(struct dc_state *context, + struct pipe_ctx *pipe_ctx); + #endif /* __DC_HWSS_DCN21_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index fddbd59bf4f9..4dd634118df2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -34,6 +34,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = NULL, .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, .update_plane_addr = dcn20_update_plane_addr, .update_dchub = dcn10_update_dchub, .update_pending_status = dcn10_update_pending_status, @@ -51,7 +52,8 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, .pipe_control_lock = dcn20_pipe_control_lock, - .pipe_control_lock_global = dcn20_pipe_control_lock_global, + .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, .prepare_bandwidth = dcn20_prepare_bandwidth, .optimize_bandwidth = dcn20_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, @@ -84,6 +86,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, .set_cursor_position = dcn10_set_cursor_position, .set_cursor_attribute = dcn10_set_cursor_attribute, .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, @@ -104,6 +107,8 @@ static const struct hwseq_private_funcs dcn21_private_funcs = { .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, .enable_stream_timing = dcn20_enable_stream_timing, .edp_backlight_control = dce110_edp_backlight_control, + .is_panel_backlight_on = dce110_is_panel_backlight_on, + .is_panel_powered_on = dce110_is_panel_powered_on, .disable_stream_gating = dcn20_disable_stream_gating, .enable_stream_gating = dcn20_enable_stream_gating, .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, @@ -127,6 +132,7 @@ static const struct hwseq_private_funcs dcn21_private_funcs = { .dccg_init = dcn20_dccg_init, .set_blend_lut = dcn20_set_blend_lut, .set_shaper_3dlut = dcn20_set_shaper_3dlut, + .PLAT_58856_wa = dcn21_PLAT_58856_wa, }; void dcn21_hw_sequencer_construct(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 33d0a176841a..a721bb401ef0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -84,7 +84,7 @@ #include 
"dcn21_resource.h" #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" -#include "../dce/dmub_psr.h" +#include "dce/dmub_psr.h" #define SOC_BOUNDING_BOX_VALID false #define DC_LOGGER_INIT(logger) @@ -156,17 +156,18 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = { .xfc_supported = false, .xfc_fill_bw_overhead_percent = 10.0, .xfc_fill_constant_bytes = 0, - .ptoi_supported = 0 + .ptoi_supported = 0, + .number_of_cursors = 1, }; struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .clock_limits = { { .state = 0, - .dcfclk_mhz = 304.0, - .fabricclk_mhz = 600.0, - .dispclk_mhz = 618.0, - .dppclk_mhz = 440.0, + .dcfclk_mhz = 400.0, + .fabricclk_mhz = 400.0, + .dispclk_mhz = 600.0, + .dppclk_mhz = 400.00, .phyclk_mhz = 600.0, .socclk_mhz = 278.0, .dscclk_mhz = 205.67, @@ -174,10 +175,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { }, { .state = 1, - .dcfclk_mhz = 304.0, - .fabricclk_mhz = 600.0, - .dispclk_mhz = 618.0, - .dppclk_mhz = 618.0, + .dcfclk_mhz = 464.52, + .fabricclk_mhz = 800.0, + .dispclk_mhz = 654.55, + .dppclk_mhz = 626.09, .phyclk_mhz = 600.0, .socclk_mhz = 278.0, .dscclk_mhz = 205.67, @@ -185,32 +186,65 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { }, { .state = 2, - .dcfclk_mhz = 608.0, - .fabricclk_mhz = 1066.0, - .dispclk_mhz = 888.0, - .dppclk_mhz = 888.0, - .phyclk_mhz = 810.0, + .dcfclk_mhz = 514.29, + .fabricclk_mhz = 933.0, + .dispclk_mhz = 757.89, + .dppclk_mhz = 685.71, + .phyclk_mhz = 600.0, .socclk_mhz = 278.0, .dscclk_mhz = 287.67, - .dram_speed_mts = 2133.0, + .dram_speed_mts = 1866.0, }, { .state = 3, - .dcfclk_mhz = 676.0, - .fabricclk_mhz = 1600.0, - .dispclk_mhz = 1015.0, - .dppclk_mhz = 1015.0, - .phyclk_mhz = 810.0, + .dcfclk_mhz = 576.00, + .fabricclk_mhz = 1067.0, + .dispclk_mhz = 847.06, + .dppclk_mhz = 757.89, + .phyclk_mhz = 600.0, .socclk_mhz = 715.0, .dscclk_mhz = 318.334, - .dram_speed_mts = 4266.0, + .dram_speed_mts = 2134.0, }, { .state = 4, - .dcfclk_mhz = 810.0, + .dcfclk_mhz = 626.09, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 900.00, + .dppclk_mhz = 847.06, + .phyclk_mhz = 810.0, + .socclk_mhz = 953.0, + .dscclk_mhz = 489.0, + .dram_speed_mts = 2400.0, + }, + { + .state = 5, + .dcfclk_mhz = 685.71, + .fabricclk_mhz = 1333.0, + .dispclk_mhz = 1028.57, + .dppclk_mhz = 960.00, + .phyclk_mhz = 810.0, + .socclk_mhz = 278.0, + .dscclk_mhz = 287.67, + .dram_speed_mts = 2666.0, + }, + { + .state = 6, + .dcfclk_mhz = 757.89, + .fabricclk_mhz = 1467.0, + .dispclk_mhz = 1107.69, + .dppclk_mhz = 1028.57, + .phyclk_mhz = 810.0, + .socclk_mhz = 715.0, + .dscclk_mhz = 318.334, + .dram_speed_mts = 3200.0, + }, + { + .state = 7, + .dcfclk_mhz = 847.06, .fabricclk_mhz = 1600.0, .dispclk_mhz = 1395.0, - .dppclk_mhz = 1285.0, + .dppclk_mhz = 1285.00, .phyclk_mhz = 1325.0, .socclk_mhz = 953.0, .dscclk_mhz = 489.0, @@ -218,8 +252,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { }, /*Extra state, no dispclk ramping*/ { - .state = 5, - .dcfclk_mhz = 810.0, + .state = 8, + .dcfclk_mhz = 847.06, .fabricclk_mhz = 1600.0, .dispclk_mhz = 1395.0, .dppclk_mhz = 1285.0, @@ -250,7 +284,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .dram_channel_width_bytes = 4, .fabric_datapath_to_dcn_data_return_bytes = 32, .dcn_downspread_percent = 0.5, - .downspread_percent = 0.5, + .downspread_percent = 0.38, .dram_page_open_time_ns = 50.0, .dram_rw_turnaround_time_ns = 17.5, .dram_return_buffer_per_channel_bytes = 8192, @@ -266,7 +300,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .xfc_bus_transport_time_us = 4, .xfc_xbuf_latency_tolerance_us = 4, 
.use_urgent_burst_bw = 1, - .num_states = 5 + .num_states = 8 }; #ifndef MAX @@ -306,6 +340,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + mm ## reg_name ## _ ## block ## id + /* NBIO */ #define NBIO_BASE_INNER(seg) \ NBIF0_BASE__INST0_SEG ## seg @@ -804,7 +842,8 @@ static const struct dc_plane_cap plane_cap = { .pixel_format_support = { .argb8888 = true, .nv12 = true, - .fp16 = true + .fp16 = true, + .p010 = true }, .max_upscale_factor = { @@ -821,11 +860,12 @@ static const struct dc_plane_cap plane_cap = { }; static const struct dc_debug_options debug_defaults_drv = { - .disable_dmcu = true, + .disable_dmcu = false, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, + .min_disp_clk_khz = 100000, .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, @@ -841,7 +881,7 @@ static const struct dc_debug_options debug_defaults_drv = { }; static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, + .disable_dmcu = false, .force_abm_enable = false, .timing_trace = true, .clock_trace = true, @@ -962,6 +1002,9 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool) if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); + if (pool->base.psr != NULL) + dmub_psr_destroy(&pool->base.psr); + if (pool->base.dccg != NULL) dcn_dccg_destroy(&pool->base.dccg); @@ -1335,26 +1378,50 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param { struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool); struct clk_limit_table *clk_table = &bw_params->clk_table; - int i; - - dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator; - dcn2_1_ip.max_num_dpp = pool->base.pipe_count; - dcn2_1_soc.num_chans = bw_params->num_channels; - - for (i = 0; i < clk_table->num_entries; i++) { + struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; + unsigned int i, j, closest_clk_lvl; + + // Default clock levels are used for diags, which may lead to overclocking. 
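+	// The SMU clock table only carries dcfclk/fclk/socclk/memclk per
+	// level, so for each reported level the closest (equal or lower)
+	// DCFCLK entry in the hardcoded table supplies the remaining clocks
+	// (dispclk, dppclk, phyclk, dscclk, dtbclk).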
+	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+		dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
+		dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
+		dcn2_1_soc.num_chans = bw_params->num_channels;
+
+		ASSERT(clk_table->num_entries);
+		for (i = 0; i < clk_table->num_entries; i++) {
+			/* loop backwards; j stays unsigned, so count down from num_states to 1 */
+			for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states; j > 0; j--) {
+				if ((unsigned int) dcn2_1_soc.clock_limits[j - 1].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+					closest_clk_lvl = j - 1;
+					break;
+				}
+			}
 
-		dcn2_1_soc.clock_limits[i].state = i;
-		dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
-		dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
-		dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
-		dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+			clock_limits[i].state = i;
+			clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+			clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+			clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+			clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+			clock_limits[i].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+			clock_limits[i].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+			clock_limits[i].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+			clock_limits[i].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+			clock_limits[i].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+			clock_limits[i].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+			clock_limits[i].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+		}
+		for (i = 0; i < clk_table->num_entries; i++)
+			dcn2_1_soc.clock_limits[i] = clock_limits[i];
+		if (clk_table->num_entries) {
+			dcn2_1_soc.num_states = clk_table->num_entries;
+			/* duplicate last level */
+			dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
+			dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
+		}
 	}
 
-	dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
-	dcn2_1_soc.num_states = i;
-	// diags does not retrieve proper values from SMU, do not update DML instance for diags
-	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment))
-		dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
+	dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
 }
 
 /* Temporary placeholder until we can get them from fuse */
@@ -1476,6 +1543,7 @@ static struct dce_hwseq *dcn21_hwseq_create(
 		hws->shifts = &hwseq_shift;
 		hws->masks = &hwseq_mask;
 		hws->wa.DEGVIDCN21 = true;
+		hws->wa.disallow_self_refresh_during_multi_plane_transition = true;
 	}
 	return hws;
 }
@@ -1499,6 +1567,7 @@ static const struct encoder_feature_support link_enc_feature = {
 		.max_hdmi_pixel_clock = 600000,
 		.hdmi_ycbcr420_supported = true,
 		.dp_ycbcr420_supported = true,
+		.fec_supported = true,
 		.flags.bits.IS_HBR2_CAPABLE = true,
 		.flags.bits.IS_HBR3_CAPABLE = true,
 		.flags.bits.IS_TPS3_CAPABLE = true,
@@ -1639,6 +1708,19 @@ static int dcn21_populate_dml_pipes_from_context(
 	return pipe_cnt;
}
 
+enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
+{
+	enum dc_status result = DC_OK;
+
+	if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) {
+		plane_state->dcc.enable = 1;
+		
/* align to our worst case block width */ + plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024; + } + result = dcn20_patch_unknown_plane_state(plane_state); + return result; +} + static struct resource_funcs dcn21_res_pool_funcs = { .destroy = dcn21_destroy_resource_pool, .link_enc_create = dcn21_link_encoder_create, @@ -1648,7 +1730,7 @@ static struct resource_funcs dcn21_res_pool_funcs = { .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, - .get_default_swizzle_mode = dcn20_get_default_swizzle_mode, + .patch_unknown_plane_state = dcn21_patch_unknown_plane_state, .set_mcif_arb_params = dcn20_set_mcif_arb_params, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, .update_bw_bounding_box = update_bw_bounding_box @@ -1695,6 +1777,7 @@ static bool dcn21_resource_construct( dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; + dc->caps.is_apu = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; @@ -1758,9 +1841,15 @@ static bool dcn21_resource_construct( goto create_fail; } - // Leave as NULL to not affect current dmcu psr programming sequence - // Will be uncommented when functionality is confirmed to be working - pool->base.psr = NULL; + if (dc->debug.disable_dmcu) { + pool->base.psr = dmub_psr_create(ctx); + + if (pool->base.psr == NULL) { + dm_error("DC: failed to create psr obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } pool->base.abm = dce_abm_create(ctx, &abm_regs, diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h index 626d22d437f4..968c46dfb506 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -32,6 +32,7 @@ struct cp_psp_stream_config { uint8_t otg_inst; uint8_t link_enc_inst; uint8_t stream_enc_inst; + uint8_t mst_supported; void *dm_stream_ctx; bool dpms_off; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 7ee8b8460a9b..e34c3376efc1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -63,10 +63,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) endif CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags) DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ - dml_common_defs.o ifdef CONFIG_DRM_AMD_DC_DCN DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o diff --git a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h index ea4cde952f4f..2a1983324629 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h @@ -29,7 +29,7 @@ #define DC__PRESENT 1 #define DC__PRESENT__1 1 #define DC__NUM_DPP 4 -#define DC__VOLTAGE_STATES 7 +#define DC__VOLTAGE_STATES 9 #define DC__NUM_DPP__4 1 #define DC__NUM_DPP__0_PRESENT 1 #define DC__NUM_DPP__1_PRESENT 1 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 485a9c62ec58..5bbbafacc720 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -2614,6 +2614,14 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP if (mode_lib->vba.DRAMClockChangeSupportsVActive && mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) { + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) { + if (mode_lib->vba.DRAMClockChangeWatermark > + dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark)) + mode_lib->vba.MinTTUVBlank[k] += 25; + } + } mode_lib->vba.DRAMClockChangeWatermark += 25; mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else if (mode_lib->vba.DummyPStateCheck && diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h index 8c86b63ddf07..1e557ddcb638 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h @@ -26,7 +26,6 @@ #ifndef __DML20_DISPLAY_RQ_DLG_CALC_H__ #define __DML20_DISPLAY_RQ_DLG_CALC_H__ -#include "../dml_common_defs.h" #include "../display_rq_dlg_helpers.h" struct display_mode_lib; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h index 0378406bf7e7..0d53e871a9d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h @@ -26,7 +26,6 @@ #ifndef __DML20V2_DISPLAY_RQ_DLG_CALC_H__ #define __DML20V2_DISPLAY_RQ_DLG_CALC_H__ -#include "../dml_common_defs.h" #include "../display_rq_dlg_helpers.h" struct display_mode_lib; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index a38baa73d484..b8ec08e3b7a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -1200,7 +1200,7 @@ static void dml_rq_dlg_get_dlg_params( min_hratio_fact_l = 1.0; min_hratio_fact_c = 1.0; - if (htaps_l <= 1) + if (hratio_l <= 1) min_hratio_fact_l = 2.0; else if (htaps_l <= 6) { if ((hratio_l * 2.0) > 4.0) @@ -1216,7 +1216,7 @@ static void dml_rq_dlg_get_dlg_params( hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz; - if (htaps_c <= 1) + if (hratio_c <= 1) min_hratio_fact_c = 2.0; else if (htaps_c <= 6) { if ((hratio_c * 2.0) > 4.0) @@ -1522,8 +1522,8 @@ static void dml_rq_dlg_get_dlg_params( disp_dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; disp_dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; - disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; - disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; + disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, 
num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10); + disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10); // Clamp to max for now if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23)) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h index 83e95f8cbff2..e8f7785e3fc6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h @@ -26,7 +26,7 @@ #ifndef __DML21_DISPLAY_RQ_DLG_CALC_H__ #define __DML21_DISPLAY_RQ_DLG_CALC_H__ -#include "../dml_common_defs.h" +#include "dm_services.h" #include "../display_rq_dlg_helpers.h" struct display_mode_lib; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index cf2758ca5b02..c77c3d827e4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -25,8 +25,10 @@ #ifndef __DISPLAY_MODE_LIB_H__ #define __DISPLAY_MODE_LIB_H__ - -#include "dml_common_defs.h" +#include "dm_services.h" +#include "dc_features.h" +#include "display_mode_structs.h" +#include "display_mode_enums.h" #include "display_mode_vba.h" enum dml_project { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index 658f81e757e9..687010c17324 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -22,11 +22,12 @@ * Authors: AMD * */ + +#include "dc_features.h" + #ifndef __DISPLAY_MODE_STRUCTS_H__ #define __DISPLAY_MODE_STRUCTS_H__ -#define MAX_CLOCK_LIMIT_STATES 8 - typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st; typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st; typedef struct _vcs_dpi_ip_params_st ip_params_st; @@ -61,12 +62,15 @@ struct _vcs_dpi_voltage_scaling_st { double dram_speed_mts; double fabricclk_mhz; double dispclk_mhz; + double dram_bw_per_chan_gbps; double phyclk_mhz; double dppclk_mhz; double dtbclk_mhz; }; struct _vcs_dpi_soc_bounding_box_st { + struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; + unsigned int num_states; double sr_exit_time_us; double sr_enter_plus_exit_time_us; double urgent_latency_us; @@ -109,8 +113,7 @@ struct _vcs_dpi_soc_bounding_box_st { double xfc_bus_transport_time_us; double xfc_xbuf_latency_tolerance_us; int use_urgent_burst_bw; - unsigned int num_states; - struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES]; + double min_dcfclk; bool do_urgent_latency_adjustment; double urgent_latency_adjustment_fabric_clock_component_us; double urgent_latency_adjustment_fabric_clock_reference_mhz; @@ -189,7 +192,7 @@ struct _vcs_dpi_ip_params_st { unsigned int min_vblank_lines; unsigned int dppclk_delay_subtotal; unsigned int dispclk_delay_subtotal; - unsigned int dcfclk_cstate_latency; + double dcfclk_cstate_latency; unsigned int dppclk_delay_scl; unsigned int dppclk_delay_scl_lb_only; unsigned int dppclk_delay_cnvc_formatter; @@ -202,6 +205,7 @@ struct _vcs_dpi_ip_params_st { unsigned int LineBufferFixedBpp; unsigned int can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one; unsigned int bug_forcing_LC_req_same_size_fixed; + unsigned int number_of_cursors; }; struct 
_vcs_dpi_display_xfc_params_st { @@ -325,7 +329,6 @@ struct _vcs_dpi_display_pipe_dest_params_st { unsigned int vupdate_width; unsigned int vready_offset; unsigned char interlaced; - unsigned char embedded; double pixel_rate_mhz; unsigned char synchronized_vblank_all_planes; unsigned char otg_inst; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index b3c96d9b472f..6b525c52124c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -266,8 +266,6 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib) mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz; mode_lib->vba.DTBCLKPerState[i] = soc->clock_limits[i].dtbclk_mhz; } - mode_lib->vba.MinVoltageLevel = 0; - mode_lib->vba.MaxVoltageLevel = mode_lib->vba.soc.num_states; mode_lib->vba.DoUrgentLatencyAdjustment = soc->do_urgent_latency_adjustment; @@ -379,7 +377,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes; - mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] = dst->embedded; mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1; mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] = (enum scan_direction_class) (src->source_scan); @@ -396,11 +393,11 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.ViewportYStartC[mode_lib->vba.NumberOfActivePlanes] = src->viewport_y_c; mode_lib->vba.PitchY[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch; - mode_lib->vba.SurfaceHeightY[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height; - mode_lib->vba.SurfaceWidthY[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width; + mode_lib->vba.SurfaceWidthY[mode_lib->vba.NumberOfActivePlanes] = src->surface_width_y; + mode_lib->vba.SurfaceHeightY[mode_lib->vba.NumberOfActivePlanes] = src->surface_height_y; mode_lib->vba.PitchC[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch_c; - mode_lib->vba.SurfaceHeightC[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_c; - mode_lib->vba.SurfaceWidthC[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width_c; + mode_lib->vba.SurfaceHeightC[mode_lib->vba.NumberOfActivePlanes] = src->surface_height_c; + mode_lib->vba.SurfaceWidthC[mode_lib->vba.NumberOfActivePlanes] = src->surface_width_c; mode_lib->vba.DCCMetaPitchY[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch; mode_lib->vba.DCCMetaPitchC[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch_c; mode_lib->vba.HRatio[mode_lib->vba.NumberOfActivePlanes] = scl->hscl_ratio; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 2875efd85467..3a734171f083 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -27,8 +27,6 @@ #ifndef __DML2_DISPLAY_MODE_VBA_H__ #define __DML2_DISPLAY_MODE_VBA_H__ -#include "dml_common_defs.h" - struct display_mode_lib; void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib); @@ -389,7 +387,6 @@ struct vba_vars_st { /* vba mode support */ /*inputs*/ - bool EmbeddedPanel[DC__NUM_DPP__MAX]; bool SupportGFX7CompatibleTilingIn32bppAnd64bpp; double MaxHSCLRatio; double MaxVSCLRatio; @@ -842,8 +839,6 @@ struct vba_vars_st { double DCCRateChroma[DC__NUM_DPP__MAX]; double PHYCLKD18PerState[DC__VOLTAGE_STATES + 1]; - int 
MinVoltageLevel; - int MaxVoltageLevel; bool WritebackSupportInterleaveAndUsingWholeBufferForASingleStream; bool NumberOfHDMIFRLSupport; @@ -880,7 +875,6 @@ struct vba_vars_st { double TotalMetaRowBandwidth[DC__VOLTAGE_STATES + 1][2]; double TotalVActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2]; double TotalVActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2]; - bool UseMinimumRequiredDCFCLK; double WritebackDelayTime[DC__NUM_DPP__MAX]; unsigned int DCCYIndependentBlock[DC__NUM_DPP__MAX]; unsigned int DCCCIndependentBlock[DC__NUM_DPP__MAX]; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h index 1f24db830737..2555ef0358c2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h @@ -26,7 +26,6 @@ #ifndef __DISPLAY_RQ_DLG_HELPERS_H__ #define __DISPLAY_RQ_DLG_HELPERS_H__ -#include "dml_common_defs.h" #include "display_mode_lib.h" /* Function: Printer functions diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h index 304164986bd8..9c06913ad767 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h @@ -26,8 +26,6 @@ #ifndef __DISPLAY_RQ_DLG_CALC_H__ #define __DISPLAY_RQ_DLG_CALC_H__ -#include "dml_common_defs.h" - struct display_mode_lib; #include "display_rq_dlg_helpers.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h index ded71ea82413..02e06c9b3230 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h @@ -26,7 +26,6 @@ #ifndef __DML_INLINE_DEFS_H__ #define __DML_INLINE_DEFS_H__ -#include "dml_common_defs.h" #include "dcn_calc_math.h" #include "dml_logger.h" @@ -75,6 +74,18 @@ static inline double dml_floor(double a, double granularity) return (double) dcn_bw_floor2(a, granularity); } +static inline double dml_round(double a) +{ + double round_pt = 0.5; + double ceil = dml_ceil(a, 1); + double floor = dml_floor(a, 1); + + if (a - floor >= round_pt) + return ceil; + else + return floor; +} + static inline int dml_log2(double x) { return dml_round((double)dcn_bw_log(x, 2)); @@ -112,7 +123,7 @@ static inline double dml_log(double x, double base) static inline unsigned int dml_round_to_multiple(unsigned int num, unsigned int multiple, - bool up) + unsigned char up) { unsigned int remainder; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index d2d36d48caaa..f252af1947c3 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -47,9 +47,9 @@ #include "dce120/hw_factory_dce120.h" #if defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/hw_factory_dcn10.h" -#endif #include "dcn20/hw_factory_dcn20.h" #include "dcn21/hw_factory_dcn21.h" +#endif #include "diagnostics/hw_factory_diag.h" diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index 5d396657a1ee..04e2c0f74cb0 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -45,9 +45,9 @@ #include "dce120/hw_translate_dce120.h" #if defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/hw_translate_dcn10.h" -#endif #include 
"dcn20/hw_translate_dcn20.h" #include "dcn21/hw_translate_dcn21.h" +#endif #include "diagnostics/hw_translate_diag.h" diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index f285b76888fb..d523fc9547e7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -124,7 +124,7 @@ struct resource_funcs { struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *stream); - enum dc_status (*get_default_swizzle_mode)( + enum dc_status (*patch_unknown_plane_state)( struct dc_plane_state *plane_state); struct stream_encoder *(*find_first_free_match_stream_enc_for_link)( diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 8b1f0ce6c2a7..e94e5fbf2aa2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -78,6 +78,8 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); bool dp_overwrite_extended_receiver_cap(struct dc_link *link); +void dpcd_set_source_specific_data(struct dc_link *link); + void dp_set_fec_ready(struct dc_link *link, bool ready); void dp_set_fec_enable(struct dc_link *link, bool enable); bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index ac530c057ddd..ce65678c03b2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -27,6 +27,7 @@ #define __DAL_CLK_MGR_H__ #include "dc.h" +#include "dm_pp_smu.h" #define DCN_MINIMUM_DISPCLK_Khz 100000 #define DCN_MINIMUM_DPPCLK_Khz 100000 @@ -193,6 +194,7 @@ struct clk_mgr { int dentist_vco_freq_khz; struct clk_state_registers_and_bypass boot_snapshot; struct clk_bw_params *bw_params; + struct pp_smu_wm_range_sets ranges; }; /* forward declarations */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 862952c0286a..9311d0de377f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -296,6 +296,10 @@ int clk_mgr_helper_get_active_display_cnt( struct dc *dc, struct dc_state *context); +int clk_mgr_helper_get_active_plane_cnt( + struct dc *dc, + struct dc_state *context); + #endif //__DAL_CLK_MGR_INTERNAL_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 05ee5295d2c1..336c80a18175 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -27,11 +27,12 @@ #define __DAL_DCCG_H__ #include "dc_types.h" +#include "hw_shared.h" struct dccg { struct dc_context *ctx; const struct dccg_funcs *funcs; - + int pipe_dppclk_khz[MAX_PIPES]; int ref_dppclk; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index c0dc1d0f5cae..f5dd0cc73c63 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -134,7 +134,7 @@ struct hubbub_funcs { unsigned int dccg_ref_freq_inKhz, unsigned int *dchub_ref_freq_inKhz); - void (*program_watermarks)( + bool (*program_watermarks)( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, diff --git 
a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index c59740084ebc..7c2a3328b208 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -39,6 +39,7 @@ struct dsc_config { uint32_t pic_height; enum dc_pixel_encoding pixel_encoding; enum dc_color_depth color_depth; /* Bits per component */ + bool is_odm; struct dc_dsc_config dc_dsc_cfg; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h index 459f95f52486..f30ab4916242 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h @@ -25,16 +25,15 @@ #ifndef __DC_DWBC_H__ #define __DC_DWBC_H__ +#include "dal_types.h" #include "dc_hw_types.h" - #define DWB_SW_V2 1 #define DWB_MCIF_BUF_COUNT 4 /* forward declaration of mcif_wb struct */ struct mcif_wb; -enum dce_version; enum dwb_sw_version { dwb_ver_1_0 = 1, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index fb748f082c56..c2b392a533b1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -68,6 +68,7 @@ struct encoder_feature_support { unsigned int max_hdmi_pixel_clock; bool hdmi_ycbcr420_supported; bool dp_ycbcr420_supported; + bool fec_supported; }; union dpcd_psr_configuration { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 094afc4c8173..50ee8aa7ec3b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -210,6 +210,22 @@ struct mpc_funcs { struct mpcc_blnd_cfg *blnd_cfg, int mpcc_id); + /* + * Lock cursor updates for the specified OPP. + * OPP defines the set of MPCC that are locked together for cursor. + * + * Parameters: + * [in] mpc - MPC context. 
+ * [in] opp_id - The OPP to lock cursor updates on + * [in] lock - lock/unlock the OPP + * + * Return: void + */ + void (*cursor_lock)( + struct mpc *mpc, + int opp_id, + bool lock); + struct mpcc* (*get_mpcc_for_dpp)( struct mpc_tree *tree, int dpp_id); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 7575564b2265..2717352eb697 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -310,7 +310,8 @@ struct opp_funcs { enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, - int height); + int height, + int offset); bool (*dpg_is_blanked)( struct output_pixel_processor *opp); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index 351b387ad606..ac6523c0828e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -103,6 +103,7 @@ struct stream_encoder { struct dc_context *ctx; struct dc_bios *bp; enum engine_id id; + uint32_t stream_enc_inst; }; struct enc_state { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 209118f9f193..08307f3796e3 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -66,6 +66,8 @@ struct hw_sequencer_funcs { int num_planes, struct dc_state *context); void (*program_front_end_for_ctx)(struct dc *dc, struct dc_state *context); + void (*post_unlock_program_front_end)(struct dc *dc, + struct dc_state *context); void (*update_plane_addr)(const struct dc *dc, struct pipe_ctx *pipe_ctx); void (*update_dchub)(struct dce_hwseq *hws, @@ -78,17 +80,23 @@ struct hw_sequencer_funcs { void (*update_pending_status)(struct pipe_ctx *pipe_ctx); /* Pipe Lock Related */ - void (*pipe_control_lock_global)(struct dc *dc, - struct pipe_ctx *pipe, bool lock); void (*pipe_control_lock)(struct dc *dc, struct pipe_ctx *pipe, bool lock); + void (*interdependent_update_lock)(struct dc *dc, + struct dc_state *context, bool lock); void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx, bool flip_immediate); + void (*cursor_lock)(struct dc *dc, struct pipe_ctx *pipe, bool lock); /* Timing Related */ void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes, struct crtc_position *position); int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx); + void (*calc_vupdate_position)( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + uint32_t *start_line, + uint32_t *end_line); void (*enable_per_frame_crtc_position_reset)(struct dc *dc, int group_size, struct pipe_ctx *grouped_pipes[]); void (*enable_timing_synchronization)(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index ecf566378ccd..52a26e6be066 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -40,10 +40,13 @@ struct dce_hwseq_wa { bool false_optc_underflow; bool DEGVIDCN10_254; bool DEGVIDCN21; + bool disallow_self_refresh_during_multi_plane_transition; }; struct hwseq_wa_state { bool DEGVIDCN10_253_applied; + bool disallow_self_refresh_during_multi_plane_transition_applied; + unsigned int disallow_self_refresh_during_multi_plane_transition_applied_on_frame; }; struct pipe_ctx; @@ -97,6 +100,8 @@ struct hwseq_private_funcs { 
struct dc *dc); void (*edp_backlight_control)(struct dc_link *link, bool enable); + bool (*is_panel_backlight_on)(struct dc_link *link); + bool (*is_panel_powered_on)(struct dc_link *link); void (*setup_vupdate_interrupt)(struct dc *dc, struct pipe_ctx *pipe_ctx); bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx); @@ -140,6 +145,8 @@ struct hwseq_private_funcs { const struct dc_plane_state *plane_state); bool (*set_shaper_3dlut)(struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); + void (*PLAT_58856_wa)(struct dc_state *context, + struct pipe_ctx *pipe_ctx); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 5ae8ada154ef..ca4c36c0c9bc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -179,4 +179,7 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format); void get_audio_check(struct audio_info *aud_modes, struct audio_check *aud_chk); + +int get_num_odm_splits(struct pipe_ctx *pipe); + #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index c34eba19860a..6d7bca562eec 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -108,7 +108,7 @@ #define ASSERT(expr) ASSERT_CRITICAL(expr) #else -#define ASSERT(expr) WARN_ON(!(expr)) +#define ASSERT(expr) WARN_ON_ONCE(!(expr)) #endif #define BREAK_TO_DEBUGGER() ASSERT(0) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index cd9532b4f14d..10b5fa9d2588 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -50,6 +50,7 @@ enum dmub_cmd_type { DMUB_CMD__REG_REG_WAIT = 4, DMUB_CMD__PLAT_54186_WA = 5, DMUB_CMD__PSR = 64, + DMUB_CMD__ABM = 66, DMUB_CMD__VBIOS = 128, }; @@ -216,7 +217,6 @@ struct dmub_rb_cmd_dpphy_init { struct dmub_cmd_psr_copy_settings_data { uint16_t psr_level; - uint8_t hubp_inst; uint8_t dpp_inst; uint8_t mpcc_inst; uint8_t opp_inst; @@ -225,17 +225,8 @@ struct dmub_cmd_psr_copy_settings_data { uint8_t digbe_inst; uint8_t dpphy_inst; uint8_t aux_inst; - uint8_t hyst_frames; - uint8_t hyst_lines; - uint8_t phy_num; - uint8_t phy_type; - uint8_t aux_repeat; uint8_t smu_optimizations_en; - uint8_t skip_wait_for_pll_lock; uint8_t frame_delay; - uint8_t smu_phy_id; - uint8_t num_of_controllers; - uint8_t link_rate; uint8_t frame_cap_ind; }; @@ -257,13 +248,59 @@ struct dmub_rb_cmd_psr_enable { struct dmub_cmd_header header; }; -struct dmub_cmd_psr_setup_data { +struct dmub_cmd_psr_set_version_data { enum psr_version version; // PSR version 1 or 2 }; -struct dmub_rb_cmd_psr_setup { +struct dmub_rb_cmd_psr_set_version { struct dmub_cmd_header header; - struct dmub_cmd_psr_setup_data psr_setup_data; + struct dmub_cmd_psr_set_version_data psr_set_version_data; +}; + +struct dmub_cmd_abm_set_pipe_data { + uint32_t ramping_boundary; + uint32_t otg_inst; +}; + +struct dmub_rb_cmd_abm_set_pipe { + struct dmub_cmd_header header; + struct dmub_cmd_abm_set_pipe_data abm_set_pipe_data; +}; + +struct dmub_cmd_abm_set_backlight_data { + uint32_t frame_ramp; +}; + +struct dmub_rb_cmd_abm_set_backlight { + struct dmub_cmd_header header; + struct dmub_cmd_abm_set_backlight_data abm_set_backlight_data; +}; + +struct dmub_cmd_abm_set_level_data { + uint32_t level; +}; + 
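+/* As with the PSR commands above, each ABM ring buffer command is a
+ * dmub_cmd_header immediately followed by its command-specific data
+ * payload.
+ */
+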
+struct dmub_rb_cmd_abm_set_level { + struct dmub_cmd_header header; + struct dmub_cmd_abm_set_level_data abm_set_level_data; +}; + +struct dmub_cmd_abm_set_ambient_level_data { + uint32_t ambient_lux; +}; + +struct dmub_rb_cmd_abm_set_ambient_level { + struct dmub_cmd_header header; + struct dmub_cmd_abm_set_ambient_level_data abm_set_ambient_level_data; +}; + +struct dmub_cmd_abm_set_pwm_frac_data { + uint32_t fractional_pwm; +}; + +struct dmub_rb_cmd_abm_set_pwm_frac { + struct dmub_cmd_header header; + struct dmub_cmd_abm_set_pwm_frac_data abm_set_pwm_frac_data; }; union dmub_rb_cmd { @@ -277,11 +314,16 @@ union dmub_rb_cmd { struct dmub_rb_cmd_enable_disp_power_gating enable_disp_power_gating; struct dmub_rb_cmd_dpphy_init dpphy_init; struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control; - struct dmub_rb_cmd_psr_enable psr_enable; + struct dmub_rb_cmd_psr_set_version psr_set_version; struct dmub_rb_cmd_psr_copy_settings psr_copy_settings; + struct dmub_rb_cmd_psr_enable psr_enable; struct dmub_rb_cmd_psr_set_level psr_set_level; struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa; - struct dmub_rb_cmd_psr_setup psr_setup; + struct dmub_rb_cmd_abm_set_pipe abm_set_pipe; + struct dmub_rb_cmd_abm_set_backlight abm_set_backlight; + struct dmub_rb_cmd_abm_set_level abm_set_level; + struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level; + struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac; }; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h index 7b69eb37f762..d37535d21928 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h @@ -32,7 +32,7 @@ */ enum dmub_cmd_psr_type { - DMUB_CMD__PSR_SETUP = 0, + DMUB_CMD__PSR_SET_VERSION = 0, DMUB_CMD__PSR_COPY_SETTINGS = 1, DMUB_CMD__PSR_ENABLE = 2, DMUB_CMD__PSR_DISABLE = 3, @@ -42,7 +42,16 @@ enum dmub_cmd_psr_type { enum psr_version { PSR_VERSION_1 = 0x10, // PSR Version 1 PSR_VERSION_2 = 0x20, // PSR Version 2, includes selective update - PSR_VERSION_2_Y_COORD = 0x21, // PSR Version 2, includes Y-coordinate support for SU + PSR_VERSION_2_1 = 0x21, // PSR Version 2, includes Y-coordinate support for SU +}; + +enum dmub_cmd_abm_type { + DMUB_CMD__ABM_INIT_CONFIG = 0, + DMUB_CMD__ABM_SET_PIPE = 1, + DMUB_CMD__ABM_SET_BACKLIGHT = 2, + DMUB_CMD__ABM_SET_LEVEL = 3, + DMUB_CMD__ABM_SET_AMBIENT_LEVEL = 4, + DMUB_CMD__ABM_SET_PWM_FRAC = 5, }; #endif /* _DMUB_CMD_DAL_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h new file mode 100644 index 000000000000..652d6fc061b6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h @@ -0,0 +1,75 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_GPINT_CMD_H_ +#define _DMUB_GPINT_CMD_H_ + +#include "dmub_types.h" + +/** + * The register format for sending a command via the GPINT. + */ +union dmub_gpint_data_register { + struct { + uint32_t param : 16; + uint32_t command_code : 12; + uint32_t status : 4; + } bits; + uint32_t all; +}; + +/** + * The shifts and masks below may alternatively be used to format and read + * the command register bits. + */ + +#define DMUB_GPINT_DATA_PARAM_MASK 0xFFFF +#define DMUB_GPINT_DATA_PARAM_SHIFT 0 + +#define DMUB_GPINT_DATA_COMMAND_CODE_MASK 0xFFF +#define DMUB_GPINT_DATA_COMMAND_CODE_SHIFT 16 + +#define DMUB_GPINT_DATA_STATUS_MASK 0xF +#define DMUB_GPINT_DATA_STATUS_SHIFT 28 + +/* + * Command IDs should be treated as stable ABI. + * Do not reuse or modify IDs. + */ + +enum dmub_gpint_command { + DMUB_GPINT__INVALID_COMMAND = 0, + DMUB_GPINT__GET_FW_VERSION = 1, + DMUB_GPINT__STOP_FW = 2, + DMUB_GPINT__GET_PSR_STATE = 7, +}; + +/** + * Command responses. + */ + +#define DMUB_GPINT__STOP_FW_RESPONSE 0xDEADDEAD + +#endif /* _DMUB_GPINT_CMD_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h index f8917594036a..c2671f2616c8 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h @@ -66,6 +66,7 @@ #include "dmub_types.h" #include "dmub_cmd.h" +#include "dmub_gpint_cmd.h" #include "dmub_rb.h" #if defined(__cplusplus) @@ -103,7 +104,7 @@ enum dmub_window_id { DMUB_WINDOW_4_MAILBOX, DMUB_WINDOW_5_TRACEBUFF, DMUB_WINDOW_6_FW_STATE, - DMUB_WINDOW_7_RESERVED, + DMUB_WINDOW_7_SCRATCH_MEM, DMUB_WINDOW_TOTAL, }; @@ -262,6 +263,14 @@ struct dmub_srv_hw_funcs { bool (*is_phy_init)(struct dmub_srv *dmub); bool (*is_auto_load_done)(struct dmub_srv *dmub); + + void (*set_gpint)(struct dmub_srv *dmub, + union dmub_gpint_data_register reg); + + bool (*is_gpint_acked)(struct dmub_srv *dmub, + union dmub_gpint_data_register reg); + + uint32_t (*get_gpint_response)(struct dmub_srv *dmub); }; /** @@ -307,6 +316,7 @@ struct dmub_srv { enum dmub_asic asic; void *user_ctx; bool is_virtual; + struct dmub_fb scratch_mem_fb; volatile const struct dmub_fw_state *fw_state; /* private: internal use only */ @@ -516,6 +526,45 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub, enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, uint32_t timeout_us); +/** + * dmub_srv_send_gpint_command() - Sends a GPINT based command. + * @dmub: the dmub service + * @command_code: the command code to send + * @param: the command parameter to send + * @timeout_us: the maximum number of microseconds to wait + * + * Sends a command via the general purpose interrupt (GPINT). + * Waits for the number of microseconds specified by timeout_us + * for the command ACK before returning. + * + * Can be called after software initialization. 
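+ *
+ * Note that the wait is a busy poll: each loop iteration checks the
+ * ACK register once, so timeout_us bounds the number of register
+ * reads rather than an exact wall-clock time.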
+ * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_TIMEOUT - wait for ACK timed out + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status +dmub_srv_send_gpint_command(struct dmub_srv *dmub, + enum dmub_gpint_command command_code, + uint16_t param, uint32_t timeout_us); + +/** + * dmub_srv_get_gpint_response() - Queries the GPINT response. + * @dmub: the dmub service + * @response: the response for the last GPINT + * + * Returns the response code for the last GPINT interrupt. + * + * Can be called after software initialization. + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub, + uint32_t *response); + #if defined(__cplusplus) } #endif diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index b2ca8e0dbac9..63bb9e2c81de 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -60,6 +60,12 @@ static void dmub_dcn20_get_fb_base_offset(struct dmub_srv *dmub, { uint32_t tmp; + if (dmub->fb_base || dmub->fb_offset) { + *fb_base = dmub->fb_base; + *fb_offset = dmub->fb_offset; + return; + } + REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp); *fb_base = (uint64_t)tmp << 24; @@ -77,11 +83,52 @@ static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in, void dmub_dcn20_reset(struct dmub_srv *dmub) { + union dmub_gpint_data_register cmd; + const uint32_t timeout = 30; + uint32_t in_reset, scratch, i; + + REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &in_reset); + + if (in_reset == 0) { + cmd.bits.status = 1; + cmd.bits.command_code = DMUB_GPINT__STOP_FW; + cmd.bits.param = 0; + + dmub->hw_funcs.set_gpint(dmub, cmd); + + /** + * Timeout covers both the ACK and the wait + * for remaining work to finish. + * + * This is mostly bound by the PHY disable sequence. + * Each register check will be greater than 1us, so + * don't bother using udelay. + */ + + for (i = 0; i < timeout; ++i) { + if (dmub->hw_funcs.is_gpint_acked(dmub, cmd)) + break; + } + + for (i = 0; i < timeout; ++i) { + scratch = dmub->hw_funcs.get_gpint_response(dmub); + if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) + break; + } + + /* Clear the GPINT command manually so we don't reset again. */ + cmd.all = 0; + dmub->hw_funcs.set_gpint(dmub, cmd); + + /* Force reset in case we timed out, DMCUB is likely hung. 
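+		 * If the ACK or the STOP_FW response never arrived, the
+		 * unconditional soft reset below is the only remaining
+		 * recovery path.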
*/ + } + REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); REG_WRITE(DMCUB_INBOX1_RPTR, 0); REG_WRITE(DMCUB_INBOX1_WPTR, 0); + REG_WRITE(DMCUB_SCRATCH0, 0); } void dmub_dcn20_reset_release(struct dmub_srv *dmub) @@ -217,3 +264,25 @@ bool dmub_dcn20_is_supported(struct dmub_srv *dmub) return supported; } + +void dmub_dcn20_set_gpint(struct dmub_srv *dmub, + union dmub_gpint_data_register reg) +{ + REG_WRITE(DMCUB_GPINT_DATAIN1, reg.all); +} + +bool dmub_dcn20_is_gpint_acked(struct dmub_srv *dmub, + union dmub_gpint_data_register reg) +{ + union dmub_gpint_data_register test; + + reg.bits.status = 0; + test.all = REG_READ(DMCUB_GPINT_DATAIN1); + + return test.all == reg.all; +} + +uint32_t dmub_dcn20_get_gpint_response(struct dmub_srv *dmub) +{ + return REG_READ(DMCUB_SCRATCH7); +} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h index 04b0fa13153d..7f046c73927e 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -91,6 +91,7 @@ struct dmub_srv; DMUB_SR(DMCUB_SCRATCH13) \ DMUB_SR(DMCUB_SCRATCH14) \ DMUB_SR(DMCUB_SCRATCH15) \ + DMUB_SR(DMCUB_GPINT_DATAIN1) \ DMUB_SR(CC_DC_PIPE_DIS) \ DMUB_SR(MMHUBBUB_SOFT_RESET) \ DMUB_SR(DCN_VM_FB_LOCATION_BASE) \ @@ -183,4 +184,12 @@ bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub); bool dmub_dcn20_is_supported(struct dmub_srv *dmub); +void dmub_dcn20_set_gpint(struct dmub_srv *dmub, + union dmub_gpint_data_register reg); + +bool dmub_dcn20_is_gpint_acked(struct dmub_srv *dmub, + union dmub_gpint_data_register reg); + +uint32_t dmub_dcn20_get_gpint_response(struct dmub_srv *dmub); + #endif /* _DMUB_DCN20_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 85a518bf8a76..ce32cc7933c4 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -52,8 +52,11 @@ /* Default tracebuffer size if meta is absent. */ #define DMUB_TRACE_BUFFER_SIZE (1024) +/* Default scratch mem size. */ +#define DMUB_SCRATCH_MEM_SIZE (256) + /* Number of windows in use. */ -#define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1) +#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL) /* Base addresses. 
*/ #define DMUB_CW0_BASE (0x60000000) @@ -126,6 +129,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr; funcs->is_supported = dmub_dcn20_is_supported; funcs->is_hw_init = dmub_dcn20_is_hw_init; + funcs->set_gpint = dmub_dcn20_set_gpint; + funcs->is_gpint_acked = dmub_dcn20_is_gpint_acked; + funcs->get_gpint_response = dmub_dcn20_get_gpint_response; if (asic == DMUB_ASIC_DCN21) { dmub->regs = &dmub_srv_dcn21_regs; @@ -208,9 +214,11 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX]; struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF]; struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE]; + struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM]; const struct dmub_fw_meta_info *fw_info; uint32_t fw_state_size = DMUB_FW_STATE_SIZE; uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE; + uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE; if (!dmub->sw_init) return DMUB_STATUS_INVALID; @@ -253,7 +261,10 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, fw_state->base = dmub_align(trace_buff->top, 256); fw_state->top = fw_state->base + dmub_align(fw_state_size, 64); - out->fb_size = dmub_align(fw_state->top, 4096); + scratch_mem->base = dmub_align(fw_state->top, 256); + scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64); + + out->fb_size = dmub_align(scratch_mem->top, 4096); return DMUB_STATUS_OK; } @@ -331,6 +342,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX]; struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; + struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM]; struct dmub_rb_init_params rb_params; struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6; @@ -367,7 +379,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, dmub->hw_funcs.reset(dmub); if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb && - fw_state_fb) { + fw_state_fb && scratch_mem_fb) { cw2.offset.quad_part = data_fb->gpu_addr; cw2.region.base = DMUB_CW0_BASE + inst_fb->size; cw2.region.top = cw2.region.base + data_fb->size; @@ -393,6 +405,8 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, dmub->fw_state = fw_state_fb->cpu_addr; + dmub->scratch_mem_fb = *scratch_mem_fb; + if (dmub->hw_funcs.setup_windows) dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6); @@ -522,3 +536,50 @@ enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, return DMUB_STATUS_TIMEOUT; } + +enum dmub_status +dmub_srv_send_gpint_command(struct dmub_srv *dmub, + enum dmub_gpint_command command_code, + uint16_t param, uint32_t timeout_us) +{ + union dmub_gpint_data_register reg; + uint32_t i; + + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (!dmub->hw_funcs.set_gpint) + return DMUB_STATUS_INVALID; + + if (!dmub->hw_funcs.is_gpint_acked) + return DMUB_STATUS_INVALID; + + reg.bits.status = 1; + reg.bits.command_code = command_code; + reg.bits.param = param; + + dmub->hw_funcs.set_gpint(dmub, reg); + + for (i = 0; i < timeout_us; ++i) { + if (dmub->hw_funcs.is_gpint_acked(dmub, reg)) + return DMUB_STATUS_OK; + } + + return DMUB_STATUS_TIMEOUT; +} + +enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub, + uint32_t *response) +{ + *response = 0; + + if (!dmub->sw_init) + return 
DMUB_STATUS_INVALID; + + if (!dmub->hw_funcs.get_gpint_response) + return DMUB_STATUS_INVALID; + + *response = dmub->hw_funcs.get_gpint_response(dmub); + + return DMUB_STATUS_OK; +} diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index a2903985b9e8..2359e88d6029 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -134,31 +134,28 @@ #define PICASSO_A0 0x41 /* DCN1_01 */ #define RAVEN2_A0 0x81 -#define RAVEN2_15D8_REV_94 0x94 -#define RAVEN2_15D8_REV_95 0x95 -#define RAVEN2_15D8_REV_E3 0xE3 -#define RAVEN2_15D8_REV_E4 0xE4 -#define RAVEN2_15D8_REV_E9 0xE9 -#define RAVEN2_15D8_REV_EA 0xEA -#define RAVEN2_15D8_REV_EB 0xEB #define RAVEN1_F0 0xF0 #define RAVEN_UNKNOWN 0xFF +#define RENOIR_A0 0x91 #ifndef ASICREV_IS_RAVEN #define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN) #endif +#define PRID_DALI_DE 0xDE +#define PRID_DALI_DF 0xDF +#define PRID_DALI_E3 0xE3 +#define PRID_DALI_E4 0xE4 + +#define PRID_POLLOCK_94 0x94 +#define PRID_POLLOCK_95 0x95 +#define PRID_POLLOCK_E9 0xE9 +#define PRID_POLLOCK_EA 0xEA +#define PRID_POLLOCK_EB 0xEB #define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0)) #ifndef ASICREV_IS_RAVEN2 -#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RAVEN1_F0)) +#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RENOIR_A0)) #endif #define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN)) -#define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \ - || (eChipRev == RAVEN2_15D8_REV_E4)) -#define ASICREV_IS_POLLOCK(eChipRev) (eChipRev == RAVEN2_15D8_REV_94 \ - || eChipRev == RAVEN2_15D8_REV_95 \ - || eChipRev == RAVEN2_15D8_REV_E9 \ - || eChipRev == RAVEN2_15D8_REV_EA \ - || eChipRev == RAVEN2_15D8_REV_EB) #define FAMILY_RV 142 /* DCN 1*/ @@ -175,9 +172,7 @@ enum { #define ASICREV_IS_NAVI10_P(eChipRev) (eChipRev < NV_NAVI12_P_A0) #define ASICREV_IS_NAVI12_P(eChipRev) ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0)) #define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN)) -#define RENOIR_A0 0x91 -#define DEVICE_ID_RENOIR_1636 0x1636 // Renoir -#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < 0xFF)) +#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < RAVEN1_F0)) /* * ASIC chip ID @@ -187,6 +182,9 @@ enum { #define DEVICE_ID_TEMASH_9839 0x9839 #define DEVICE_ID_TEMASH_983D 0x983D +/* RENOIR */ +#define DEVICE_ID_RENOIR_1636 0x1636 + /* Asic Family IDs for different asic family. 
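/*
 * Caller-side view of the GPINT handshake added above (a minimal
 * sketch; the helper name and the 100-iteration timeout are
 * illustrative, not taken from this patch):
 */
static uint32_t dm_query_dmub(struct dmub_srv *dmub,
			      enum dmub_gpint_command cmd, uint16_t param)
{
	uint32_t response = 0;

	/* Writes DMCUB_GPINT_DATAIN1 with the status bit set, then
	 * polls is_gpint_acked() up to timeout_us iterations until
	 * firmware clears the bit.
	 */
	if (dmub_srv_send_gpint_command(dmub, cmd, param, 100) !=
	    DMUB_STATUS_OK)
		return 0;

	/* Firmware leaves any reply in DMCUB_SCRATCH7. */
	if (dmub_srv_get_gpint_response(dmub, &response) != DMUB_STATUS_OK)
		return 0;

	return response;
}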
*/ #define FAMILY_CI 120 /* Sea Islands: Hawaii (P), Bonaire (M) */ #define FAMILY_KV 125 /* Fusion => Kaveri: Spectre, Spooky; Kabini: Kalindi */ diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index 2c90d1b46c8b..3d29646c7cb4 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -149,4 +149,12 @@ enum dpcd_psr_sink_states { PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7, }; +#define DP_SOURCE_TABLE_REVISION 0x310 +#define DP_SOURCE_PAYLOAD_SIZE 0x311 +#define DP_SOURCE_SINK_CAP 0x317 +#define DP_SOURCE_BACKLIGHT_LEVEL 0x320 +#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326 +#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E +#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F + #endif /* __DAL_DPCD_DEFS_H__ */ diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index 89a709267019..d66f9d8eefb4 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -124,36 +124,37 @@ enum dc_log_type { #define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \ (1 << LOG_DETECTION_EDID_PARSER)) -#define DC_DEFAULT_LOG_MASK ((1 << LOG_ERROR) | \ - (1 << LOG_WARNING) | \ - (1 << LOG_EVENT_MODE_SET) | \ - (1 << LOG_EVENT_DETECTION) | \ - (1 << LOG_EVENT_LINK_TRAINING) | \ - (1 << LOG_EVENT_LINK_LOSS) | \ - (1 << LOG_EVENT_UNDERFLOW) | \ - (1 << LOG_RESOURCE) | \ - (1 << LOG_FEATURE_OVERRIDE) | \ - (1 << LOG_DETECTION_EDID_PARSER) | \ - (1 << LOG_DC) | \ - (1 << LOG_HW_HOTPLUG) | \ - (1 << LOG_HW_SET_MODE) | \ - (1 << LOG_HW_RESUME_S3) | \ - (1 << LOG_HW_HPD_IRQ) | \ - (1 << LOG_SYNC) | \ - (1 << LOG_BANDWIDTH_VALIDATION) | \ - (1 << LOG_MST) | \ - (1 << LOG_DETECTION_DP_CAPS) | \ - (1 << LOG_BACKLIGHT)) | \ - (1 << LOG_I2C_AUX) | \ - (1 << LOG_IF_TRACE) | \ - (1 << LOG_DTN) /* | \ - (1 << LOG_DEBUG) | \ - (1 << LOG_BIOS) | \ - (1 << LOG_SURFACE) | \ - (1 << LOG_SCALER) | \ - (1 << LOG_DML) | \ - (1 << LOG_HW_LINK_TRAINING) | \ - (1 << LOG_HW_AUDIO)| \ - (1 << LOG_BANDWIDTH_CALCS)*/ +#define DC_DEFAULT_LOG_MASK ((1ULL << LOG_ERROR) | \ + (1ULL << LOG_WARNING) | \ + (1ULL << LOG_EVENT_MODE_SET) | \ + (1ULL << LOG_EVENT_DETECTION) | \ + (1ULL << LOG_EVENT_LINK_TRAINING) | \ + (1ULL << LOG_EVENT_LINK_LOSS) | \ + (1ULL << LOG_EVENT_UNDERFLOW) | \ + (1ULL << LOG_RESOURCE) | \ + (1ULL << LOG_FEATURE_OVERRIDE) | \ + (1ULL << LOG_DETECTION_EDID_PARSER) | \ + (1ULL << LOG_DC) | \ + (1ULL << LOG_HW_HOTPLUG) | \ + (1ULL << LOG_HW_SET_MODE) | \ + (1ULL << LOG_HW_RESUME_S3) | \ + (1ULL << LOG_HW_HPD_IRQ) | \ + (1ULL << LOG_SYNC) | \ + (1ULL << LOG_BANDWIDTH_VALIDATION) | \ + (1ULL << LOG_MST) | \ + (1ULL << LOG_DETECTION_DP_CAPS) | \ + (1ULL << LOG_BACKLIGHT)) | \ + (1ULL << LOG_I2C_AUX) | \ + (1ULL << LOG_IF_TRACE) | \ + (1ULL << LOG_HDMI_FRL) | \ + (1ULL << LOG_DTN) /* | \ + (1ULL << LOG_DEBUG) | \ + (1ULL << LOG_BIOS) | \ + (1ULL << LOG_SURFACE) | \ + (1ULL << LOG_SCALER) | \ + (1ULL << LOG_DML) | \ + (1ULL << LOG_HW_LINK_TRAINING) | \ + (1ULL << LOG_HW_AUDIO)| \ + (1ULL << LOG_BANDWIDTH_CALCS)*/ #endif /* __DAL_LOGGER_TYPES_H__ */ diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index b9992ebf77a6..c33454a9e0b4 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -524,12 +524,12 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params 
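/*
 * Why the 1ULL widening in DC_DEFAULT_LOG_MASK above matters: with
 * plain int-width shifts, any log type whose enum value reaches 32
 * invokes undefined behavior. Illustrative only; the actual
 * dc_log_type values are not shown in this hunk:
 *
 *   (1 << 33)      // UB on a 32-bit int, mask bit silently lost
 *   (1ULL << 33)   // well-defined 64-bit mask bit
 */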
*vrr, infopacket->sb[6] |= 0x04; /* PB7 = FreeSync Minimum refresh rate (Hz) */ - infopacket->sb[7] = (unsigned char)(vrr->min_refresh_in_uhz / 1000000); + infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000); /* PB8 = FreeSync Maximum refresh rate (Hz) * Note: We should never go above the field rate of the mode timing set. */ - infopacket->sb[8] = (unsigned char)(vrr->max_refresh_in_uhz / 1000000); + infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000); //FreeSync HDR @@ -734,6 +734,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, { struct core_freesync *core_freesync = NULL; unsigned long long nominal_field_rate_in_uhz = 0; + unsigned long long rounded_nominal_in_uhz = 0; unsigned int refresh_range = 0; unsigned long long min_refresh_in_uhz = 0; unsigned long long max_refresh_in_uhz = 0; @@ -747,24 +748,23 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, nominal_field_rate_in_uhz = mod_freesync_calc_nominal_field_rate(stream); - /* Rounded to the nearest Hz */ - nominal_field_rate_in_uhz = 1000000ULL * - div_u64(nominal_field_rate_in_uhz + 500000, 1000000); - min_refresh_in_uhz = in_config->min_refresh_in_uhz; max_refresh_in_uhz = in_config->max_refresh_in_uhz; - // Don't allow min > max - if (min_refresh_in_uhz > max_refresh_in_uhz) - min_refresh_in_uhz = max_refresh_in_uhz; - // Full range may be larger than current video timing, so cap at nominal if (max_refresh_in_uhz > nominal_field_rate_in_uhz) max_refresh_in_uhz = nominal_field_rate_in_uhz; // Full range may be larger than current video timing, so cap at nominal - if (min_refresh_in_uhz > nominal_field_rate_in_uhz) - min_refresh_in_uhz = nominal_field_rate_in_uhz; + if (min_refresh_in_uhz > max_refresh_in_uhz) + min_refresh_in_uhz = max_refresh_in_uhz; + + // If a monitor reports exactly max refresh of 2x of min, enforce it on nominal + rounded_nominal_in_uhz = + div_u64(nominal_field_rate_in_uhz + 50000, 100000) * 100000; + if (in_config->max_refresh_in_uhz == (2 * in_config->min_refresh_in_uhz) && + in_config->max_refresh_in_uhz == rounded_nominal_in_uhz) + min_refresh_in_uhz = div_u64(nominal_field_rate_in_uhz, 2); if (!vrr_settings_require_update(core_freesync, in_config, (unsigned int)min_refresh_in_uhz, (unsigned int)max_refresh_in_uhz, @@ -796,11 +796,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, refresh_range = in_out_vrr->max_refresh_in_uhz - in_out_vrr->min_refresh_in_uhz; - in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us - - 2 * in_out_vrr->min_duration_in_us; - if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN) - in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN; - in_out_vrr->supported = true; } @@ -808,9 +803,14 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->btr.btr_enabled = in_config->btr; - if (in_out_vrr->max_refresh_in_uhz < - 2 * in_out_vrr->min_refresh_in_uhz) + if (in_out_vrr->max_refresh_in_uhz < (2 * in_out_vrr->min_refresh_in_uhz)) in_out_vrr->btr.btr_enabled = false; + else { + in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us - + 2 * in_out_vrr->min_duration_in_us; + if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN) + in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN; + } in_out_vrr->btr.btr_active = false; in_out_vrr->btr.inserted_duration_in_us = 0; @@ -1012,8 +1012,8 @@ unsigned long long mod_freesync_calc_nominal_field_rate( unsigned int total = stream->timing.h_total * stream->timing.v_total; /* Calculate 
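/*
 * The PB7/PB8 change above switches from truncation to
 * round-to-nearest when converting uHz to whole Hz (example values):
 *
 *   min_refresh_in_uhz = 47952000                        -- 47.952 Hz
 *   old: 47952000 / 1000000               = 47 Hz  (truncated)
 *   new: (47952000 + 500000) / 1000000    = 48 Hz  (nearest)
 *
 * The 2x-min check above likewise compares against nominal rounded to
 * the nearest 0.1 Hz via div_u64(nominal + 50000, 100000) * 100000.
 */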
nominal field rate for stream, rounded up to nearest integer */ - nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10; - nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL; + nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz; + nominal_field_rate_in_uhz *= 100000000ULL; nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index 8aa528e874c4..cc1d3f470b99 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -52,8 +52,8 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp) * hdcp is not desired */ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && - !hdcp->connection.displays[i].adjust.disable) { + if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && + !hdcp->displays[i].adjust.disable) { is_auth_needed = 1; break; } @@ -61,7 +61,8 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp) return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) && is_auth_needed && - !hdcp->connection.link.adjust.hdcp1.disable; + !hdcp->connection.link.adjust.hdcp1.disable && + !hdcp->connection.is_hdcp1_revoked; } static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp) @@ -72,8 +73,8 @@ static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp) * hdcp is not desired */ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && - !hdcp->connection.displays[i].adjust.disable) { + if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && + !hdcp->displays[i].adjust.disable) { is_auth_needed = 1; break; } @@ -103,8 +104,6 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp, event_ctx->unexpected_event = 1; goto out; } - /* update topology event if hdcp is not desired */ - status = mod_hdcp_add_display_topology(hdcp); } else if (is_in_hdcp1_states(hdcp)) { status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1); } else if (is_in_hdcp1_dp_states(hdcp)) { @@ -115,6 +114,9 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp, } else if (is_in_hdcp2_dp_states(hdcp)) { status = mod_hdcp_hdcp2_dp_execution(hdcp, event_ctx, &input->hdcp2); + } else { + event_ctx->unexpected_event = 1; + goto out; } out: return status; @@ -191,14 +193,7 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp, mod_hdcp_hdcp1_destroy_session(hdcp); } - if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) { - status = mod_hdcp_remove_display_topology(hdcp); - if (status != MOD_HDCP_STATUS_SUCCESS) { - output->callback_needed = 0; - output->watchdog_timer_needed = 0; - goto out; - } - } + HDCP_TOP_RESET_AUTH_TRACE(hdcp); memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication)); memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state)); @@ -212,25 +207,12 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp, goto out; } } - if (hdcp->auth.trans_input.hdcp2.add_topology == PASS) { - status = mod_hdcp_remove_display_topology(hdcp); - if (status != MOD_HDCP_STATUS_SUCCESS) { - output->callback_needed = 0; - output->watchdog_timer_needed = 0; - goto out; - } - } + HDCP_TOP_RESET_AUTH_TRACE(hdcp); memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication)); memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state)); set_state_id(hdcp, output, HDCP_INITIALIZED); } else if (is_in_cp_not_desired_state(hdcp)) { - 
status = mod_hdcp_remove_display_topology(hdcp); - if (status != MOD_HDCP_STATUS_SUCCESS) { - output->callback_needed = 0; - output->watchdog_timer_needed = 0; - goto out; - } HDCP_TOP_RESET_AUTH_TRACE(hdcp); memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication)); memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state)); @@ -337,16 +319,19 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp, if (status != MOD_HDCP_STATUS_SUCCESS) goto out; - /* add display to connection */ - hdcp->connection.link = *link; - *display_container = *display; - /* reset retry counters */ reset_retry_counts(hdcp); /* reset error trace */ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); + /* add display to connection */ + hdcp->connection.link = *link; + *display_container = *display; + status = mod_hdcp_add_display_to_topology(hdcp, display->index); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + /* request authentication */ if (current_state(hdcp) != HDCP_INITIALIZED) set_state_id(hdcp, output, HDCP_INITIALIZED); @@ -379,17 +364,20 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp, if (status != MOD_HDCP_STATUS_SUCCESS) goto out; - /* remove display */ - display->state = MOD_HDCP_DISPLAY_INACTIVE; - /* clear retry counters */ reset_retry_counts(hdcp); /* reset error trace */ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); - /* request authentication for remaining displays*/ - if (get_active_display_count(hdcp) > 0) + /* remove display */ + status = mod_hdcp_remove_display_from_topology(hdcp, index); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + display->state = MOD_HDCP_DISPLAY_INACTIVE; + + /* request authentication when connection is not reset */ + if (current_state(hdcp) != HDCP_UNINITIALIZED) callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, output); out: @@ -496,10 +484,8 @@ enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode( break; case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_DISPLAY_PORT: - mode = MOD_HDCP_MODE_DP; - break; case SIGNAL_TYPE_DISPLAY_PORT_MST: - mode = MOD_HDCP_MODE_DP_MST; + mode = MOD_HDCP_MODE_DP; break; default: break; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index af78e4f1be68..5cb4546be0ef 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -41,7 +41,6 @@ enum mod_hdcp_trans_input_result { struct mod_hdcp_transition_input_hdcp1 { uint8_t bksv_read; uint8_t bksv_validation; - uint8_t add_topology; uint8_t create_session; uint8_t an_write; uint8_t aksv_write; @@ -71,7 +70,6 @@ struct mod_hdcp_transition_input_hdcp1 { struct mod_hdcp_transition_input_hdcp2 { uint8_t hdcp2version_read; uint8_t hdcp2_capable_check; - uint8_t add_topology; uint8_t create_session; uint8_t ake_init_prepare; uint8_t ake_init_write; @@ -167,9 +165,9 @@ struct mod_hdcp_auth_counters { /* contains values per connection */ struct mod_hdcp_connection { struct mod_hdcp_link link; - struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS]; uint8_t is_repeater; uint8_t is_km_stored; + uint8_t is_hdcp1_revoked; uint8_t is_hdcp2_revoked; struct mod_hdcp_trace trace; uint8_t hdcp1_retry_count; @@ -202,6 +200,8 @@ struct mod_hdcp { struct mod_hdcp_config config; /* per connection */ struct mod_hdcp_connection connection; + /* per displays */ + struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS]; /* per authentication attempt */ struct mod_hdcp_authentication auth; /* per 
state in an authentication */ @@ -327,10 +327,10 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size, /* TODO: add adjustment log */ /* psp functions */ -enum mod_hdcp_status mod_hdcp_add_display_topology( - struct mod_hdcp *hdcp); -enum mod_hdcp_status mod_hdcp_remove_display_topology( - struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_add_display_to_topology( + struct mod_hdcp *hdcp, uint8_t index); +enum mod_hdcp_status mod_hdcp_remove_display_from_topology( + struct mod_hdcp *hdcp, uint8_t index); enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp); enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp); enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp); @@ -392,13 +392,13 @@ enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp); /* hdcp version helpers */ static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp) { - return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP || - hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST); + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP); } static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp) { - return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST); + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP && + hdcp->connection.link.dp.mst_supported); } static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp) @@ -519,7 +519,7 @@ static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp) uint8_t i; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (is_display_active(&hdcp->connection.displays[i])) + if (is_display_active(&hdcp->displays[i])) added_count++; return added_count; } @@ -530,7 +530,7 @@ static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp) uint8_t i; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (is_display_added(&hdcp->connection.displays[i])) + if (is_display_added(&hdcp->displays[i])) added_count++; return added_count; } @@ -542,8 +542,8 @@ static inline struct mod_hdcp_display *get_first_added_display( struct mod_hdcp_display *display = NULL; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (is_display_added(&hdcp->connection.displays[i])) { - display = &hdcp->connection.displays[i]; + if (is_display_added(&hdcp->displays[i])) { + display = &hdcp->displays[i]; break; } return display; @@ -556,9 +556,9 @@ static inline struct mod_hdcp_display *get_active_display_at_index( struct mod_hdcp_display *display = NULL; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (hdcp->connection.displays[i].index == index && - is_display_active(&hdcp->connection.displays[i])) { - display = &hdcp->connection.displays[i]; + if (hdcp->displays[i].index == index && + is_display_active(&hdcp->displays[i])) { + display = &hdcp->displays[i]; break; } return display; @@ -571,8 +571,8 @@ static inline struct mod_hdcp_display *get_empty_display_container( struct mod_hdcp_display *display = NULL; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (!is_display_active(&hdcp->connection.displays[i])) { - display = &hdcp->connection.displays[i]; + if (!is_display_active(&hdcp->displays[i])) { + display = &hdcp->displays[i]; break; } return display; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 37670db64855..37c8c05497d6 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -168,10 +168,6 @@ static enum mod_hdcp_status exchange_ksvs(struct 
mod_hdcp *hdcp, goto out; } - if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology, - &input->add_topology, &status, - hdcp, "add_topology")) - goto out; if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session, &input->create_session, &status, hdcp, "create_session")) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c index 76edcbe51f71..f3711914364e 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c @@ -46,8 +46,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp, set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS); break; case H1_A1_EXCHANGE_KSVS: - if (input->add_topology != PASS || - input->create_session != PASS) { + if (input->create_session != PASS) { /* out of sync with psp state */ adjust->hdcp1.disable = 1; fail_and_restart_in_ms(0, &status, output); @@ -173,8 +172,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS); break; case D1_A1_EXCHANGE_KSVS: - if (input->add_topology != PASS || - input->create_session != PASS) { + if (input->create_session != PASS) { /* out of sync with psp state */ adjust->hdcp1.disable = 1; fail_and_restart_in_ms(0, &status, output); @@ -210,7 +208,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, fail_and_restart_in_ms(0, &status, output); break; } else if (input->rx_validation != PASS) { - if (hdcp->state.stay_count < 2) { + if (hdcp->state.stay_count < 2 && + !hdcp->connection.is_hdcp1_revoked) { /* allow 2 additional retries */ callback_in_ms(0, output); increment_stay_counter(hdcp); @@ -231,6 +230,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, (!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) { fail_and_restart_in_ms(0, &status, output); break; + } else if (conn->hdcp1_retry_count < conn->link.adjust.hdcp1.min_auth_retries_wa) { + fail_and_restart_in_ms(0, &status, output); + break; } if (conn->is_repeater) { set_watchdog_in_ms(hdcp, 5000, output); @@ -290,7 +292,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, fail_and_restart_in_ms(0, &status, output); break; } else if (input->ksvlist_vp_validation != PASS) { - if (hdcp->state.stay_count < 2) { + if (hdcp->state.stay_count < 2 && + !hdcp->connection.is_hdcp1_revoked) { /* allow 2 additional retries */ callback_in_ms(0, output); increment_stay_counter(hdcp); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index 55246711700b..491c00f48026 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -34,7 +34,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp if (is_dp_hdcp(hdcp)) is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0; else - is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[0]) && + is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[1]) && (HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0; return is_ready ? 
MOD_HDCP_STATUS_SUCCESS : @@ -67,7 +67,7 @@ static inline enum mod_hdcp_status check_reauthentication_request( MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST : MOD_HDCP_STATUS_SUCCESS; else - ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[0]) ? + ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[1]) ? MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST : MOD_HDCP_STATUS_SUCCESS; return ret; @@ -259,6 +259,7 @@ static enum mod_hdcp_status known_hdcp2_capable_rx(struct mod_hdcp *hdcp, event_ctx->unexpected_event = 1; goto out; } + if (!mod_hdcp_execute_and_set(mod_hdcp_read_hdcp2version, &input->hdcp2version_read, &status, hdcp, "hdcp2version_read")) @@ -281,10 +282,7 @@ static enum mod_hdcp_status send_ake_init(struct mod_hdcp *hdcp, event_ctx->unexpected_event = 1; goto out; } - if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology, - &input->add_topology, &status, - hdcp, "add_topology")) - goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_create_session, &input->create_session, &status, hdcp, "create_session")) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c index 8cae3e3aacd5..e738c7ae66ec 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c @@ -47,8 +47,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp, } break; case H2_A1_SEND_AKE_INIT: - if (input->add_topology != PASS || - input->create_session != PASS || + if (input->create_session != PASS || input->ake_init_prepare != PASS) { /* out of sync with psp state */ adjust->hdcp2.disable = 1; @@ -389,8 +388,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, } break; case D2_A1_SEND_AKE_INIT: - if (input->add_topology != PASS || - input->create_session != PASS || + if (input->create_session != PASS || input->ake_init_prepare != PASS) { /* out of sync with psp state */ adjust->hdcp2.disable = 1; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c index ff9d54812e62..bb5130f4228d 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c @@ -65,6 +65,7 @@ enum mod_hdcp_ddc_message_id { MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, + MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, @@ -101,6 +102,7 @@ static const uint8_t hdcp_i2c_offsets[] = { [MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80, [MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60, [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80, + [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x80, [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60, [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60, [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80, @@ -135,6 +137,7 @@ static const uint32_t hdcp_dpcd_addrs[] = { [MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8, [MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318, [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330, + [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x69340, 
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0, [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0, [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473, @@ -405,7 +408,7 @@ enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp) enum mod_hdcp_status status; if (is_dp_hdcp(hdcp)) { - hdcp->auth.msg.hdcp2.ake_cert[0] = 3; + hdcp->auth.msg.hdcp2.ake_cert[0] = HDCP_2_2_AKE_SEND_CERT; status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT, hdcp->auth.msg.hdcp2.ake_cert+1, sizeof(hdcp->auth.msg.hdcp2.ake_cert)-1); @@ -423,7 +426,7 @@ enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp) enum mod_hdcp_status status; if (is_dp_hdcp(hdcp)) { - hdcp->auth.msg.hdcp2.ake_h_prime[0] = 7; + hdcp->auth.msg.hdcp2.ake_h_prime[0] = HDCP_2_2_AKE_SEND_HPRIME; status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME, hdcp->auth.msg.hdcp2.ake_h_prime+1, sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)-1); @@ -441,7 +444,7 @@ enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp) enum mod_hdcp_status status; if (is_dp_hdcp(hdcp)) { - hdcp->auth.msg.hdcp2.ake_pairing_info[0] = 8; + hdcp->auth.msg.hdcp2.ake_pairing_info[0] = HDCP_2_2_AKE_SEND_PAIRING_INFO; status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO, hdcp->auth.msg.hdcp2.ake_pairing_info+1, sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)-1); @@ -459,7 +462,7 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp) enum mod_hdcp_status status; if (is_dp_hdcp(hdcp)) { - hdcp->auth.msg.hdcp2.lc_l_prime[0] = 10; + hdcp->auth.msg.hdcp2.lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME; status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, hdcp->auth.msg.hdcp2.lc_l_prime+1, sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)-1); @@ -474,14 +477,27 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp) enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp) { - enum mod_hdcp_status status; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; if (is_dp_hdcp(hdcp)) { - hdcp->auth.msg.hdcp2.rx_id_list[0] = 12; - status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, - hdcp->auth.msg.hdcp2.rx_id_list+1, - sizeof(hdcp->auth.msg.hdcp2.rx_id_list)-1); + uint32_t device_count = 0; + uint32_t rx_id_list_size = 0; + uint32_t bytes_read = 0; + hdcp->auth.msg.hdcp2.rx_id_list[0] = HDCP_2_2_REP_SEND_RECVID_LIST; + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, + hdcp->auth.msg.hdcp2.rx_id_list+1, + HDCP_MAX_AUX_TRANSACTION_SIZE); + if (status == MOD_HDCP_STATUS_SUCCESS) { + bytes_read = HDCP_MAX_AUX_TRANSACTION_SIZE; + device_count = HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) + + (HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4); + rx_id_list_size = MIN((21 + 5 * device_count), + (sizeof(hdcp->auth.msg.hdcp2.rx_id_list) - 1)); + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2, + hdcp->auth.msg.hdcp2.rx_id_list + 1 + bytes_read, + (rx_id_list_size - 1) / HDCP_MAX_AUX_TRANSACTION_SIZE * HDCP_MAX_AUX_TRANSACTION_SIZE); + } } else { status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, hdcp->auth.msg.hdcp2.rx_id_list, @@ -495,7 +511,7 @@ enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp) enum mod_hdcp_status status; if (is_dp_hdcp(hdcp)) { - hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = 17; + hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = HDCP_2_2_REP_STREAM_READY; status = 
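/*
 * Sizing logic for the two-part DP receiver ID list read above (a
 * worked example, assuming a repeater reporting 4 devices; field sizes
 * per the HDCP 2.2 RepeaterAuth_Send_ReceiverID_List layout):
 *
 *   body = RxInfo(2) + seq_num_V(3) + V'(16) + 5 * device_count
 *        = 21 + 5 * 4 = 41 bytes
 *
 * The first AUX read pulls HDCP_MAX_AUX_TRANSACTION_SIZE bytes from
 * 0x69330; the ..._PART2 read continues at 0x69340 with the remainder,
 * rounded to whole AUX transactions.
 */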
read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, hdcp->auth.msg.hdcp2.repeater_auth_stream_ready+1, sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)-1); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c index 724ebcee9a19..44956f9ba178 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c @@ -90,10 +90,14 @@ char *mod_hdcp_status_to_str(int32_t status) return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING"; case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE: return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED: + return "MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED"; case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY: return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY"; case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE: return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED: + return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED"; case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION: return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION"; case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE: diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h index ff91373ebada..d3192b9d0c3d 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -37,10 +37,11 @@ /* default logs */ #define HDCP_ERROR_TRACE(hdcp, status) \ HDCP_LOG_ERR(hdcp, \ - "[Link %d] WARNING %s IN STATE %s", \ + "[Link %d] WARNING %s IN STATE %s STAY COUNT %d", \ hdcp->config.index, \ mod_hdcp_status_to_str(status), \ - mod_hdcp_state_id_to_str(hdcp->state.id)) + mod_hdcp_state_id_to_str(hdcp->state.id), \ + hdcp->state.stay_count) #define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \ HDCP_LOG_VER(hdcp, \ "[Link %d] HDCP 1.4 enabled on display %d", \ @@ -49,6 +50,15 @@ HDCP_LOG_VER(hdcp, \ "[Link %d] HDCP 2.2 enabled on display %d", \ hdcp->config.index, displayIndex) +#define HDCP_HDCP1_DISABLED_TRACE(hdcp, displayIndex) \ + HDCP_LOG_VER(hdcp, \ + "[Link %d] HDCP 1.4 disabled on display %d", \ + hdcp->config.index, displayIndex) +#define HDCP_HDCP2_DISABLED_TRACE(hdcp, displayIndex) \ + HDCP_LOG_VER(hdcp, \ + "[Link %d] HDCP 2.2 disabled on display %d", \ + hdcp->config.index, displayIndex) + /* state machine logs */ #define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \ HDCP_LOG_FSM(hdcp, \ @@ -102,6 +112,9 @@ sizeof(hdcp->auth.msg.hdcp1.bksv)); \ HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \ sizeof(hdcp->auth.msg.hdcp1.bcaps)); \ + HDCP_DDC_READ_TRACE(hdcp, "BSTATUS", \ + (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, \ + sizeof(hdcp->auth.msg.hdcp1.bstatus)); \ HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \ sizeof(hdcp->auth.msg.hdcp1.an)); \ HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index 7911dc157d5a..c2929815c3ee 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -44,83 +44,80 @@ static void hdcp2_message_init(struct mod_hdcp *hdcp, in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; in->process.msg3_desc.msg_size = 0; } -enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp) -{ - - struct psp_context *psp = 
hdcp->config.psp.handle; - struct ta_dtm_shared_memory *dtm_cmd; - struct mod_hdcp_display *display = NULL; - uint8_t i; +enum mod_hdcp_status mod_hdcp_remove_display_from_topology( + struct mod_hdcp *hdcp, uint8_t index) + { + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_dtm_shared_memory *dtm_cmd; + struct mod_hdcp_display *display = + get_active_display_at_index(hdcp, index); dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; - for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (is_display_added(&(hdcp->connection.displays[i]))) { - - memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); - - display = &hdcp->connection.displays[i]; + if (!display || !is_display_added(display)) + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; - dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2; - dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index; - dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0; - dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; + memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); - psp_dtm_invoke(psp, dtm_cmd->cmd_id); + dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2; + dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index; + dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0; + dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; - if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) - return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + psp_dtm_invoke(psp, dtm_cmd->cmd_id); - display->state = MOD_HDCP_DISPLAY_ACTIVE; - HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index); - } - } + if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) + return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; - return MOD_HDCP_STATUS_SUCCESS; + display->state = MOD_HDCP_DISPLAY_ACTIVE; + HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index); + + return MOD_HDCP_STATUS_SUCCESS; + } - -enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp) +enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp, + uint8_t index) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_dtm_shared_memory *dtm_cmd; - struct mod_hdcp_display *display = NULL; + struct mod_hdcp_display *display = + get_active_display_at_index(hdcp, index); struct mod_hdcp_link *link = &hdcp->connection.link; - uint8_t i; if (!psp->dtm_context.dtm_initialized) { DRM_ERROR("Failed to add display topology, DTM TA is not initialized."); return MOD_HDCP_STATUS_FAILURE; } + if (!display || is_display_added(display)) + return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; - for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE) { - display = &hdcp->connection.displays[i]; - - memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); - - dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2; - dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index; - dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1; - dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller; - dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line; - dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be; - dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe; - dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id; - dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version = - 
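/*
 * The DTM topology update is now per display index rather than a bulk
 * walk of every display on the connection; mod_hdcp_add_display() and
 * mod_hdcp_remove_display() call these helpers at hotplug time instead
 * of the HDCP state machines doing it mid-authentication. Condensed
 * sketch of the remove path shown earlier in this patch:
 *
 *   status = mod_hdcp_remove_display_from_topology(hdcp, index);
 *   if (status != MOD_HDCP_STATUS_SUCCESS)
 *           goto out;          // display stays marked as added
 *   display->state = MOD_HDCP_DISPLAY_INACTIVE;
 */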
TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2; - dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; - - psp_dtm_invoke(psp, dtm_cmd->cmd_id); - - if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) - return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; - - display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; - HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index); - } - } + memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); + + dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2; + dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index; + dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1; + dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller; + dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line; + dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be; + dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe; + if (is_dp_hdcp(hdcp)) + dtm_cmd->dtm_in_message.topology_update_v2.is_assr = link->dp.assr_supported; + + dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id; + dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version = + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2; + dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; + + psp_dtm_invoke(psp, dtm_cmd->cmd_id); + + if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) + return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + + display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; + HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index); return MOD_HDCP_STATUS_SUCCESS; } @@ -164,6 +161,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp) struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; + uint8_t i = 0; hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -177,6 +175,14 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp) return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE; HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp); + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_encryption_enabled( + &hdcp->displays[i])) { + hdcp->displays[i].state = + MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; + HDCP_HDCP1_DISABLED_TRACE(hdcp, + hdcp->displays[i].index); + } return MOD_HDCP_STATUS_SUCCESS; } @@ -210,6 +216,10 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp) } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status == TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) { hdcp->connection.is_repeater = 0; + } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status == + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) { + hdcp->connection.is_hdcp1_revoked = 1; + return MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED; } else return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE; @@ -245,6 +255,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -264,10 +275,19 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp) psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE; + if 
(hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS && + hdcp_cmd->out_msg.hdcp1_second_part_authentication.authentication_status == + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) { + status = MOD_HDCP_STATUS_SUCCESS; + } else if (hdcp_cmd->out_msg.hdcp1_second_part_authentication.authentication_status == + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) { + hdcp->connection.is_hdcp1_revoked = 1; + status = MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED; + } else { + status = MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE; + } - return MOD_HDCP_STATUS_SUCCESS; + return status; } enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp) @@ -281,14 +301,14 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || - hdcp->connection.displays[i].adjust.disable) + if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || + hdcp->displays[i].adjust.disable) continue; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id; - hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index; + hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index; hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION; psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); @@ -296,8 +316,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE; - hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; - HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index); + hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index); } return MOD_HDCP_STATUS_SUCCESS; @@ -385,6 +405,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; + uint8_t i = 0; hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -398,6 +419,14 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp) return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE; HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp); + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_encryption_enabled( + &hdcp->displays[i])) { + hdcp->displays[i].state = + MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; + HDCP_HDCP2_DISABLED_TRACE(hdcp, + hdcp->displays[i].index); + } return MOD_HDCP_STATUS_SUCCESS; } @@ -473,9 +502,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp) hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0; hdcp->connection.is_repeater = msg_out->process.is_repeater ? 
1 : 0; return MOD_HDCP_STATUS_SUCCESS; + } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) { + hdcp->connection.is_hdcp2_revoked = 1; + return MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED; } - return MOD_HDCP_STATUS_FAILURE; + return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE; } enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp) @@ -630,20 +662,15 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; - struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; struct mod_hdcp_display *display = get_first_added_display(hdcp); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); - msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; - - hdcp2_message_init(hdcp, msg_in); - if (!display) return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; - hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id; + hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id; hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION; psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); @@ -695,6 +722,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp) hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0; hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0; return MOD_HDCP_STATUS_SUCCESS; + } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) { + hdcp->connection.is_hdcp2_revoked = 1; + return MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED; } @@ -717,10 +747,10 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || - hdcp->connection.displays[i].adjust.disable) + if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || + hdcp->displays[i].adjust.disable) continue; - hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index; + hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index; hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id; hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION; @@ -729,8 +759,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) break; - hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; - HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index); + hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->displays[i].index); } return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? 
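/*
 * Revocation handling pattern repeated above: when the PSP TA reports
 * a revoked receiver ID, the connection is flagged so the transition
 * tables stop retrying a receiver that can never authenticate.
 * Condensed from the DP transition earlier in this patch:
 *
 *   if (input->rx_validation != PASS) {
 *           if (hdcp->state.stay_count < 2 &&
 *               !hdcp->connection.is_hdcp1_revoked) {
 *                   callback_in_ms(0, output);      // transient: retry
 *                   increment_stay_counter(hdcp);
 *           } else {
 *                   fail_and_restart_in_ms(0, &status, output);
 *           }
 *   }
 */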
MOD_HDCP_STATUS_SUCCESS diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h index 82a5e997d573..1a663dbbf810 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h @@ -117,6 +117,8 @@ struct ta_dtm_shared_memory { int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr); +enum { PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE = 5120 }; + enum ta_hdcp_command { TA_HDCP_COMMAND__INITIALIZE, TA_HDCP_COMMAND__HDCP1_CREATE_SESSION, @@ -134,7 +136,10 @@ enum ta_hdcp_command { TA_HDCP_COMMAND__UNUSED_3, TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2, TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2, - TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION + TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION, + TA_HDCP_COMMAND__HDCP_DESTROY_ALL_SESSIONS, + TA_HDCP_COMMAND__HDCP_SET_SRM, + TA_HDCP_COMMAND__HDCP_GET_SRM }; enum ta_hdcp2_msg_id { @@ -235,7 +240,8 @@ enum ta_hdcp_authentication_status { TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_PENDING = 0x06, TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_FAILED = 0x07, TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATED = 0x08, - TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09 + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED = 0x0A }; enum ta_hdcp2_msg_authentication_status { @@ -253,7 +259,8 @@ enum ta_hdcp2_msg_authentication_status { TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SEQ_NUM, TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SIZE, TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_LENGTH, - TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST + TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED }; enum ta_hdcp_content_type { @@ -415,6 +422,22 @@ struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input { uint32_t display_handle; }; +struct ta_hdcp_cmd_set_srm_input { + uint32_t srm_buf_size; + uint8_t srm_buf[PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE]; +}; + +struct ta_hdcp_cmd_set_srm_output { + uint8_t valid_signature; + uint32_t srm_version; +}; + +struct ta_hdcp_cmd_get_srm_output { + uint32_t srm_version; + uint32_t srm_buf_size; + uint8_t srm_buf[PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE]; +}; + /**********************************************************/ /* Common input structure for HDCP callbacks */ union ta_hdcp_cmd_input { @@ -432,6 +455,7 @@ union ta_hdcp_cmd_input { struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 hdcp2_prepare_process_authentication_message_v2; struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input hdcp2_enable_dp_stream_encryption; + struct ta_hdcp_cmd_set_srm_input hdcp_set_srm; }; /* Common output structure for HDCP callbacks */ @@ -444,6 +468,8 @@ union ta_hdcp_cmd_output { struct ta_hdcp_cmd_hdcp2_create_session_output_v2 hdcp2_create_session_v2; struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 hdcp2_prepare_process_authentication_message_v2; + struct ta_hdcp_cmd_set_srm_output hdcp_set_srm; + struct ta_hdcp_cmd_get_srm_output hdcp_get_srm; }; /**********************************************************/ diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index f2a0e1a064da..c088602bc1a0 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ 
b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -56,8 +56,10 @@ enum mod_hdcp_status { MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE, MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING, MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE, + MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED, MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY, MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE, + MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED, MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION, MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE, MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE, @@ -100,6 +102,7 @@ enum mod_hdcp_status { struct mod_hdcp_displayport { uint8_t rev; uint8_t assr_supported; + uint8_t mst_supported; }; struct mod_hdcp_hdmi { @@ -108,8 +111,7 @@ struct mod_hdcp_hdmi { enum mod_hdcp_operation_mode { MOD_HDCP_MODE_OFF, MOD_HDCP_MODE_DEFAULT, - MOD_HDCP_MODE_DP, - MOD_HDCP_MODE_DP_MST + MOD_HDCP_MODE_DP }; enum mod_hdcp_display_state { @@ -155,7 +157,8 @@ struct mod_hdcp_display_adjustment { struct mod_hdcp_link_adjustment_hdcp1 { uint8_t disable : 1; uint8_t postpone_encryption : 1; - uint8_t reserved : 6; + uint8_t min_auth_retries_wa : 1; + uint8_t reserved : 5; }; enum mod_hdcp_force_hdcp_type { diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h index 42cbeffac640..13c57ff2abdc 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h @@ -34,8 +34,7 @@ struct dc_info_packet; struct mod_vrr_params; void mod_build_vsc_infopacket(const struct dc_stream_state *stream, - struct dc_info_packet *info_packet, - bool *use_vsc_sdp_for_colorimetry); + struct dc_info_packet *info_packet); void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream, struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue); diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 6a8a056424b8..cff3ab15fc0c 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -130,8 +130,7 @@ enum ColorimetryYCCDP { }; void mod_build_vsc_infopacket(const struct dc_stream_state *stream, - struct dc_info_packet *info_packet, - bool *use_vsc_sdp_for_colorimetry) + struct dc_info_packet *info_packet) { unsigned int vsc_packet_revision = vsc_packet_undefined; unsigned int i; @@ -139,11 +138,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, unsigned int colorimetryFormat = 0; bool stereo3dSupport = false; - /* Initialize first, later if infopacket is valid determine if VSC SDP - * should be used to signal colorimetry format and pixel encoding. 
- */ - *use_vsc_sdp_for_colorimetry = false; - if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) { vsc_packet_revision = vsc_packet_rev1; stereo3dSupport = true; @@ -153,9 +147,8 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, if (stream->psr_version != 0) vsc_packet_revision = vsc_packet_rev2; - /* Update to revision 5 for extended colorimetry support for DPCD 1.4+ */ - if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && - stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) + /* Update to revision 5 for extended colorimetry support */ + if (stream->use_vsc_sdp_for_colorimetry) vsc_packet_revision = vsc_packet_rev5; /* VSC packet not needed based on the features @@ -269,13 +262,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, info_packet->valid = true; - /* If we are using VSC SDP revision 05h, use this to signal for - * colorimetry format and pixel encoding. HW should later be - * programmed to set MSA MISC1 bit 6 to indicate ignore - * colorimetry format and pixel encoding in the MSA. - */ - *use_vsc_sdp_for_colorimetry = true; - /* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs * Data Bytes DB 18~16 * Bits 3:0 (Colorimetry Format) | Bits 7:4 (Pixel Encoding) diff --git a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c index f0a153704f6e..00f132f8ad55 100644 --- a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c +++ b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c @@ -40,14 +40,18 @@ struct core_vmid { static void add_ptb_to_table(struct core_vmid *core_vmid, unsigned int vmid, uint64_t ptb) { - core_vmid->ptb_assigned_to_vmid[vmid] = ptb; - core_vmid->num_vmids_available--; + if (vmid < MAX_VMID) { + core_vmid->ptb_assigned_to_vmid[vmid] = ptb; + core_vmid->num_vmids_available--; + } } static void clear_entry_from_vmid_table(struct core_vmid *core_vmid, unsigned int vmid) { - core_vmid->ptb_assigned_to_vmid[vmid] = 0; - core_vmid->num_vmids_available++; + if (vmid < MAX_VMID) { + core_vmid->ptb_assigned_to_vmid[vmid] = 0; + core_vmid->num_vmids_available++; + } } static void evict_vmids(struct core_vmid *core_vmid) @@ -57,7 +61,7 @@ static void evict_vmids(struct core_vmid *core_vmid) // At this point any positions with value 0 are unused vmids, evict them for (i = 1; i < core_vmid->num_vmid; i++) { - if (ord & (1u << i)) + if (!(ord & (1u << i))) clear_entry_from_vmid_table(core_vmid, i); } } @@ -91,7 +95,7 @@ static int get_next_available_vmid(struct core_vmid *core_vmid) uint8_t mod_vmid_get_for_ptb(struct mod_vmid *mod_vmid, uint64_t ptb) { struct core_vmid *core_vmid = MOD_VMID_TO_CORE(mod_vmid); - unsigned int vmid = 0; + int vmid = 0; // Physical address gets vmid 0 if (ptb == 0) diff --git a/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h new file mode 100644 index 000000000000..82b6cc25205e --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h @@ -0,0 +1,69 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
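/*
 * The evict_vmids() fix above inverts the test: a set bit in ord means
 * the VMID is still referenced by hardware. Worked example with
 * num_vmid = 4 and ord = 0b0110 (VMIDs 1 and 2 in use):
 *
 *   old: if (ord & (1u << i))      -- evicted the *in-use* VMIDs 1, 2
 *   new: if (!(ord & (1u << i)))   -- evicts only the unused VMID 3
 *
 * The new vmid < MAX_VMID guards likewise keep an out-of-range index
 * from corrupting ptb_assigned_to_vmid[].
 */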
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _wafl2_4_0_0_SH_MASK_HEADER +#define _wafl2_4_0_0_SH_MASK_HEADER + +//PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT 0x7 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum__SHIFT 0x17 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator__SHIFT 0x18 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr_MASK 0x00000080L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L +#define 
PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum_MASK 0x00800000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator_MASK 0xFF000000L + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h index 723af0b2dda0..4a51a90c611a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c +++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h @@ -1,5 +1,5 @@ /* - * Copyright 2017 Advanced Micro Devices, Inc. + * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -19,25 +19,11 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * - * Authors: AMD - * */ -#include "dml_common_defs.h" -#include "dcn_calc_math.h" - -#include "dml_inline_defs.h" - -double dml_round(double a) -{ - double round_pt = 0.5; - double ceil = dml_ceil(a, 1); - double floor = dml_floor(a, 1); - - if (a - floor >= round_pt) - return ceil; - else - return floor; -} +#ifndef _wafl2_4_0_0_SMN_HEADER +#define _wafl2_4_0_0_SMN_HEADER +#define smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS 0x11cf0210 +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h new file mode 100644 index 000000000000..f37712f05b03 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h @@ -0,0 +1,69 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
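For reference, the dml_round() helper that disappears in the rename above (git reuses the dml_common_defs.c blob for the new WAFL SMN header) implemented round-half-up on top of dml_ceil()/dml_floor(). Assuming dml_floor() keeps its usual granularity semantics, the same result can be had with a single floor; a sketch:

/* Round-half-up, matching the removed dml_round(): with granularity 1,
 * dml_floor(a + 0.5, 1) picks the ceiling exactly when a - floor(a) >= 0.5.
 * dml_floor() is assumed to be the helper declared in dml_inline_defs.h.
 */
static inline double round_half_up(double a)
{
	return dml_floor(a + 0.5, 1);
}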
+ * + */ + +#ifndef _xgmi_4_0_0_SH_MASK_HEADER +#define _xgmi_4_0_0_SH_MASK_HEADER + +//PCS_GOPX16_PCS_ERROR_STATUS +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT 0x7 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum__SHIFT 0x17 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator__SHIFT 0x18 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr_MASK 0x00000080L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum_MASK 0x00800000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator_MASK 0xFF000000L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h new file mode 100644 index 000000000000..6ccbac4ce87e --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h @@ -0,0 +1,29 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
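The XGMI PCS_ERROR_STATUS layout above mirrors the WAFL one field for field, so both decode with the usual SOC15 shift/mask pattern. A sketch of pulling two fields out of a raw status value (the value would come from an SMN read at the offsets in the companion *_smn.h headers; the surrounding helper is hypothetical):

static void decode_xgmi_pcs_status(uint32_t status)
{
	uint32_t ber;

	if (status & XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr_MASK)
		pr_warn("XGMI PCS: data loss error\n");

	/* BERAccumulator occupies the top byte, bits [31:24] */
	ber = (status & XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator_MASK) >>
	      XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator__SHIFT;
	pr_info("XGMI PCS: BER accumulator = %u\n", ber);
}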
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _xgmi_4_0_0_SMN_HEADER +#define _xgmi_4_0_0_SMN_HEADER + +#define smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS 0x11af0210 + +#endif diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index a607b1034962..a3c238c39ef5 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -123,7 +123,7 @@ struct kgd2kfd_shared_resources { uint32_t num_queue_per_pipe; /* Bit n == 1 means Queue n is available for KFD */ - DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES); + DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES); /* SDMA doorbell assignments (SOC15 and later chips only). Only * specific doorbells are routed to each SDMA engine. Others @@ -151,6 +151,7 @@ struct kgd2kfd_shared_resources { /* Minor device number of the render node */ int drm_render_minor; + }; struct tile_config { @@ -166,27 +167,6 @@ struct tile_config { #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096 -/* - * Allocation flag domains - * NOTE: This must match the corresponding definitions in kfd_ioctl.h. - */ -#define ALLOC_MEM_FLAGS_VRAM (1 << 0) -#define ALLOC_MEM_FLAGS_GTT (1 << 1) -#define ALLOC_MEM_FLAGS_USERPTR (1 << 2) -#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3) -#define ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4) - -/* - * Allocation flags attributes/access options. - * NOTE: This must match the corresponding definitions in kfd_ioctl.h. - */ -#define ALLOC_MEM_FLAGS_WRITABLE (1 << 31) -#define ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30) -#define ALLOC_MEM_FLAGS_PUBLIC (1 << 29) -#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28) /* TODO */ -#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27) -#define ALLOC_MEM_FLAGS_COHERENT (1 << 26) /* For GFXv9 or later */ - /** * struct kfd2kgd_calls * @@ -222,8 +202,6 @@ struct tile_config { * @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID. * Only used for no cp scheduling mode * - * @get_tile_config: Returns GPU-specific tiling mode information - * * @set_vm_context_page_table_base: Program page table base for a VMID * * @invalidate_tlbs: Invalidate TLBs for a specific PASID @@ -236,6 +214,8 @@ struct tile_config { * * @get_hive_id: Returns hive id of current device, 0 if xgmi is not enabled * + * @get_unique_id: Returns uuid id of current device + * * This structure contains function pointers to services that the kgd driver * provides to amdkfd driver. 
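The queue_bitmap to cp_queue_bitmap rename above apparently keeps CP compute queues distinct from the SDMA queue bookkeeping that shares kgd2kfd_shared_resources. A sketch of how a KFD-side consumer might count the CP queues it can use, assuming only the renamed field (the helper is hypothetical):

#include <linux/bitmap.h>

/* Bit n set in cp_queue_bitmap means CP queue n is available for KFD */
static unsigned int kfd_num_usable_cp_queues(const struct kgd2kfd_shared_resources *res)
{
	return bitmap_weight(res->cp_queue_bitmap, KGD_MAX_QUEUES);
}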
* @@ -307,12 +287,11 @@ struct kfd2kgd_calls { void (*set_scratch_backing_va)(struct kgd_dev *kgd, uint64_t va, uint32_t vmid); - int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config); - void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base); uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd); uint64_t (*get_hive_id)(struct kgd_dev *kgd); + uint64_t (*get_unique_id)(struct kgd_dev *kgd); }; diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index c195575366a3..8e2acb4df860 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -319,12 +319,12 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr, if (*level & profile_mode_mask) { hwmgr->saved_dpm_level = hwmgr->dpm_level; hwmgr->en_umd_pstate = true; - amdgpu_device_ip_set_clockgating_state(hwmgr->adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_UNGATE); amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_UNGATE); + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_UNGATE); } } else { /* exit umd pstate, restore level, enable gfx cg*/ @@ -1435,7 +1435,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap) if (!hwmgr) return -EINVAL; - if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability) + if (!(hwmgr->not_vf && amdgpu_dpm) || + !hwmgr->hwmgr_func->get_asic_baco_capability) return 0; mutex_lock(&hwmgr->smu_lock); @@ -1469,7 +1470,8 @@ static int pp_set_asic_baco_state(void *handle, int state) if (!hwmgr) return -EINVAL; - if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state) + if (!(hwmgr->not_vf && amdgpu_dpm) || + !hwmgr->hwmgr_func->set_asic_baco_state) return 0; mutex_lock(&hwmgr->smu_lock); diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 96e81c7bc266..e77046931e4c 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -23,15 +23,12 @@ #include <linux/firmware.h> #include <linux/pci.h> -#include "pp_debug.h" #include "amdgpu.h" #include "amdgpu_smu.h" #include "smu_internal.h" -#include "soc15_common.h" #include "smu_v11_0.h" #include "smu_v12_0.h" #include "atom.h" -#include "amd_pcie.h" #include "vega20_ppt.h" #include "arcturus_ppt.h" #include "navi10_ppt.h" @@ -121,20 +118,20 @@ static int smu_feature_update_enable_state(struct smu_context *smu, if (enabled) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow, - feature_low); + feature_low, NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh, - feature_high); + feature_high, NULL); if (ret) return ret; } else { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow, - feature_low); + feature_low, NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh, - feature_high); + feature_high, NULL); if (ret) return ret; } @@ -195,21 +192,13 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t return -EINVAL; if (if_version) { - ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion); - if (ret) - return ret; - - ret = smu_read_smc_arg(smu, if_version); + ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version); if (ret) return ret; } if (smu_version) { - ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion); - if (ret) - return 
ret; - - ret = smu_read_smc_arg(smu, smu_version); + ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version); if (ret) return ret; } @@ -218,17 +207,19 @@ } int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max) + uint32_t min, uint32_t max, bool lock_needed) { int ret = 0; - if (min < 0 && max < 0) - return -EINVAL; - if (!smu_clk_dpm_is_enabled(smu, clk_type)) return 0; + if (lock_needed) + mutex_lock(&smu->mutex); ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max); + if (lock_needed) + mutex_unlock(&smu->mutex); + return ret; } @@ -251,7 +242,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, if (max > 0) { param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq, - param); + param, NULL); if (ret) return ret; } @@ -259,7 +250,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, if (min > 0) { param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, - param); + param, NULL); if (ret) return ret; } @@ -335,12 +326,8 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_typ param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff)); - ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex, - param); - if (ret) - return ret; - - ret = smu_read_smc_arg(smu, &param); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex, + param, &param); if (ret) return ret; @@ -542,7 +529,8 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int ret = smu_send_smc_msg_with_param(smu, drv2smu ? SMU_MSG_TransferTableDram2Smu : SMU_MSG_TransferTableSmu2Dram, - table_id | ((argument & 0xFFFF) << 16)); + table_id | ((argument & 0xFFFF) << 16), + NULL); if (ret) return ret; @@ -900,6 +888,7 @@ static int smu_sw_init(void *handle) mutex_init(&smu->sensor_lock); mutex_init(&smu->metrics_lock); + mutex_init(&smu->message_lock); smu->watermarks_bitmap = 0; smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; @@ -943,6 +932,13 @@ static int smu_sw_init(void *handle) return ret; } + if (adev->smu.ppt_funcs->i2c_eeprom_init) { + ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c); + + if (ret) + return ret; + } + return 0; } @@ -952,6 +948,9 @@ static int smu_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = &adev->smu; int ret; + if (adev->smu.ppt_funcs->i2c_eeprom_fini) + smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c); + kfree(smu->irq_source); smu->irq_source = NULL; @@ -1113,12 +1112,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu, return ret; } + ret = smu_set_driver_table_location(smu); + if (ret) + return ret; + /* smu_dump_pptable(smu); */ if (!amdgpu_sriov_vf(adev)) { - ret = smu_set_driver_table_location(smu); - if (ret) - return ret; - /* * Copy pptable bo in the vram to smc with SMU MSGs such as * SetDriverDramAddr and TransferTableDram2Smu. @@ -1155,6 +1154,21 @@ static int smu_smc_table_hw_init(struct smu_context *smu, } } } + + if (smu->ppt_funcs->set_power_source) { + /* + * For Navi1X, manually switch it to AC mode as PMFW + * may boot it with DC mode. + */ + if (adev->pm.ac_power) + ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC); + else + ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC); + if (ret) { + pr_err("Failed to switch to %s mode!\n", adev->pm.ac_power ?
"AC" : "DC"); + return ret; + } + } } if (adev->asic_type != CHIP_ARCTURUS) { ret = smu_notify_display_change(smu); @@ -1454,29 +1468,84 @@ int smu_reset(struct smu_context *smu) return ret; } +static int smu_disable_dpm(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t smu_version; + int ret = 0; + bool use_baco = !smu->is_apu && + ((adev->in_gpu_reset && + (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || + ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev))); + + ret = smu_get_smc_version(smu, NULL, &smu_version); + if (ret) { + pr_err("Failed to get smu version.\n"); + return ret; + } + + /* + * Disable all enabled SMU features. + * This should be handled in SMU FW; as a backup, + * the driver can issue the call until the sequence + * in SMU FW is operational. + */ + ret = smu_system_features_control(smu, false); + if (ret) { + pr_err("Failed to disable smu features.\n"); + return ret; + } + + /* + * Arcturus has no BACO bit in its disable-feature mask, + * so enabling the BACO bit is skipped on Arcturus. + */ + if (adev->asic_type == CHIP_ARCTURUS) { + if (use_baco && (smu_version > 0x360e00)) + return 0; + } + + /* For BACO, the BACO feature needs to be left enabled */ + if (use_baco) { + /* + * 'smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)' would + * always return false here, since the 'smu_system_features_control(smu, false)' + * call issued just above disabled all SMU features. + * + * 'smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT)' is therefore + * used for the check instead. + */ + if (smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT) >= 0) { + ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true); + if (ret) { + pr_warn("set BACO feature enabled failed, return %d\n", ret); + return ret; + } + } + } + + return ret; +} + static int smu_suspend(void *handle) { - int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = &adev->smu; - bool baco_feature_is_enabled = false; + int ret; - if (!smu->pm_enabled) + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; - if(!smu->is_apu) - baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT); - - ret = smu_system_features_control(smu, false); - if (ret) - return ret; + if (!smu->pm_enabled) + return 0; - if (baco_feature_is_enabled) { - ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true); - if (ret) { - pr_warn("set BACO feature enabled failed, return %d\n", ret); + if (!amdgpu_sriov_vf(adev)) { + ret = smu_disable_dpm(smu); + if (ret) return ret; - } } smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); @@ -1675,12 +1744,12 @@ static int smu_enable_umd_pstate(void *handle, if (*level & profile_mode_mask) { smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level; smu_dpm_ctx->enable_umd_pstate = true; - amdgpu_device_ip_set_clockgating_state(smu->adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_UNGATE); amdgpu_device_ip_set_powergating_state(smu->adev, AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_UNGATE); + amdgpu_device_ip_set_clockgating_state(smu->adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_UNGATE); } } else { /* exit umd pstate, restore level, enable gfx cg */ @@ -1942,7 +2011,7 @@ int smu_set_mp1_state(struct smu_context *smu, return 0; } - ret = smu_send_smc_msg(smu, msg); + ret = smu_send_smc_msg(smu, msg, NULL); if (ret) pr_err("[PrepareMp1] Failed!\n"); @@ -2018,6 +2087,29 @@ int
smu_set_watermarks_for_clock_ranges(struct smu_context *smu, return 0; } +int smu_set_ac_dc(struct smu_context *smu) +{ + int ret = 0; + + /* controlled by firmware */ + if (smu->dc_controlled_by_gpio) + return 0; + + mutex_lock(&smu->mutex); + if (smu->ppt_funcs->set_power_source) { + if (smu->adev->pm.ac_power) + ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC); + else + ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC); + if (ret) + pr_err("Failed to switch to %s mode!\n", + smu->adev->pm.ac_power ? "AC" : "DC"); + } + mutex_unlock(&smu->mutex); + + return ret; +} + const struct amd_ip_funcs smu_ip_funcs = { .name = "smu", .early_init = smu_early_init, @@ -2620,12 +2712,3 @@ uint32_t smu_get_pptable_power_limit(struct smu_context *smu) return ret; } - -int smu_send_smc_msg(struct smu_context *smu, - enum smu_message_type msg) -{ - int ret; - - ret = smu_send_smc_msg_with_param(smu, msg, 0); - return ret; -} diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 14ba6aa876e2..1ef0923f7190 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -21,7 +21,6 @@ * */ -#include "pp_debug.h" #include <linux/firmware.h> #include "amdgpu.h" #include "amdgpu_smu.h" @@ -36,13 +35,14 @@ #include "arcturus_ppt.h" #include "smu_v11_0_pptable.h" #include "arcturus_ppsmc.h" +#include "nbio/nbio_7_4_offset.h" #include "nbio/nbio_7_4_sh_mask.h" #include "amdgpu_xgmi.h" #include <linux/i2c.h> #include <linux/pci.h> #include "amdgpu_ras.h" -#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) #define CTF_OFFSET_EDGE 5 #define CTF_OFFSET_HOTSPOT 5 @@ -127,6 +127,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(WaflTest, PPSMC_MSG_WaflTest), MSG_MAP(SetXgmiMode, PPSMC_MSG_SetXgmiMode), MSG_MAP(SetMemoryChannelEnable, PPSMC_MSG_SetMemoryChannelEnable), + MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl), }; static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = { @@ -373,13 +374,13 @@ arcturus_set_single_dpm_table(struct smu_context *smu, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex, - (clk_id << 16 | 0xFF)); + (clk_id << 16 | 0xFF), + &num_of_levels); if (ret) { pr_err("[%s] failed to get dpm levels!\n", __func__); return ret; } - smu_read_smc_arg(smu, &num_of_levels); if (!num_of_levels) { pr_err("[%s] number of clk levels is invalid!\n", __func__); return -EINVAL; @@ -389,12 +390,12 @@ arcturus_set_single_dpm_table(struct smu_context *smu, for (i = 0; i < num_of_levels; i++) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex, - (clk_id << 16 | i)); + (clk_id << 16 | i), + &clk); if (ret) { pr_err("[%s] failed to get dpm freq by index!\n", __func__); return ret; } - smu_read_smc_arg(smu, &clk); if (!clk) { pr_err("[%s] clk value is invalid!\n", __func__); return -EINVAL; @@ -552,13 +553,13 @@ static int arcturus_run_btc(struct smu_context *smu) { int ret = 0; - ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc); + ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL); if (ret) { pr_err("RunAfllBtc failed!\n"); return ret; } - return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc); + return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); } static int arcturus_populate_umd_state_clk(struct smu_context *smu) @@ -743,7 +744,8 @@ static int arcturus_upload_dpm_level(struct 
smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_GFXCLK << 16) | (freq & 0xffff)); + (PPCLK_GFXCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s gfxclk !\n", max ? "max" : "min"); @@ -758,7 +760,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_UCLK << 16) | (freq & 0xffff)); + (PPCLK_UCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s memclk !\n", max ? "max" : "min"); @@ -773,7 +776,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_SOCCLK << 16) | (freq & 0xffff)); + (PPCLK_SOCCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s socclk !\n", max ? "max" : "min"); @@ -790,8 +794,21 @@ static int arcturus_force_clk_levels(struct smu_context *smu, struct arcturus_dpm_table *dpm_table; struct arcturus_single_dpm_table *single_dpm_table; uint32_t soft_min_level, soft_max_level; + uint32_t smu_version; int ret = 0; + ret = smu_get_smc_version(smu, NULL, &smu_version); + if (ret) { + pr_err("Failed to get smu version!\n"); + return ret; + } + + if (smu_version >= 0x361200) { + pr_err("Forcing clock level is not supported with " + "54.18 and onwards SMU firmwares\n"); + return -EOPNOTSUPP; + } + soft_min_level = mask ? (ffs(mask) - 1) : 0; soft_max_level = mask ? (fls(mask) - 1) : 0; @@ -1288,12 +1305,11 @@ static int arcturus_get_power_limit(struct smu_context *smu, return -EINVAL; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit, - power_src << 16); + power_src << 16, &asic_default_power_limit); if (ret) { pr_err("[%s] get PPT limit failed!", __func__); return ret; } - smu_read_smc_arg(smu, &asic_default_power_limit); } else { /* the last hope to figure out the ppt limit */ if (!pptable) { @@ -1497,7 +1513,8 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type); + 1 << workload_type, + NULL); if (ret) { pr_err("Fail to set workload type %d\n", workload_type); return ret; @@ -1508,6 +1525,38 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, return 0; } +static int arcturus_set_performance_level(struct smu_context *smu, + enum amd_dpm_forced_level level) +{ + uint32_t smu_version; + int ret; + + ret = smu_get_smc_version(smu, NULL, &smu_version); + if (ret) { + pr_err("Failed to get smu version!\n"); + return ret; + } + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + case AMD_DPM_FORCED_LEVEL_LOW: + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + if (smu_version >= 0x361200) { + pr_err("Forcing clock level is not supported with " + "54.18 and onwards SMU firmwares\n"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + + return smu_v11_0_set_performance_level(smu, level); +} + static void arcturus_dump_pptable(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; @@ -2187,7 +2236,7 @@ static int 
arcturus_i2c_eeprom_control_init(struct i2c_adapter *control) control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; control->algo = &arcturus_i2c_eeprom_i2c_algo; - snprintf(control->name, sizeof(control->name), "RAS EEPROM"); + snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM"); res = i2c_add_adapter(control); if (res) @@ -2207,6 +2256,18 @@ static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control) i2c_del_adapter(control); } +static bool arcturus_is_baco_supported(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t val; + + if (!smu_v11_0_baco_is_support(smu)) + return false; + + val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); + return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false; +} + static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu) { PPTable_t *pptable = smu->smu_table.driver_pptable; @@ -2214,6 +2275,27 @@ static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu) return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0]; } +static int arcturus_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + uint32_t smu_version; + int ret; + + ret = smu_get_smc_version(smu, NULL, &smu_version); + if (ret) { + pr_err("Failed to get smu version!\n"); + return ret; + } + + /* PPSMC_MSG_DFCstateControl is supported by 54.15.0 and onwards */ + if (smu_version < 0x360F00) { + pr_err("DFCstateControl is only supported by PMFW 54.15.0 and onwards\n"); + return -EINVAL; + } + + return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL); +} + static const struct pptable_funcs arcturus_ppt_funcs = { /* translate smu index into arcturus specific index */ .get_smu_msg_index = arcturus_get_smu_msg_index, @@ -2248,7 +2330,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .get_profiling_clk_mask = arcturus_get_profiling_clk_mask, .get_power_profile_mode = arcturus_get_power_profile_mode, .set_power_profile_mode = arcturus_set_power_profile_mode, - .set_performance_level = smu_v11_0_set_performance_level, + .set_performance_level = arcturus_set_performance_level, /* debug (internal used) */ .dump_pptable = arcturus_dump_pptable, .get_power_limit = arcturus_get_power_limit, @@ -2277,7 +2359,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .system_features_control = smu_v11_0_system_features_control, .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, - .read_smc_arg = smu_v11_0_read_arg, .init_display_count = smu_v11_0_init_display_count, .set_allowed_mask = smu_v11_0_set_allowed_mask, .get_enabled_mask = smu_v11_0_get_enabled_mask, @@ -2298,7 +2379,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .register_irq_handler = smu_v11_0_register_irq_handler, .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support= smu_v11_0_baco_is_support, + .baco_is_support= arcturus_is_baco_supported, .baco_get_state = smu_v11_0_baco_get_state, .baco_set_state = smu_v11_0_baco_set_state, .baco_enter = smu_v11_0_baco_enter, @@ -2307,6 +2388,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, .override_pcie_parameters = smu_v11_0_override_pcie_parameters, .get_pptable_power_limit = arcturus_get_pptable_power_limit, + .set_df_cstate = arcturus_set_df_cstate, }; void 
arcturus_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index 77c14671866c..719597c5d27d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -984,6 +984,32 @@ static int init_thermal_controller( struct pp_hwmgr *hwmgr, const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) { + hwmgr->thermal_controller.ucType = + powerplay_table->sThermalController.ucType; + hwmgr->thermal_controller.ucI2cLine = + powerplay_table->sThermalController.ucI2cLine; + hwmgr->thermal_controller.ucI2cAddress = + powerplay_table->sThermalController.ucI2cAddress; + + hwmgr->thermal_controller.fanInfo.bNoFan = + (0 != (powerplay_table->sThermalController.ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN)); + + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution = + powerplay_table->sThermalController.ucFanParameters & + ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; + + hwmgr->thermal_controller.fanInfo.ulMinRPM + = powerplay_table->sThermalController.ucFanMinRPM * 100UL; + hwmgr->thermal_controller.fanInfo.ulMaxRPM + = powerplay_table->sThermalController.ucFanMaxRPM * 100UL; + + set_hw_cap(hwmgr, + ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType, + PHM_PlatformCaps_ThermalController); + + hwmgr->thermal_controller.use_hw_fan_control = 1; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index bf04cfefb283..4795eb66b2b2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1250,7 +1250,7 @@ static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) switch (sources) { default: pr_err("Unknown throttling event sources."); - /* fall through */ + fallthrough; case 0: protection = false; /* src is unused */ @@ -3698,12 +3698,12 @@ static int smu7_request_link_speed_change_before_state_change( data->force_pcie_gen = PP_PCIEGen2; if (current_link_speed == PP_PCIEGen2) break; - /* fall through */ + fallthrough; case PP_PCIEGen2: if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false)) break; + fallthrough; #endif - /* fall through */ default: data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr); break; @@ -3804,9 +3804,12 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr, { uint32_t i; + /* force the trim if mclk_switching is disabled to prevent flicker */ + bool force_trim = (low_limit == high_limit); for (i = 0; i < dpm_table->count; i++) { /*skip the trim if od is enabled*/ - if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit + if ((!hwmgr->od_enabled || force_trim) + && (dpm_table->dpm_levels[i].value < low_limit || dpm_table->dpm_levels[i].value > high_limit)) dpm_table->dpm_levels[i].enabled = false; else diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 92a65e3daff4..f29f95be1e56 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -3382,7 +3382,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels( } if (data->need_update_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) { + (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK | DPMTABLE_UPDATE_SOCCLK)) { result = 
vega10_populate_all_graphic_levels(hwmgr); PP_ASSERT_WITH_CODE((0 == result), "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", @@ -3390,7 +3390,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels( } if (data->need_update_dpm_table & - (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { + (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) { result = vega10_populate_all_memory_levels(hwmgr); PP_ASSERT_WITH_CODE((0 == result), "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 3b3ec5666051..08b6ba39a6d7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -487,15 +487,16 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); int ret = 0; + bool use_baco = (adev->in_gpu_reset && + (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || + (adev->in_runpm && amdgpu_asic_supports_baco(adev)); ret = vega20_init_sclk_threshold(hwmgr); PP_ASSERT_WITH_CODE(!ret, "Failed to init sclk threshold!", return ret); - if (adev->in_gpu_reset && - (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) { - + if (use_baco) { ret = vega20_baco_apply_vdci_flush_workaround(hwmgr); if (ret) pr_err("Failed to apply vega20 baco workaround!\n"); diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 97b6714e83e6..ae2c318dd6fa 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -362,6 +362,7 @@ struct smu_context struct mutex mutex; struct mutex sensor_lock; struct mutex metrics_lock; + struct mutex message_lock; uint64_t pool_size; struct smu_table_context smu_table; @@ -371,6 +372,9 @@ struct smu_context struct amd_pp_display_configuration *display_config; struct smu_baco_context smu_baco; void *od_settings; +#if defined(CONFIG_DEBUG_FS) + struct dentry *debugfs_sclk; +#endif uint32_t pstate_sclk; uint32_t pstate_mclk; @@ -404,6 +408,7 @@ struct smu_context uint32_t smc_if_version; bool uploading_custom_pp_table; + bool dc_controlled_by_gpio; }; struct i2c_adapter; @@ -514,8 +519,7 @@ struct pptable_funcs { int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu); int (*system_features_control)(struct smu_context *smu, bool en); int (*send_smc_msg_with_param)(struct smu_context *smu, - enum smu_message_type msg, uint32_t param); - int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg); + enum smu_message_type msg, uint32_t param, uint32_t *read_arg); int (*init_display_count)(struct smu_context *smu, uint32_t count); int (*set_allowed_mask)(struct smu_context *smu); int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); @@ -567,6 +571,7 @@ struct pptable_funcs { int (*override_pcie_parameters)(struct smu_context *smu); uint32_t (*get_pptable_power_limit)(struct smu_context *smu); int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu); + int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src); }; int smu_load_microcode(struct smu_context *smu); @@ -707,7 +712,7 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max, bool lock_needed); int 
smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool lock_needed); int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -715,6 +720,7 @@ int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type, enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu); int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level); int smu_set_display_count(struct smu_context *smu, uint32_t count); +int smu_set_ac_dc(struct smu_context *smu); bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type); const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type); const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature); diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h index e3291259b249..f736d773f9d6 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h @@ -110,7 +110,11 @@ //Others #define PPSMC_MSG_SetMemoryChannelEnable 0x39 -#define PPSMC_Message_Count 0x3A +//OOB +#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x3A + +#define PPSMC_MSG_DFCstateControl 0x3B +#define PPSMC_Message_Count 0x3C typedef uint32_t PPSMC_Result; typedef uint32_t PPSMC_Msg; diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h index 822cd8b5bf90..cea65093b6ad 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h @@ -37,7 +37,7 @@ #define PP_ASSERT_WITH_CODE(cond, msg, code) \ do { \ if (!(cond)) { \ - pr_warn("%s\n", msg); \ + pr_warn_ratelimited("%s\n", msg); \ code; \ } \ } while (0) @@ -45,7 +45,7 @@ #define PP_ASSERT(cond, msg) \ do { \ if (!(cond)) { \ - pr_warn("%s\n", msg); \ + pr_warn_ratelimited("%s\n", msg); \ } \ } while (0) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h index ac0120e384be..4b2da98afcd2 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h @@ -701,7 +701,8 @@ typedef struct { // APCC Settings uint16_t PccThresholdLow; uint16_t PccThresholdHigh; - uint32_t PaddingAPCC[6]; //FIXME pending SPEC + uint32_t MGpuFanBoostLimitRpm; + uint32_t PaddingAPCC[5]; // Temperature Dependent Vmin uint16_t VDDGFX_TVmin; //Celsius diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h index d5314d12628a..674e426ed59b 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h @@ -28,8 +28,9 @@ #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU11_DRIVER_IF_VERSION_VG20 0x13 #define SMU11_DRIVER_IF_VERSION_ARCT 0x12 -#define SMU11_DRIVER_IF_VERSION_NV10 0x33 -#define SMU11_DRIVER_IF_VERSION_NV14 0x34 +#define SMU11_DRIVER_IF_VERSION_NV10 0x35 +#define SMU11_DRIVER_IF_VERSION_NV12 0x33 +#define SMU11_DRIVER_IF_VERSION_NV14 0x36 /* MP Apertures */ #define MP0_Public 0x03800000 @@ -182,9 +183,8 @@ int smu_v11_0_system_features_control(struct smu_context *smu, int smu_v11_0_send_msg_with_param(struct smu_context *smu, enum smu_message_type msg, -
uint32_t param); - -int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg); + uint32_t param, + uint32_t *read_arg); int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count); @@ -267,4 +267,7 @@ uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu); int smu_v11_0_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level); +int smu_v11_0_set_power_source(struct smu_context *smu, + enum smu_power_src_type power_src); + #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h index d79e54b5ebf6..7fbebc1979cf 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h @@ -40,14 +40,13 @@ struct smu_12_0_cmn2aisc_mapping { int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, uint16_t msg); -int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg); - int smu_v12_0_wait_for_response(struct smu_context *smu); int smu_v12_0_send_msg_with_param(struct smu_context *smu, enum smu_message_type msg, - uint32_t param); + uint32_t param, + uint32_t *read_arg); int smu_v12_0_check_fw_status(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index aed4d6e60907..15030284b444 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -21,7 +21,6 @@ * */ -#include "pp_debug.h" #include <linux/firmware.h> #include <linux/pci.h> #include "amdgpu.h" @@ -29,14 +28,15 @@ #include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" +#include "soc15_common.h" #include "smu_v11_0.h" #include "smu11_driver_if_navi10.h" -#include "soc15_common.h" #include "atom.h" #include "navi10_ppt.h" #include "smu_v11_0_pptable.h" #include "smu_v11_0_ppsmc.h" -#include "nbio/nbio_7_4_sh_mask.h" +#include "nbio/nbio_2_3_offset.h" +#include "nbio/nbio_2_3_sh_mask.h" #include "asic_reg/mp/mp_11_0_sh_mask.h" @@ -349,7 +349,6 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, | FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT) | FEATURE_MASK(FEATURE_FW_DSTATE_BIT) | FEATURE_MASK(FEATURE_BACO_BIT) - | FEATURE_MASK(FEATURE_ACDC_BIT) | FEATURE_MASK(FEATURE_GFX_SS_BIT) | FEATURE_MASK(FEATURE_APCC_DFLL_BIT) | FEATURE_MASK(FEATURE_FW_CTF_BIT) @@ -393,6 +392,9 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT); + if (smu->dc_controlled_by_gpio) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT); + /* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */ if (is_asic_secure(smu)) { /* only for navi10 A0 */ @@ -527,6 +529,9 @@ static int navi10_store_powerplay_table(struct smu_context *smu) table_context->thermal_controller_type = powerplay_table->thermal_controller_type; + if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC) + smu->dc_controlled_by_gpio = true; + mutex_lock(&smu_baco->mutex); if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO || powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) @@ -661,14 +666,14 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) if (enable) { /* vcn dpm on is a prerequisite for vcn power gate messages */ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); + ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_PowerUpVcn, 1, NULL); if (ret) return ret; } power_gate->vcn_gated = false; } else { if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL); if (ret) return ret; } @@ -686,14 +691,14 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) if (enable) { if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg); + ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL); if (ret) return ret; } power_gate->jpeg_gated = false; } else { if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg); + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL); if (ret) return ret; } @@ -970,7 +975,7 @@ static int navi10_force_clk_levels(struct smu_context *smu, if (ret) return size; - ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq); + ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false); if (ret) return size; break; @@ -1042,7 +1047,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu) int ret = 0; uint32_t max_freq = 0; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL); if (ret) return ret; @@ -1066,7 +1071,8 @@ static int navi10_display_config_changed(struct smu_context *smu) smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, - smu->display_config->num_display); + smu->display_config->num_display, + NULL); if (ret) return ret; } @@ -1093,7 +1099,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest) return ret; force_freq = highest ? 
max_freq : min_freq; - ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq); + ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false); if (ret) return ret; } @@ -1119,7 +1125,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu) if (ret) return ret; - ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq); + ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false); if (ret) return ret; } @@ -1391,7 +1397,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u if (workload_type < 0) return -EINVAL; smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type); + 1 << workload_type, NULL); return ret; } @@ -1456,7 +1462,8 @@ static int navi10_notify_smc_display_config(struct smu_context *smu) if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetMinDeepSleepDcefclk, - min_clocks.dcef_clock_in_sr/100); + min_clocks.dcef_clock_in_sr/100, + NULL); if (ret) { pr_err("Attempt to set divider for DCEFCLK Failed!"); return ret; @@ -1678,10 +1685,10 @@ static int navi10_set_standard_performance_level(struct smu_context *smu) return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO); } - ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false); if (ret) return ret; - ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false); if (ret) return ret; @@ -1746,10 +1753,10 @@ static int navi10_set_peak_performance_level(struct smu_context *smu) if (ret) return ret; - ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false); if (ret) return ret; - ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false); if (ret) return ret; @@ -1859,12 +1866,11 @@ static int navi10_get_power_limit(struct smu_context *smu, return -EINVAL; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit, - power_src << 16); + power_src << 16, &asic_default_power_limit); if (ret) { pr_err("[%s] get PPT limit failed!", __func__); return ret; } - smu_read_smc_arg(smu, &asic_default_power_limit); } else { /* the last hope to figure out the ppt limit */ if (!pptable) { @@ -1904,7 +1910,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu, pptable->PcieLaneCount[i] : pcie_width_cap); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_OverridePcieParameters, - smu_pcie_arg); + smu_pcie_arg, + NULL); if (ret) return ret; @@ -1950,13 +1957,13 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetVoltageByDpm, - param); + param, + &value); if (ret) { pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!"); return ret; } - smu_read_smc_arg(smu, &value); *voltage = (uint16_t)value; return 0; @@ -1980,6 +1987,18 @@ static int navi10_setup_od_limits(struct smu_context *smu) { return 0; } +static bool navi10_is_baco_supported(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t val; + + if (!smu_v11_0_baco_is_support(smu)) + return false; + + val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); + return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? 
true : false; +} + static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) { OverDriveTable_t *od_table, *boot_od_table; int ret = 0; @@ -2213,7 +2232,7 @@ static int navi10_run_btc(struct smu_context *smu) { int ret = 0; - ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc); + ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL); if (ret) pr_err("RunBtc failed!\n"); @@ -2225,9 +2244,9 @@ static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable) int result = 0; if (!enable) - result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE); + result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL); else - result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE); + result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL); return result; } @@ -2336,7 +2355,6 @@ static const struct pptable_funcs navi10_ppt_funcs = { .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .system_features_control = smu_v11_0_system_features_control, .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, - .read_smc_arg = smu_v11_0_read_arg, .init_display_count = smu_v11_0_init_display_count, .set_allowed_mask = smu_v11_0_set_allowed_mask, .get_enabled_mask = smu_v11_0_get_enabled_mask, @@ -2357,7 +2375,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .register_irq_handler = smu_v11_0_register_irq_handler, .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support= smu_v11_0_baco_is_support, + .baco_is_support= navi10_is_baco_supported, .baco_get_state = smu_v11_0_baco_get_state, .baco_set_state = smu_v11_0_baco_set_state, .baco_enter = smu_v11_0_baco_enter, @@ -2370,6 +2388,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .get_pptable_power_limit = navi10_get_pptable_power_limit, .run_btc = navi10_run_btc, .disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround, + .set_power_source = smu_v11_0_set_power_source, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index 3ad0f4aa3aa3..b0ed1b3fe79a 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -24,7 +24,6 @@ #include "amdgpu.h" #include "amdgpu_smu.h" #include "smu_internal.h" -#include "soc15_common.h" #include "smu_v12_0_ppsmc.h" #include "smu12_driver_if.h" #include "smu_v12_0.h" @@ -240,6 +239,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; + bool cur_value_match_level = false; if (!clk_table || clk_type >= SMU_CLK_COUNT) return -EINVAL; @@ -298,8 +298,13 @@ static int renoir_print_clk_levels(struct smu_context *smu, GET_DPM_CUR_FREQ(clk_table, clk_type, i, value); size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, cur_value == value ? 
"*" : ""); + if (cur_value == value) + cur_value_match_level = true; } + if (!cur_value_match_level) + size += sprintf(buf + size, " %uMhz *\n", cur_value); + return size; } @@ -342,14 +347,14 @@ static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable) if (enable) { /* vcn dpm on is a prerequisite for vcn power gate messages */ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); if (ret) return ret; } power_gate->vcn_gated = false; } else { if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL); if (ret) return ret; } @@ -367,14 +372,14 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) if (enable) { if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL); if (ret) return ret; } power_gate->jpeg_gated = false; } else { if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); if (ret) return ret; } @@ -423,7 +428,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest) return ret; force_freq = highest ? max_freq : min_freq; - ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq); + ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false); if (ret) return ret; } @@ -456,7 +461,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) { if (ret) return ret; - ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq); + ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false); if (ret) return ret; } @@ -622,22 +627,24 @@ static int renoir_force_clk_levels(struct smu_context *smu, return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, soft_max_level == 0 ? min_freq : - soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq); + soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq, + NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, soft_min_level == 2 ? max_freq : - soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq); + soft_min_level == 1 ? 
RENOIR_UMD_PSTATE_GFXCLK : min_freq, + NULL); if (ret) return ret; break; case SMU_SOCCLK: GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq); GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL); if (ret) return ret; break; @@ -645,10 +652,10 @@ case SMU_FCLK: GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq); GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL); if (ret) return ret; break; @@ -672,14 +679,19 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ workload_type = smu_workload_get_type(smu, smu->power_profile_mode); if (workload_type < 0) { - pr_err("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode); + /* + * TODO: If some cases need to switch to powersave/default power mode, + * consider entering WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving. + */ + pr_err_once("Unsupported power profile mode %d on RENOIR\n", smu->power_profile_mode); return -EINVAL; } ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type); + 1 << workload_type, + NULL); if (ret) { - pr_err("Fail to set workload type %d\n", workload_type); + pr_err_once("Failed to set workload type %d\n", workload_type); return ret; } @@ -697,7 +709,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false); if (ret) return ret; @@ -705,7 +717,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false); if (ret) return ret; @@ -881,6 +893,22 @@ static int renoir_read_sensor(struct smu_context *smu, return ret; } +static bool renoir_is_dpm_running(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + /* + * Until now, the pmfw hasn't exported the interface of SMU + * feature mask to APU SKUs, so just force on all the features + * at the early initial stage. 
+ */ + if (adev->in_suspend) + return false; + else + return true; + +} + static const struct pptable_funcs renoir_ppt_funcs = { .get_smu_msg_index = renoir_get_smu_msg_index, .get_smu_clk_index = renoir_get_smu_clk_index, @@ -910,7 +938,6 @@ static const struct pptable_funcs renoir_ppt_funcs = { .powergate_vcn = smu_v12_0_powergate_vcn, .powergate_jpeg = smu_v12_0_powergate_jpeg, .send_smc_msg_with_param = smu_v12_0_send_msg_with_param, - .read_smc_arg = smu_v12_0_read_arg, .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg, .gfx_off_control = smu_v12_0_gfx_off_control, .init_smc_tables = smu_v12_0_init_smc_tables, @@ -922,6 +949,7 @@ static const struct pptable_funcs renoir_ppt_funcs = { .mode2_reset = smu_v12_0_mode2_reset, .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range, .set_driver_table_location = smu_v12_0_set_driver_table_location, + .is_dpm_running = renoir_is_dpm_running, }; void renoir_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h index 2a390ddd37dd..89cd6da118a3 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h @@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu); freq = table->SocClocks[dpm_level].Freq; \ break; \ case SMU_MCLK: \ - freq = table->MemClocks[dpm_level].Freq; \ + freq = table->FClocks[dpm_level].Freq; \ break; \ case SMU_DCEFCLK: \ freq = table->DcfClocks[dpm_level].Freq; \ diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h index 7bd200ffcda8..40c35bcc5a0a 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h @@ -79,12 +79,13 @@ #define smu_set_default_od_settings(smu, initialize) \ ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) -int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg); +#define smu_send_smc_msg_with_param(smu, msg, param, read_arg) \ + ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param), (read_arg)) : 0) + +static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t *read_arg) { + return smu_send_smc_msg_with_param(smu, msg, 0, read_arg); +} -#define smu_send_smc_msg_with_param(smu, msg, param) \ - ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0) -#define smu_read_smc_arg(smu, arg) \ - ((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0) #define smu_alloc_dpm_context(smu) \ ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0) #define smu_init_display_count(smu, count) \ @@ -210,4 +211,7 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg); #define smu_disable_umc_cdr_12gbps_workaround(smu) \ ((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0) +#define smu_set_power_source(smu, power_src) \ + ((smu)->ppt_funcs->set_power_source ? 
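
The smu_internal.h rework above collapses the old send/read pair into a single dispatch: the send_smc_msg_with_param hook takes an optional read-back pointer, read_smc_arg disappears, and smu_send_smc_msg becomes a static inline that forwards param 0. A user-space model of the dispatch, with the transport stubbed:

```c
#include <stdint.h>
#include <stdio.h>

struct smu_context;

struct pptable_funcs {
	int (*send_smc_msg_with_param)(struct smu_context *smu, int msg,
				       uint32_t param, uint32_t *read_arg);
};

struct smu_context {
	const struct pptable_funcs *ppt_funcs;
};

#define smu_send_smc_msg_with_param(smu, msg, param, read_arg) \
	((smu)->ppt_funcs->send_smc_msg_with_param ? \
	 (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param), (read_arg)) : 0)

/* msg-only convenience wrapper, shaped like the new static inline */
static inline int smu_send_smc_msg(struct smu_context *smu, int msg,
				   uint32_t *read_arg)
{
	return smu_send_smc_msg_with_param(smu, msg, 0, read_arg);
}

static int stub_send(struct smu_context *smu, int msg, uint32_t param,
		     uint32_t *read_arg)
{
	(void)smu;
	printf("msg %d param 0x%x\n", msg, param);
	if (read_arg)
		*read_arg = 0x1234; /* pretend the firmware replied */
	return 0;
}

int main(void)
{
	const struct pptable_funcs funcs = { .send_smc_msg_with_param = stub_send };
	struct smu_context smu = { .ppt_funcs = &funcs };
	uint32_t reply;

	smu_send_smc_msg(&smu, 1, NULL);                 /* fire and forget */
	smu_send_smc_msg_with_param(&smu, 2, 7, &reply); /* with read-back */
	printf("reply 0x%x\n", reply);
	return 0;
}
```
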
(smu)->ppt_funcs->set_power_source((smu), (power_src)) : 0) + #endif diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index c9e5ce135fd4..655ba4fb05dc 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -26,7 +26,6 @@ #define SMU_11_0_PARTIAL_PPTABLE -#include "pp_debug.h" #include "amdgpu.h" #include "amdgpu_smu.h" #include "smu_internal.h" @@ -43,8 +42,6 @@ #include "asic_reg/thm/thm_11_0_2_sh_mask.h" #include "asic_reg/mp/mp_11_0_offset.h" #include "asic_reg/mp/mp_11_0_sh_mask.h" -#include "asic_reg/nbio/nbio_7_4_offset.h" -#include "asic_reg/nbio/nbio_7_4_sh_mask.h" #include "asic_reg/smuio/smuio_11_0_0_offset.h" #include "asic_reg/smuio/smuio_11_0_0_sh_mask.h" @@ -64,7 +61,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu, return 0; } -int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg) +static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg) { struct amdgpu_device *adev = smu->adev; @@ -92,7 +89,8 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu) int smu_v11_0_send_msg_with_param(struct smu_context *smu, enum smu_message_type msg, - uint32_t param) + uint32_t param, + uint32_t *read_arg) { struct amdgpu_device *adev = smu->adev; int ret = 0, index = 0; @@ -101,11 +99,12 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, if (index < 0) return index; + mutex_lock(&smu->message_lock); ret = smu_v11_0_wait_for_response(smu); if (ret) { pr_err("Msg issuing pre-check failed and " "SMU may be not in the right state!\n"); - return ret; + goto out; } WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); @@ -115,10 +114,21 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index); ret = smu_v11_0_wait_for_response(smu); - if (ret) + if (ret) { pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n", smu_get_message_name(smu, msg), index, param, ret); - + goto out; + } + if (read_arg) { + ret = smu_v11_0_read_arg(smu, read_arg); + if (ret) { + pr_err("failed to read message arg: %10s (%d) \tparam: 0x%08x response %#x\n", + smu_get_message_name(smu, msg), index, param, ret); + goto out; + } + } +out: + mutex_unlock(&smu->message_lock); return ret; } @@ -262,6 +272,9 @@ int smu_v11_0_check_fw_version(struct smu_context *smu) case CHIP_NAVI10: smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10; break; + case CHIP_NAVI12: + smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12; + break; case CHIP_NAVI14: smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14; break; @@ -671,12 +684,14 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSystemVirtualDramAddrHigh, - address_high); + address_high, + NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSystemVirtualDramAddrLow, - address_low); + address_low, + NULL); if (ret) return ret; @@ -685,15 +700,15 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) address_low = (uint32_t)lower_32_bits(address); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh, - address_high); + address_high, NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow, - address_low); + address_low, NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize, - (uint32_t)memory_pool->size); + (uint32_t)memory_pool->size, NULL); if (ret) return 
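
The behavioral core of the smu_v11_0_send_msg_with_param() rework (and of its smu_v12_0 twin further down) is that the idle pre-check, the argument write, the doorbell, the response wait, and the optional argument read-back now all happen under smu->message_lock, so a concurrent sender can no longer clobber the C2PMSG argument register between send and read. A pthread-based model of that locking shape, with the hardware waits stubbed out:

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t message_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t c2pmsg_arg; /* stands in for mmMP1_SMN_C2PMSG_82 */

static int wait_for_response(void)
{
	return 0; /* stub: pretend the SMU is always ready */
}

static int send_msg_with_param(int msg, uint32_t param, uint32_t *read_arg)
{
	int ret;

	pthread_mutex_lock(&message_lock);
	ret = wait_for_response();      /* pre-check: is the SMU idle? */
	if (ret)
		goto out;
	c2pmsg_arg = param;             /* write the argument register */
	/* ... write the message index register, ring the doorbell ... */
	(void)msg;
	ret = wait_for_response();      /* wait for the firmware to finish */
	if (ret)
		goto out;
	if (read_arg)
		*read_arg = c2pmsg_arg; /* read the reply while still locked */
out:
	pthread_mutex_unlock(&message_lock);
	return ret;
}

int main(void)
{
	uint32_t reply;

	if (!send_msg_with_param(42, 0xABCD, &reply))
		printf("reply 0x%x\n", reply);
	return 0;
}
```
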
ret; @@ -757,7 +772,7 @@ int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) int ret; ret = smu_send_smc_msg_with_param(smu, - SMU_MSG_SetMinDeepSleepDcefclk, clk); + SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL); if (ret) pr_err("SMU11 attempt to set divider for DCEFCLK Failed!"); @@ -784,11 +799,13 @@ int smu_v11_0_set_driver_table_location(struct smu_context *smu) if (driver_table->mc_address) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh, - upper_32_bits(driver_table->mc_address)); + upper_32_bits(driver_table->mc_address), + NULL); if (!ret) ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow, - lower_32_bits(driver_table->mc_address)); + lower_32_bits(driver_table->mc_address), + NULL); } return ret; @@ -802,11 +819,13 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu) if (tool_table->mc_address) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetToolsDramAddrHigh, - upper_32_bits(tool_table->mc_address)); + upper_32_bits(tool_table->mc_address), + NULL); if (!ret) ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetToolsDramAddrLow, - lower_32_bits(tool_table->mc_address)); + lower_32_bits(tool_table->mc_address), + NULL); } return ret; @@ -819,7 +838,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) if (!smu->pm_enabled) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL); return ret; } @@ -837,12 +856,12 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu) bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh, - feature_mask[1]); + feature_mask[1], NULL); if (ret) goto failed; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow, - feature_mask[0]); + feature_mask[0], NULL); if (ret) goto failed; @@ -862,17 +881,11 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu, return -EINVAL; if (bitmap_empty(feature->enabled, feature->feature_num)) { - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh); - if (ret) - return ret; - ret = smu_read_smc_arg(smu, &feature_mask_high); + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high); if (ret) return ret; - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow); - if (ret) - return ret; - ret = smu_read_smc_arg(smu, &feature_mask_low); + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low); if (ret) return ret; @@ -894,7 +907,7 @@ int smu_v11_0_system_features_control(struct smu_context *smu, int ret = 0; ret = smu_send_smc_msg(smu, (en ? 
SMU_MSG_EnableAllSmuFeatures : - SMU_MSG_DisableAllSmuFeatures)); + SMU_MSG_DisableAllSmuFeatures), NULL); if (ret) return ret; @@ -923,7 +936,7 @@ int smu_v11_0_notify_display_change(struct smu_context *smu) return ret; if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM) - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL); return ret; } @@ -947,30 +960,24 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, return -EINVAL; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq, - clk_id << 16); + clk_id << 16, clock); if (ret) { pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!"); return ret; } - ret = smu_read_smc_arg(smu, clock); - if (ret) - return ret; - if (*clock != 0) return 0; /* if DC limit is zero, return AC limit */ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, - clk_id << 16); + clk_id << 16, clock); if (ret) { pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!"); return ret; } - ret = smu_read_smc_arg(smu, clock); - - return ret; + return 0; } int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) @@ -1106,7 +1113,7 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) return -EOPNOTSUPP; } - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL); if (ret) { pr_err("[%s] Set power limit Failed!\n", __func__); return ret; @@ -1136,11 +1143,7 @@ int smu_v11_0_get_current_clk_freq(struct smu_context *smu, ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq); else { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq, - (asic_clk_id << 16)); - if (ret) - return ret; - - ret = smu_read_smc_arg(smu, &freq); + (asic_clk_id << 16), &freq); if (ret) return ret; } @@ -1375,9 +1378,9 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) - ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff); + ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); else - ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff); + ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); break; default: break; @@ -1515,10 +1518,18 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, int ret = 0; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetXgmiMode, - pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3); + pstate ? 
XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3, + NULL); return ret; } +static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu) +{ + return smu_send_smc_msg(smu, + SMU_MSG_ReenableAcDcInterrupt, + NULL); +} + #define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ #define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */ @@ -1552,6 +1563,9 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev, break; } + } else if (client_id == SOC15_IH_CLIENTID_MP1) { + if (src_id == 0xfe) + smu_v11_0_ack_ac_dc_interrupt(&adev->smu); } return 0; @@ -1591,6 +1605,12 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu) if (ret) return ret; + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, + 0xfe, + irq_src); + if (ret) + return ret; + return ret; } @@ -1628,21 +1648,19 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu) { int ret = 0; - ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME); + ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL); return ret; } static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq) { - return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq); + return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL); } bool smu_v11_0_baco_is_support(struct smu_context *smu) { - struct amdgpu_device *adev = smu->adev; struct smu_baco_context *smu_baco = &smu->smu_baco; - uint32_t val; bool baco_support; mutex_lock(&smu_baco->mutex); @@ -1657,11 +1675,7 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu) !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) return false; - val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); - if (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) - return true; - - return false; + return true; } enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) @@ -1678,11 +1692,9 @@ enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) { - struct smu_baco_context *smu_baco = &smu->smu_baco; struct amdgpu_device *adev = smu->adev; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - uint32_t bif_doorbell_intr_cntl; uint32_t data; int ret = 0; @@ -1691,32 +1703,26 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) mutex_lock(&smu_baco->mutex); - bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); - if (state == SMU_BACO_STATE_ENTER) { - bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, - BIF_DOORBELL_INT_CNTL, - DOORBELL_INTERRUPT_DISABLE, 1); - WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); - if (!ras || !ras->supported) { data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); data |= 0x80000000; WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL); } else { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL); } } else { - ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco); + ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL); if (ret) goto out; - bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, - BIF_DOORBELL_INT_CNTL, - DOORBELL_INTERRUPT_DISABLE, 0); - WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + if (ras && ras->supported) { + ret = smu_send_smc_msg(smu, 
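
The hunk above registers a second interrupt source on the MP1 client (src_id 0xfe) and acknowledges it by sending SMU_MSG_ReenableAcDcInterrupt, which re-arms the firmware's AC/DC switch notification. A sketch of the dispatch; the client-id values are placeholders, only the 0xfe source id and the message name come from the diff:

```c
#include <stdio.h>

#define SOC15_IH_CLIENTID_THM 1 /* placeholder values */
#define SOC15_IH_CLIENTID_MP1 2
#define AC_DC_SRC_ID 0xfe       /* source id from the diff */

static int ack_ac_dc_interrupt(void)
{
	puts("send SMU_MSG_ReenableAcDcInterrupt"); /* stubbed message send */
	return 0;
}

static int irq_process(int client_id, int src_id)
{
	if (client_id == SOC15_IH_CLIENTID_THM) {
		/* thermal high/low handling elided */
	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
		if (src_id == AC_DC_SRC_ID)
			return ack_ac_dc_interrupt();
	}
	return 0;
}

int main(void)
{
	return irq_process(SOC15_IH_CLIENTID_MP1, AC_DC_SRC_ID);
}
```
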
SMU_MSG_PrepareMp1ForUnload, NULL); + if (ret) + goto out; + } /* clear vbios scratch 6 and 7 for coming asic reinit */ WREG32(adev->bios_scratch_reg_offset + 6, 0); @@ -1777,19 +1783,13 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c param = (clk_id & 0xffff) << 16; if (max) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param); - if (ret) - goto failed; - ret = smu_read_smc_arg(smu, max); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max); if (ret) goto failed; } if (min) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param); - if (ret) - goto failed; - ret = smu_read_smc_arg(smu, min); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min); if (ret) goto failed; } @@ -1811,7 +1811,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_ if (max > 0) { param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, - param); + param, NULL); if (ret) return ret; } @@ -1819,7 +1819,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_ if (min > 0) { param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, - param); + param, NULL); if (ret) return ret; } @@ -1939,3 +1939,18 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, return ret; } +int smu_v11_0_set_power_source(struct smu_context *smu, + enum smu_power_src_type power_src) +{ + int pwr_source; + + pwr_source = smu_power_get_index(smu, (uint32_t)power_src); + if (pwr_source < 0) + return -EINVAL; + + return smu_send_smc_msg_with_param(smu, + SMU_MSG_NotifyPowerSource, + pwr_source, + NULL); +} + diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c index 518e6597bf2d..169ebdad87b8 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c @@ -20,7 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. 
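
smu_v11_0_set_power_source(), added above, translates the generic power-source enum through the per-ASIC message-index table and notifies the firmware with SMU_MSG_NotifyPowerSource; navi10 wires it into its pptable_funcs earlier in this patch. A sketch under the assumption of a trivial two-entry lookup:

```c
#include <stdio.h>

enum smu_power_src_type { SMU_POWER_SOURCE_AC, SMU_POWER_SOURCE_DC };

/* stand-in for smu_power_get_index(): the per-ASIC mapping table */
static int smu_power_get_index(enum smu_power_src_type src)
{
	switch (src) {
	case SMU_POWER_SOURCE_AC: return 0;
	case SMU_POWER_SOURCE_DC: return 1;
	default: return -1;
	}
}

static int notify_power_source(int pwr_source)
{
	printf("send SMU_MSG_NotifyPowerSource %d\n", pwr_source); /* stub */
	return 0;
}

static int set_power_source(enum smu_power_src_type src)
{
	int pwr_source = smu_power_get_index(src);

	if (pwr_source < 0)
		return -1; /* the kernel code returns -EINVAL here */
	return notify_power_source(pwr_source);
}

int main(void)
{
	return set_power_source(SMU_POWER_SOURCE_DC);
}
```
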
*/ -#include "pp_debug.h" #include <linux/firmware.h> #include "amdgpu.h" #include "amdgpu_smu.h" @@ -50,7 +49,7 @@ int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, return 0; } -int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg) +static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg) { struct amdgpu_device *adev = smu->adev; @@ -78,7 +77,8 @@ int smu_v12_0_wait_for_response(struct smu_context *smu) int smu_v12_0_send_msg_with_param(struct smu_context *smu, enum smu_message_type msg, - uint32_t param) + uint32_t param, + uint32_t *read_arg) { struct amdgpu_device *adev = smu->adev; int ret = 0, index = 0; @@ -87,11 +87,12 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu, if (index < 0) return index; + mutex_lock(&smu->message_lock); ret = smu_v12_0_wait_for_response(smu); if (ret) { pr_err("Msg issuing pre-check failed and " "SMU may be not in the right state!\n"); - return ret; + goto out; } WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); @@ -101,10 +102,21 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu, smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index); ret = smu_v12_0_wait_for_response(smu); - if (ret) + if (ret) { pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n", index, ret, param); - + goto out; + } + if (read_arg) { + ret = smu_v12_0_read_arg(smu, read_arg); + if (ret) { + pr_err("Failed to read message arg 0x%x, response 0x%x param 0x%x\n", + index, ret, param); + goto out; + } + } +out: + mutex_unlock(&smu->message_lock); return ret; } @@ -163,9 +175,9 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) return 0; if (gate) - return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma); + return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL); else - return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma); + return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL); } int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) @@ -174,9 +186,9 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) return 0; if (gate) - return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL); else - return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn); + return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn, NULL); } int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate) @@ -185,9 +197,9 @@ int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate) return 0; if (gate) - return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0); + return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); else - return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0); + return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL); } int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) @@ -196,7 +208,9 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) return 0; return smu_v12_0_send_msg_with_param(smu, - SMU_MSG_SetGfxCGPG, enable ? 1 : 0); + SMU_MSG_SetGfxCGPG, + enable ? 
1 : 0, + NULL); } int smu_v12_0_read_sensor(struct smu_context *smu, @@ -262,10 +276,10 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable) int ret = 0, timeout = 500; if (enable) { - ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff); + ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); } else { - ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff); + ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); /* confirm gfx is back to "on" state, timeout is 0.5 second */ while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) { @@ -331,17 +345,11 @@ int smu_v12_0_get_enabled_mask(struct smu_context *smu, if (!feature_mask || num < 2) return -EINVAL; - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh); - if (ret) - return ret; - ret = smu_read_smc_arg(smu, &feature_mask_high); + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high); if (ret) return ret; - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow); - if (ret) - return ret; - ret = smu_read_smc_arg(smu, &feature_mask_low); + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low); if (ret) return ret; @@ -388,14 +396,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: - ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency); + ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max); if (ret) { pr_err("Attempt to get max GX frequency from SMC Failed !\n"); goto failed; } - ret = smu_read_smc_arg(smu, max); - if (ret) - goto failed; break; case SMU_UCLK: case SMU_FCLK: @@ -419,14 +424,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: - ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency); + ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min); if (ret) { pr_err("Attempt to get min GX frequency from SMC Failed !\n"); goto failed; } - ret = smu_read_smc_arg(smu, min); - if (ret) - goto failed; break; case SMU_UCLK: case SMU_FCLK: @@ -450,7 +452,7 @@ failed: } int smu_v12_0_mode2_reset(struct smu_context *smu){ - return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2); + return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL); } int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -461,39 +463,39 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_ switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL); if (ret) return ret; break; case SMU_FCLK: case SMU_MCLK: - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL); if (ret) return ret; break; case SMU_SOCCLK: - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min); + ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_SetHardMinSocclkByFreq, min, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL); if (ret) return ret; break; case SMU_VCLK: - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL); if (ret) return ret; break; @@ -512,11 +514,13 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu) if (driver_table->mc_address) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh, - upper_32_bits(driver_table->mc_address)); + upper_32_bits(driver_table->mc_address), + NULL); if (!ret) ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow, - lower_32_bits(driver_table->mc_address)); + lower_32_bits(driver_table->mc_address), + NULL); } return ret; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c index 49e5ef3e3876..16aa171971d3 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -33,6 +33,8 @@ #include "smu7_smumgr.h" #include "vega20_hwmgr.h" +#include "smu_v11_0_i2c.h" + /* MP Apertures */ #define MP0_Public 0x03800000 #define MP0_SRAM 0x03900000 @@ -406,6 +408,7 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr) struct vega20_smumgr *priv; unsigned long tools_size = 0x19000; int ret = 0; + struct amdgpu_device *adev = hwmgr->adev; struct cgs_firmware_info info = {0}; @@ -505,6 +508,10 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr) priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01; priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t); + ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c); + if (ret) + goto err4; + return 0; err4: @@ -537,6 +544,9 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr) { struct vega20_smumgr *priv = (struct vega20_smumgr *)(hwmgr->smu_backend); + struct amdgpu_device *adev = hwmgr->adev; + + smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c); if (priv) { amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle, @@ -560,6 +570,7 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr) kfree(hwmgr->smu_backend); hwmgr->smu_backend = NULL; } + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 4ad8d6c14ee5..3f1044326dcb 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -21,7 +21,6 @@ * */ -#include "pp_debug.h" #include <linux/firmware.h> #include "amdgpu.h" #include "amdgpu_smu.h" @@ -36,6 +35,7 @@ #include "vega20_ppt.h" #include "vega20_pptable.h" #include "vega20_ppsmc.h" +#include "nbio/nbio_7_4_offset.h" #include "nbio/nbio_7_4_sh_mask.h" #include "asic_reg/thm/thm_11_0_2_offset.h" #include "asic_reg/thm/thm_11_0_2_sh_mask.h" @@ -587,7 +587,7 @@ static int vega20_check_powerplay_table(struct smu_context *smu) static int vega20_run_btc_afll(struct smu_context *smu) { - return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc); + return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL); } #define FEATURE_MASK(feature) (1ULL << feature) @@ -670,13 +670,13 @@ 
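
smu_v12_0_set_soft_freq_limited_range() keeps its per-clock-domain dispatch, with the rework only threading the NULL read-back pointer through: each domain is clamped with its own hard-min plus soft-max message pair. A compact model of that switch; the message names come from the diff, the transport is a stub:

```c
#include <stdint.h>
#include <stdio.h>

enum clk_type { CLK_GFX, CLK_FCLK, CLK_SOC, CLK_VCN };

/* stand-in for smu_send_smc_msg_with_param(smu, msg, freq, NULL) */
static int send_msg(const char *msg, uint32_t freq)
{
	printf("%s %u\n", msg, freq);
	return 0;
}

static int set_soft_freq_range(enum clk_type type, uint32_t min, uint32_t max)
{
	switch (type) {
	case CLK_GFX:
		return send_msg("SetHardMinGfxClk", min) ||
		       send_msg("SetSoftMaxGfxClk", max);
	case CLK_FCLK:
		return send_msg("SetHardMinFclkByFreq", min) ||
		       send_msg("SetSoftMaxFclkByFreq", max);
	case CLK_SOC:
		return send_msg("SetHardMinSocclkByFreq", min) ||
		       send_msg("SetSoftMaxSocclkByFreq", max);
	case CLK_VCN:
		return send_msg("SetHardMinVcn", min) ||
		       send_msg("SetSoftMaxVcn", max);
	}
	return -1;
}

int main(void)
{
	return set_soft_freq_range(CLK_GFX, 200, 1400);
}
```
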
vega20_set_single_dpm_table(struct smu_context *smu, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex, - (clk_id << 16 | 0xFF)); + (clk_id << 16 | 0xFF), + &num_of_levels); if (ret) { pr_err("[GetNumOfDpmLevel] failed to get dpm levels!"); return ret; } - smu_read_smc_arg(smu, &num_of_levels); if (!num_of_levels) { pr_err("[GetNumOfDpmLevel] number of clk levels is invalid!"); return -EINVAL; @@ -687,12 +687,12 @@ vega20_set_single_dpm_table(struct smu_context *smu, for (i = 0; i < num_of_levels; i++) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex, - (clk_id << 16 | i)); + (clk_id << 16 | i), + &clk); if (ret) { pr_err("[GetDpmFreqByIndex] failed to get dpm freq by index!"); return ret; } - smu_read_smc_arg(smu, &clk); if (!clk) { pr_err("[GetDpmFreqByIndex] clk value is invalid!"); return -EINVAL; @@ -1200,7 +1200,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_GFXCLK << 16) | (freq & 0xffff)); + (PPCLK_GFXCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s gfxclk !\n", max ? "max" : "min"); @@ -1215,7 +1216,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_UCLK << 16) | (freq & 0xffff)); + (PPCLK_UCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s memclk !\n", max ? "max" : "min"); @@ -1230,7 +1232,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_SOCCLK << 16) | (freq & 0xffff)); + (PPCLK_SOCCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s socclk !\n", max ? "max" : "min"); @@ -1245,7 +1248,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_FCLK << 16) | (freq & 0xffff)); + (PPCLK_FCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s fclk !\n", max ? 
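
vega20_set_single_dpm_table() is a good illustration of what the read_arg rework buys callers: the SMU_MSG_GetDpmFreqByIndex protocol packs (clk_id << 16 | index) into the parameter, where index 0xFF returns the number of levels and indices 0..n-1 return the frequencies, each now read back through the output pointer in a single call. A self-contained model with a faked firmware table:

```c
#include <stdint.h>
#include <stdio.h>

static const uint32_t fw_levels[] = { 300, 700, 1100 }; /* fake firmware table */
#define FW_NUM_LEVELS (sizeof(fw_levels) / sizeof(fw_levels[0]))

/* models SMU_MSG_GetDpmFreqByIndex: index 0xFF returns the level count */
static int get_dpm_freq_by_index(uint32_t clk_id, uint32_t index, uint32_t *out)
{
	(void)clk_id; /* a real firmware would select a clock domain here */
	if (index == 0xFF)
		*out = FW_NUM_LEVELS;
	else if (index < FW_NUM_LEVELS)
		*out = fw_levels[index];
	else
		return -1;
	return 0;
}

int main(void)
{
	uint32_t num_of_levels, clk;

	if (get_dpm_freq_by_index(0, 0xFF, &num_of_levels))
		return 1;
	for (uint32_t i = 0; i < num_of_levels; i++) {
		if (get_dpm_freq_by_index(0, i, &clk))
			return 1;
		printf("level %u: %u MHz\n", i, clk);
	}
	return 0;
}
```
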
"max" : "min"); @@ -1260,7 +1264,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, if (!max) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, - (PPCLK_DCEFCLK << 16) | (freq & 0xffff)); + (PPCLK_DCEFCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set hard min dcefclk !\n"); return ret; @@ -1421,7 +1426,9 @@ static int vega20_force_clk_levels(struct smu_context *smu, } ret = smu_send_smc_msg_with_param(smu, - SMU_MSG_SetMinLinkDpmByIndex, soft_min_level); + SMU_MSG_SetMinLinkDpmByIndex, + soft_min_level, + NULL); if (ret) pr_err("Failed to set min link dpm level!\n"); @@ -1477,13 +1484,13 @@ static int vega20_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetAVFSVoltageByDpm, - ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq)); + ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq), + voltage); if (ret) { pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!"); return ret; } - smu_read_smc_arg(smu, voltage); *voltage = *voltage / VOLTAGE_SCALE; return 0; @@ -1956,8 +1963,10 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u workload_type = smu_workload_get_type(smu, smu->power_profile_mode); if (workload_type < 0) return -EINVAL; - smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type); + smu_send_smc_msg_with_param(smu, + SMU_MSG_SetWorkloadMask, + 1 << workload_type, + NULL); return ret; } @@ -2029,7 +2038,8 @@ vega20_set_uclk_to_highest_dpm_level(struct smu_context *smu, dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, - (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level); + (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level, + NULL); if (ret) { pr_err("[%s] Set hard min uclk failed!", __func__); return ret; @@ -2047,7 +2057,7 @@ static int vega20_pre_display_config_changed(struct smu_context *smu) if (!smu->smu_dpm.dpm_context) return -EINVAL; - smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0); + smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL); ret = vega20_set_uclk_to_highest_dpm_level(smu, &dpm_table->mem_table); if (ret) @@ -2074,7 +2084,8 @@ static int vega20_display_config_changed(struct smu_context *smu) smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, - smu->display_config->num_display); + smu->display_config->num_display, + NULL); } return ret; @@ -2247,7 +2258,8 @@ vega20_notify_smc_display_config(struct smu_context *smu) if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetMinDeepSleepDcefclk, - min_clocks.dcef_clock_in_sr/100); + min_clocks.dcef_clock_in_sr/100, + NULL); if (ret) { pr_err("Attempt to set divider for DCEFCLK Failed!"); return ret; @@ -2262,7 +2274,8 @@ vega20_notify_smc_display_config(struct smu_context *smu) memtable->dpm_state.hard_min_level = min_clocks.memory_clock/100; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, - (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level); + (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level, + NULL); if (ret) { pr_err("[%s] Set hard min uclk failed!", __func__); return ret; @@ -2853,8 +2866,10 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu) struct smu_table_context *table_context = 
&smu->smu_table; PPTable_t *pptable = table_context->driver_pptable; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget, - (uint32_t)pptable->FanTargetTemperature); + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_SetFanTemperatureTarget, + (uint32_t)pptable->FanTargetTemperature, + NULL); return ret; } @@ -2864,15 +2879,13 @@ static int vega20_get_fan_speed_rpm(struct smu_context *smu, { int ret; - ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm); + ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm, speed); if (ret) { pr_err("Attempt to get current RPM from SMC Failed!\n"); return ret; } - smu_read_smc_arg(smu, speed); - return 0; } @@ -3137,7 +3150,7 @@ static int vega20_set_df_cstate(struct smu_context *smu, return -EINVAL; } - return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state); + return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL); } static int vega20_update_pcie_parameters(struct smu_context *smu, @@ -3155,12 +3168,24 @@ static int vega20_update_pcie_parameters(struct smu_context *smu, pptable->PcieLaneCount[i] : pcie_width_cap); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_OverridePcieParameters, - smu_pcie_arg); + smu_pcie_arg, + NULL); } return ret; } +static bool vega20_is_baco_supported(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t val; + + if (!smu_v11_0_baco_is_support(smu)) + return false; + + val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); + return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false; +} static const struct pptable_funcs vega20_ppt_funcs = { .tables_init = vega20_tables_init, @@ -3229,7 +3254,6 @@ static const struct pptable_funcs vega20_ppt_funcs = { .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .system_features_control = smu_v11_0_system_features_control, .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, - .read_smc_arg = smu_v11_0_read_arg, .init_display_count = smu_v11_0_init_display_count, .set_allowed_mask = smu_v11_0_set_allowed_mask, .get_enabled_mask = smu_v11_0_get_enabled_mask, @@ -3250,7 +3274,7 @@ static const struct pptable_funcs vega20_ppt_funcs = { .register_irq_handler = smu_v11_0_register_irq_handler, .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support= smu_v11_0_baco_is_support, + .baco_is_support= vega20_is_baco_supported, .baco_get_state = smu_v11_0_baco_get_state, .baco_set_state = smu_v11_0_baco_set_state, .baco_enter = smu_v11_0_baco_enter,